/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This file is derived from algif_skcipher.c.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

/* Fixed-size scatterlist holding the data sent in via sendmsg/sendpage. */
struct aead_sg_list {
	unsigned int cur;	/* number of populated entries in sg[] */
	struct scatterlist sg[ALG_MAX_PAGES];
};

/* Per-socket state, allocated in aead_accept_parent(). */
struct aead_ctx {
	/* TX side: pages filled by sendmsg()/sendpage() */
	struct aead_sg_list tsgl;
	/*
	 * RSGL_MAX_ENTRIES is an artificial limit where user space at maximum
	 * can cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES
	 * pages
	 */
#define RSGL_MAX_ENTRIES ALG_MAX_PAGES
	/* RX side: user pages pinned by recvmsg() for the cipher output */
	struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES];

	void *iv;		/* IV buffer of crypto_aead_ivsize() bytes */

	struct af_alg_completion completion;

	unsigned long used;	/* total bytes queued in tsgl */

	unsigned int len;	/* size of this struct incl. request ctx */
	bool more;		/* sender indicated MSG_MORE */
	bool merge;		/* last tsgl page has room to append data */
	bool enc;		/* true = encrypt, false = decrypt */

	size_t aead_assoclen;	/* associated data length from CMSG */
	struct aead_request aead_req;	/* must be last: reqsize follows */
};

/* Bytes of send buffer still available (sk_sndbuf rounded to pages). */
static inline int aead_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

/* Writable means at least one full page of send buffer is free. */
static inline bool aead_writable(struct sock *sk)
{
	return PAGE_SIZE <= aead_sndbuf(sk);
}

/*
 * True when enough data has been queued to run one cipher operation:
 * at least the associated data plus the authentication tag.
 */
static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));

	return ctx->used >= ctx->aead_assoclen + as;
}

/*
 * Release all pages referenced by the TX scatterlist and reset the
 * per-socket accounting so a fresh operation can start.
 * Caller must hold the socket lock.
 */
static void aead_put_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct scatterlist *sg = sgl->sg;
	unsigned int i;

	for (i = 0; i < sgl->cur; i++) {
		if (!sg_page(sg + i))
			continue;

		put_page(sg_page(sg + i));
		sg_assign_page(sg + i, NULL);
	}
	/* clear any end marker set by recvmsg's sg_mark_end() */
	sg_init_table(sg, ALG_MAX_PAGES);
	sgl->cur = 0;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
}

/*
 * Wake up a sender blocked on send-buffer space once room is available.
 * NOTE(review): this wakes with POLLIN/POLLRDNORM/POLLRDBAND rather than
 * the POLLOUT family — mirrors algif_skcipher; confirm intent.
 */
static void aead_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!aead_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

/*
 * Block in recvmsg() until the sender finishes the message (ctx->more
 * becomes false). Returns 0 on success, -EAGAIN for non-blocking I/O,
 * or -ERESTARTSYS when interrupted by a signal.
 */
static int aead_wait_for_data(struct sock *sk, unsigned flags)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, !ctx->more)) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

	return err;
}

/*
 * Wake up a receiver once a complete message (no MSG_MORE pending and
 * some data queued) is ready for processing.
 */
static void aead_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (ctx->more)
		return;
	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

/*
 * Copy user data into the TX scatterlist. Control messages may set the
 * operation (encrypt/decrypt), the IV and the associated data length;
 * they are only honored at the start of a message. Returns the number
 * of bytes copied or a negative error.
 */
static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned ivsize =
		crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err = -EINVAL;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	lock_sock(sk);
	/* a previous, unconsumed message is still pending */
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);

		ctx->aead_assoclen = con.aead_assoclen;
	}

	while (size) {
		unsigned long len = size;
		struct scatterlist *sg = NULL;

		/* use the existing memory in an allocated page */
		if (ctx->merge) {
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);
			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			/* keep merging while the page is not yet full */
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!aead_writable(sk)) {
			/* user space sent too much data */
			aead_put_sgl(sk);
			err = -EMSGSIZE;
			goto unlock;
		}

		/* allocate a new page */
		len = min_t(unsigned long, size, aead_sndbuf(sk));
		while (len) {
			int plen = 0;

			if (sgl->cur >= ALG_MAX_PAGES) {
				aead_put_sgl(sk);
				err = -E2BIG;
				goto unlock;
			}

			sg = sgl->sg + sgl->cur;
			plen = min_t(int, len, PAGE_SIZE);

			sg_assign_page(sg, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg));
				sg_assign_page(sg, NULL);
				goto unlock;
			}

			sg->offset = 0;
			sg->length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			sgl->cur++;
			size -= plen;
			ctx->merge = plen & (PAGE_SIZE - 1);
		}
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;
	/* message complete but too short for assoc data + tag: reject */
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: copied;
}

/*
 * Zero-copy variant of aead_sendmsg(): reference the caller's page
 * directly in the TX scatterlist instead of copying the data.
 */
static ssize_t aead_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (sgl->cur >= ALG_MAX_PAGES)
		return -E2BIG;

	lock_sock(sk);
	/* a previous, unconsumed message is still pending */
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!aead_writable(sk)) {
		/* user space sent too much data */
		aead_put_sgl(sk);
		err = -EMSGSIZE;
		goto unlock;
	}

	/* a foreign page cannot be appended to, so stop merging */
	ctx->merge = 0;

	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

	err = 0;

done:
	ctx->more = flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

/*
 * Run the AEAD operation over the queued TX data and write the result
 * into the caller's iovecs. Returns the output length on success; a
 * failed authentication during decryption surfaces as -EBADMSG.
 */
static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	unsigned int i = 0;
	int err = -EINVAL;
	unsigned long used = 0;
	size_t outlen = 0;
	size_t usedpages = 0;
	unsigned int cnt = 0;

	/* Limit number of IOV blocks to be accessed below */
	if (msg->msg_iter.nr_segs > RSGL_MAX_ENTRIES)
		return -ENOMSG;

	lock_sock(sk);

	/*
	 * AEAD memory structure: For encryption, the tag is appended to the
	 * ciphertext which implies that the memory allocated for the ciphertext
	 * must be increased by the tag length. For decryption, the tag
	 * is expected to be concatenated to the ciphertext. The plaintext
	 * therefore has a memory size of the ciphertext minus the tag length.
	 *
	 * The memory structure for cipher operation has the following
	 * structure:
	 *	AEAD encryption input:  assoc data || plaintext
	 *	AEAD encryption output: ciphertext || auth tag
	 *	AEAD decryption input:  assoc data || ciphertext || auth tag
	 *	AEAD decryption output: plaintext
	 */

	if (ctx->more) {
		err = aead_wait_for_data(sk, flags);
		if (err)
			goto unlock;
	}

	used = ctx->used;

	/*
	 * Make sure sufficient data is present -- note, the same check is
	 * is also present in sendmsg/sendpage. The checks in sendpage/sendmsg
	 * shall provide an information to the data sender that something is
	 * wrong, but they are irrelevant to maintain the kernel integrity.
	 * We need this check here too in case user space decides to not honor
	 * the error message in sendmsg/sendpage and still call recvmsg. This
	 * check here protects the kernel integrity.
	 */
	if (!aead_sufficient_data(ctx))
		goto unlock;

	/* required output buffer size: everything that was queued */
	outlen = used;

	/*
	 * The cipher operation input data is reduced by the associated data
	 * length as this data is processed separately later on.
	 */
	used -= ctx->aead_assoclen + (ctx->enc ? as : 0);

	/* convert iovecs of output buffers into scatterlists */
	while (iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&ctx->rsgl[cnt], &msg->msg_iter,
				     seglen);
		if (err < 0)
			goto unlock;
		usedpages += err;
		/* chain the new scatterlist with previous one */
		if (cnt)
			af_alg_link_sg(&ctx->rsgl[cnt-1], &ctx->rsgl[cnt]);

		/* we do not need more iovecs as we have sufficient memory */
		if (outlen <= usedpages)
			break;
		iov_iter_advance(&msg->msg_iter, err);
		cnt++;
	}

	err = -EINVAL;
	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen)
		goto unlock;

	sg_mark_end(sgl->sg + sgl->cur - 1);

	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->rsgl[0].sg,
			       used, ctx->iv);
	aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);

	err = af_alg_wait_for_completion(ctx->enc ?
					 crypto_aead_encrypt(&ctx->aead_req) :
					 crypto_aead_decrypt(&ctx->aead_req),
					 &ctx->completion);

	if (err) {
		/* EBADMSG implies a valid cipher operation took place */
		if (err == -EBADMSG)
			aead_put_sgl(sk);
		goto unlock;
	}

	aead_put_sgl(sk);

	err = 0;

unlock:
	/* release the user pages pinned for the output */
	for (i = 0; i < cnt; i++)
		af_alg_free_sg(&ctx->rsgl[i]);

	aead_wmem_wakeup(sk);
	release_sock(sk);

	return err ? err : outlen;
}

/*
 * poll: readable once a complete message is queued, writable while at
 * least one page of send buffer remains.
 */
static unsigned int aead_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (!ctx->more)
		mask |= POLLIN | POLLRDNORM;

	if (aead_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_aead_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg,
	.sendpage	=	aead_sendpage,
	.recvmsg	=	aead_recvmsg,
	.poll		=	aead_poll,
};

/* af_alg_type hook: allocate the AEAD transform named by bind(2). */
static void *aead_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_aead(name, type, mask);
}

static void aead_release(void *private)
{
	crypto_free_aead(private);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
	return crypto_aead_setauthsize(private, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_aead_setkey(private, key, keylen);
}

/* Socket teardown: free queued pages, the IV and the context itself. */
static void aead_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int ivlen = crypto_aead_ivsize(
				crypto_aead_reqtfm(&ctx->aead_req));

	aead_put_sgl(sk);
	/* IV may hold key-dependent material: zeroize on free */
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

/*
 * Set up the per-socket context for an accept(2)ed operation socket:
 * context + request area in one allocation, plus a separate IV buffer.
 */
static int aead_accept_parent(void *private, struct sock *sk)
{
	struct aead_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
	unsigned int ivlen = crypto_aead_ivsize(private);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, ivlen);

	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	ctx->tsgl.cur = 0;
	ctx->aead_assoclen = 0;
	af_alg_init_completion(&ctx->completion);
	sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);

	ask->private = ctx;

	aead_request_set_tfm(&ctx->aead_req, private);
	aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  af_alg_complete, &ctx->completion);

	sk->sk_destruct = aead_sock_destruct;

	return 0;
}

static const struct af_alg_type algif_type_aead = {
	.bind		=	aead_bind,
	.release	=	aead_release,
	.setkey		=	aead_setkey,
	.setauthsize	=	aead_setauthsize,
	.accept		=	aead_accept_parent,
	.ops		=	&algif_aead_ops,
	.name		=	"aead",
	.owner		=	THIS_MODULE
};

static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);
	BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");