// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * The following concept of the memory management is used:
 *
 * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
 * filled by user space with the data submitted via sendpage/sendmsg. Filling
 * up the TX SGL does not cause a crypto operation -- the data will only be
 * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
 * provide a buffer which is tracked with the RX SGL.
 *
 * During the processing of the recvmsg operation, the cipher request is
 * allocated and prepared. As part of the recvmsg operation, the processed
 * TX buffers are extracted from the TX SGL into a separate SGL.
 *
 * After the completion of the crypto operation, the RX SGL and the cipher
 * request is released. The extracted TX SGL parts are released together with
 * the RX SGL release.
 */

#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <crypto/skcipher.h>
#include <crypto/null.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

/*
 * Per-bind transform state: the AEAD transform itself plus a "null"
 * skcipher used purely as a fast SGL-to-SGL memcpy engine (see
 * crypto_aead_copy_sgl() below).
 */
struct aead_tfm {
	struct crypto_aead *aead;
	struct crypto_sync_skcipher *null_tfm;
};

/*
 * Check that the TX data accumulated so far is large enough to run the
 * cipher at all: at minimum the AAD, plus - for decryption - the
 * authentication tag. Caller must hold the socket lock (reads ctx->used).
 */
static inline bool aead_sufficient_data(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct af_alg_ctx *ctx = ask->private;
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int as = crypto_aead_authsize(tfm);

	/*
	 * The minimum amount of memory needed for an AEAD cipher is
	 * the AAD and in case of decryption the tag.
	 */
	return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}

/*
 * sendmsg entry point: only looks up the transform's IV size and
 * delegates the actual TX SGL filling to the generic af_alg_sendmsg().
 */
static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int ivsize = crypto_aead_ivsize(tfm);

	return af_alg_sendmsg(sock, msg, size, ivsize);
}

/*
 * Copy @len bytes from @src to @dst by running the "null" cipher over
 * the SGLs - effectively an SGL-aware memcpy. Returns 0 or a negative
 * errno from the skcipher layer.
 */
static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
				struct scatterlist *src,
				struct scatterlist *dst, unsigned int len)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);

	skcipher_request_set_sync_tfm(skreq, null_tfm);
	skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      NULL, NULL);
	skcipher_request_set_crypt(skreq, src, dst, len, NULL);

	return crypto_skcipher_encrypt(skreq);
}

/*
 * Perform one AEAD en/decryption for a single recvmsg invocation.
 *
 * Builds the RX SGL from the caller's iovecs, arranges an in-place
 * operation on the RX buffers (copying AAD/CT over via the null cipher,
 * and for decryption chaining the tag from the TX SGL onto the RX SGL),
 * then runs the cipher either asynchronously (AIO) or synchronously.
 *
 * Returns the number of output bytes produced, -EIOCBQUEUED via the
 * caller for queued AIO, or a negative errno. Caller must hold the
 * socket lock.
 */
static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
			 size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct af_alg_ctx *ctx = ask->private;
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm;
	unsigned int i, as = crypto_aead_authsize(tfm);
	struct af_alg_async_req *areq;
	struct af_alg_tsgl *tsgl, *tmp;
	struct scatterlist *rsgl_src, *tsgl_src = NULL;
	int err = 0;
	size_t used = 0;		/* [in]  TX bufs to be en/decrypted */
	size_t outlen = 0;		/* [out] RX bufs produced by kernel */
	size_t usedpages = 0;		/* [in]  RX bufs to be used from user */
	size_t processed = 0;		/* [in]  TX bufs to be consumed */

	/* Block (subject to @flags) until sendmsg/sendpage supplied data. */
	if (!ctx->used) {
		err = af_alg_wait_for_data(sk, flags);
		if (err)
			return err;
	}

	/*
	 * Data length provided by caller via sendmsg/sendpage that has not
	 * yet been processed.
	 */
	used = ctx->used;

	/*
	 * Make sure sufficient data is present -- note, the same check
	 * is also present in sendmsg/sendpage. The checks in sendpage/sendmsg
	 * shall provide an information to the data sender that something is
	 * wrong, but they are irrelevant to maintain the kernel integrity.
	 * We need this check here too in case user space decides to not honor
	 * the error message in sendmsg/sendpage and still call recvmsg. This
	 * check here protects the kernel integrity.
	 */
	if (!aead_sufficient_data(sk))
		return -EINVAL;

	/*
	 * Calculate the minimum output buffer size holding the result of the
	 * cipher operation. When encrypting data, the receiving buffer is
	 * larger by the tag length compared to the input buffer as the
	 * encryption operation generates the tag. For decryption, the input
	 * buffer provides the tag which is consumed resulting in only the
	 * plaintext without a buffer for the tag returned to the caller.
	 */
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;

	/*
	 * The cipher operation input data is reduced by the associated data
	 * length as this data is processed separately later on.
	 */
	used -= ctx->aead_assoclen;

	/* Allocate cipher request for current operation. */
	areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
				     crypto_aead_reqsize(tfm));
	if (IS_ERR(areq))
		return PTR_ERR(areq);

	/* convert iovecs of output buffers into RX SGL */
	err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages);
	if (err)
		goto free;

	/*
	 * Ensure output buffer is sufficiently large. If the caller provides
	 * less buffer space, only use the relative required input size. This
	 * allows AIO operation where the caller sent all data to be processed
	 * and the AIO operation performs the operation on the different chunks
	 * of the input data.
	 */
	if (usedpages < outlen) {
		size_t less = outlen - usedpages;

		if (used < less) {
			err = -EINVAL;
			goto free;
		}
		used -= less;
		outlen -= less;
	}

	/*
	 * Locate the first non-empty TX scatterlist entry; it marks the
	 * start of the source data (AAD || CT/PT) to be copied below.
	 */
	processed = used + ctx->aead_assoclen;
	list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
		for (i = 0; i < tsgl->cur; i++) {
			struct scatterlist *process_sg = tsgl->sg + i;

			if (!(process_sg->length) || !sg_page(process_sg))
				continue;
			tsgl_src = process_sg;
			break;
		}
		if (tsgl_src)
			break;
	}
	if (processed && !tsgl_src) {
		err = -EFAULT;
		goto free;
	}

	/*
	 * Copy of AAD from source to destination
	 *
	 * The AAD is copied to the destination buffer without change. Even
	 * when user space uses an in-place cipher operation, the kernel
	 * will copy the data as it does not see whether such in-place operation
	 * is initiated.
	 *
	 * To ensure efficiency, the following implementation ensures that the
	 * ciphers are invoked to perform a crypto operation in-place. This
	 * is achieved by memory management specified as follows.
	 */

	/* Use the RX SGL as source (and destination) for crypto op. */
	rsgl_src = areq->first_rsgl.sgl.sg;

	if (ctx->enc) {
		/*
		 * Encryption operation - The in-place cipher operation is
		 * achieved by the following operation:
		 *
		 * TX SGL: AAD || PT
		 *	    |	   |
		 *	    | copy |
		 *	    v	   v
		 * RX SGL: AAD || PT || Tag
		 */
		err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
					   areq->first_rsgl.sgl.sg, processed);
		if (err)
			goto free;
		/* TX data fully copied to RX; release all consumed TX SGLs. */
		af_alg_pull_tsgl(sk, processed, NULL, 0);
	} else {
		/*
		 * Decryption operation - To achieve an in-place cipher
		 * operation, the following SGL structure is used:
		 *
		 * TX SGL: AAD || CT || Tag
		 *	    |	   |	 ^
		 *	    | copy |	 | Create SGL link.
		 *	    v	   v	 |
		 * RX SGL: AAD || CT ----+
		 */

		 /* Copy AAD || CT to RX SGL buffer for in-place operation. */
		err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
					   areq->first_rsgl.sgl.sg, outlen);
		if (err)
			goto free;

		/* Create TX SGL for tag and chain it to RX SGL. */
		areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
						       processed - as);
		if (!areq->tsgl_entries)
			areq->tsgl_entries = 1;
		areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
							 areq->tsgl_entries),
					  GFP_KERNEL);
		if (!areq->tsgl) {
			err = -ENOMEM;
			goto free;
		}
		sg_init_table(areq->tsgl, areq->tsgl_entries);

		/* Release TX SGL, except for tag data and reassign tag data. */
		af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);

		/* chain the areq TX SGL holding the tag with RX SGL */
		if (usedpages) {
			/* RX SGL present */
			struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;

			sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
			sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
				 areq->tsgl);
		} else
			/* no RX SGL present (e.g. authentication only) */
			rsgl_src = areq->tsgl;
	}

	/* Initialize the crypto operation */
	aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
			       areq->first_rsgl.sgl.sg, used, ctx->iv);
	aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
	aead_request_set_tfm(&areq->cra_u.aead_req, tfm);

	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
		/* AIO operation */
		sock_hold(sk);
		areq->iocb = msg->msg_iocb;

		/* Remember output size that will be generated. */
		areq->outlen = outlen;

		aead_request_set_callback(&areq->cra_u.aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  af_alg_async_cb, areq);
		err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
				 crypto_aead_decrypt(&areq->cra_u.aead_req);

		/* AIO operation in progress */
		if (err == -EINPROGRESS || err == -EBUSY)
			return -EIOCBQUEUED;

		/* Cipher completed inline; drop the AIO hold again. */
		sock_put(sk);
	} else {
		/* Synchronous operation */
		aead_request_set_callback(&areq->cra_u.aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->wait);
		err = crypto_wait_req(ctx->enc ?
				crypto_aead_encrypt(&areq->cra_u.aead_req) :
				crypto_aead_decrypt(&areq->cra_u.aead_req),
				&ctx->wait);
	}


free:
	af_alg_free_resources(areq);

	return err ? err : outlen;
}

/*
 * recvmsg entry point: under the socket lock, run _aead_recvmsg() until
 * the caller's buffer space is exhausted or an error occurs, summing up
 * the produced output bytes.
 */
static int aead_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	int ret = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		int err = _aead_recvmsg(sock, msg, ignored, flags);

		/*
		 * This error covers -EIOCBQUEUED which implies that we can
		 * only handle one AIO request. If the caller wants to have
		 * multiple AIO requests in parallel, he must make multiple
		 * separate AIO calls.
		 *
		 * Also return the error if no data has been processed so far.
		 */
		if (err <= 0) {
			if (err == -EIOCBQUEUED || err == -EBADMSG || !ret)
				ret = err;
			goto out;
		}

		ret += err;
	}

out:
	af_alg_wmem_wakeup(sk);
	release_sock(sk);
	return ret;
}

/* proto_ops used once a key has been set on the parent socket. */
static struct proto_ops algif_aead_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg,
	.sendpage	=	af_alg_sendpage,
	.recvmsg	=	aead_recvmsg,
	.poll		=	af_alg_poll,
};

/*
 * Verify that a key has meanwhile been set on the parent socket; if so,
 * take a reference on the parent (once) and mark this child as keyed so
 * subsequent calls can skip the check. Returns -ENOKEY if the transform
 * still needs a key.
 */
static int aead_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct aead_tfm *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	if (ask->refcnt)
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
		goto unlock;

	if (!pask->refcnt++)
		sock_hold(psk);

	ask->refcnt = 1;
	sock_put(psk);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}

/* "nokey" sendmsg: succeed only once aead_check_key() passes. */
static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
			      size_t size)
{
	int err;

	err = aead_check_key(sock);
	if (err)
		return err;

	return aead_sendmsg(sock, msg, size);
}

/* "nokey" sendpage: succeed only once aead_check_key() passes. */
static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
				   int offset, size_t size, int flags)
{
	int err;

	err = aead_check_key(sock);
	if (err)
		return err;

	return af_alg_sendpage(sock, page, offset, size, flags);
}

/* "nokey" recvmsg: succeed only once aead_check_key() passes. */
static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
			      size_t ignored, int flags)
{
	int err;

	err = aead_check_key(sock);
	if (err)
		return err;

	return aead_recvmsg(sock, msg, ignored, flags);
}

/* proto_ops installed while the transform has no key set yet. */
static struct proto_ops algif_aead_ops_nokey = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg_nokey,
	.sendpage	=	aead_sendpage_nokey,
	.recvmsg	=	aead_recvmsg_nokey,
	.poll		=	af_alg_poll,
};

/*
 * Allocate the per-bind state: the named AEAD transform plus the shared
 * default null skcipher used for SGL copies. On any failure everything
 * allocated so far is released and an ERR_PTR is returned.
 */
static void *aead_bind(const char *name, u32 type, u32 mask)
{
	struct aead_tfm *tfm;
	struct crypto_aead *aead;
	struct crypto_sync_skcipher *null_tfm;

	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
	if (!tfm)
		return ERR_PTR(-ENOMEM);

	aead = crypto_alloc_aead(name, type, mask);
	if (IS_ERR(aead)) {
		kfree(tfm);
		return ERR_CAST(aead);
	}

	null_tfm = crypto_get_default_null_skcipher();
	if (IS_ERR(null_tfm)) {
		crypto_free_aead(aead);
		kfree(tfm);
		return ERR_CAST(null_tfm);
	}

	tfm->aead = aead;
	tfm->null_tfm = null_tfm;

	return tfm;
}

/* Release everything aead_bind() allocated. */
static void aead_release(void *private)
{
	struct aead_tfm *tfm = private;

	crypto_free_aead(tfm->aead);
	crypto_put_default_null_skcipher();
	kfree(tfm);
}

/* Forward the user-requested authentication tag size to the transform. */
static int aead_setauthsize(void *private, unsigned int authsize)
{
	struct aead_tfm *tfm = private;

	return crypto_aead_setauthsize(tfm->aead, authsize);
}

/* Forward the user-supplied key to the transform. */
static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
	struct aead_tfm *tfm = private;

	return crypto_aead_setkey(tfm->aead, key, keylen);
}

/*
 * Socket destructor: drop any unconsumed TX data, zeroize and free the
 * IV, free the context, and release the reference on the parent socket.
 */
static void aead_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int ivlen = crypto_aead_ivsize(tfm);

	af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

/*
 * Set up the per-connection af_alg context (zeroed state, zeroed IV of
 * the transform's IV size, empty TX SGL list) for an accepted socket.
 * Returns 0 or -ENOMEM.
 */
static int aead_accept_parent_nokey(void *private, struct sock *sk)
{
	struct af_alg_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_tfm *tfm = private;
	struct crypto_aead *aead = tfm->aead;
	unsigned int len = sizeof(*ctx);
	unsigned int ivlen = crypto_aead_ivsize(aead);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, ivlen);

	INIT_LIST_HEAD(&ctx->tsgl_list);
	ctx->len = len;
	ctx->used = 0;
	atomic_set(&ctx->rcvused, 0);
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	ctx->aead_assoclen = 0;
	crypto_init_wait(&ctx->wait);

	ask->private = ctx;

	sk->sk_destruct = aead_sock_destruct;

	return 0;
}

/* Keyed accept path: refuse accept until a key has been set. */
static int aead_accept_parent(void *private, struct sock *sk)
{
	struct aead_tfm *tfm = private;

	if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return aead_accept_parent_nokey(private, sk);
}

/* af_alg type registration for "aead" sockets. */
static const struct af_alg_type algif_type_aead = {
	.bind		=	aead_bind,
	.release	=	aead_release,
	.setkey		=	aead_setkey,
	.setauthsize	=	aead_setauthsize,
	.accept		=	aead_accept_parent,
	.accept_nokey	=	aead_accept_parent_nokey,
	.ops		=	&algif_aead_ops,
	.ops_nokey	=	&algif_aead_ops_nokey,
	.name		=	"aead",
	.owner		=	THIS_MODULE
};

static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);
	BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");