/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * The following concept of the memory management is used:
 *
 * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
 * filled by user space with the data submitted via sendpage/sendmsg. Filling
 * up the TX SGL does not cause a crypto operation -- the data will only be
 * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
 * provide a buffer which is tracked with the RX SGL.
 *
 * During the processing of the recvmsg operation, the cipher request is
 * allocated and prepared. As part of the recvmsg operation, the processed
 * TX buffers are extracted from the TX SGL into a separate SGL.
 *
 * After the completion of the crypto operation, the RX SGL and the cipher
 * request is released. The extracted TX SGL parts are released together with
 * the RX SGL release.
 */

#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <crypto/skcipher.h>
#include <crypto/null.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

/*
 * Per-bind transform state: the AEAD cipher itself plus the default "null"
 * skcipher used to copy AAD/plaintext from the TX SGL to the RX SGL so the
 * AEAD operation can run in-place on the RX buffers.
 */
struct aead_tfm {
	struct crypto_aead *aead;
	struct crypto_skcipher *null_tfm;
};

/*
 * aead_sufficient_data - check that user space queued enough TX data
 * @sk: child (data) socket
 *
 * Returns true when the queued TX bytes cover at least the AAD and, for
 * decryption, additionally the authentication tag.
 */
static inline bool aead_sufficient_data(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct af_alg_ctx *ctx = ask->private;
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int as = crypto_aead_authsize(tfm);

	/*
	 * The minimum amount of memory needed for an AEAD cipher is
	 * the AAD and in case of decryption the tag.
	 */
	return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}

/*
 * aead_sendmsg - queue user data into the TX SGL
 *
 * Thin wrapper around af_alg_sendmsg() that supplies the AEAD cipher's IV
 * size so the generic code can consume an IV from the control message.
 */
static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int ivsize = crypto_aead_ivsize(tfm);

	return af_alg_sendmsg(sock, msg, size, ivsize);
}

/*
 * crypto_aead_copy_sgl - copy @len bytes from @src SGL to @dst SGL
 *
 * Uses the "null" skcipher (an identity transform) so the copy is performed
 * by the crypto API's scatterlist machinery. Returns 0 or a negative errno.
 */
static int crypto_aead_copy_sgl(struct crypto_skcipher *null_tfm,
				struct scatterlist *src,
				struct scatterlist *dst, unsigned int len)
{
	SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);

	skcipher_request_set_tfm(skreq, null_tfm);
	skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      NULL, NULL);
	skcipher_request_set_crypt(skreq, src, dst, len, NULL);

	return crypto_skcipher_encrypt(skreq);
}

/*
 * _aead_recvmsg - perform one AEAD en/decryption into the caller's buffers
 *
 * Builds the RX SGL from the caller's iovecs, copies AAD (and, for
 * encryption, the plaintext) from the TX SGL so the cipher can run
 * in-place on the RX buffers, then issues the AEAD request either
 * asynchronously (AIO) or synchronously.
 *
 * Returns the number of output bytes, -EIOCBQUEUED for an in-flight AIO
 * request, or a negative errno.
 */
static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
			 size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct af_alg_ctx *ctx = ask->private;
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	struct crypto_skcipher *null_tfm = aeadc->null_tfm;
	unsigned int i, as = crypto_aead_authsize(tfm);
	struct af_alg_async_req *areq;
	struct af_alg_tsgl *tsgl, *tmp;
	struct scatterlist *rsgl_src, *tsgl_src = NULL;
	int err = 0;
	size_t used = 0;		/* [in]  TX bufs to be en/decrypted */
	size_t outlen = 0;		/* [out] RX bufs produced by kernel */
	size_t usedpages = 0;		/* [in]  RX bufs to be used from user */
	size_t processed = 0;		/* [in]  TX bufs to be consumed */

	/* Block (subject to @flags) until user space has sent data. */
	if (!ctx->used) {
		err = af_alg_wait_for_data(sk, flags);
		if (err)
			return err;
	}

	/*
	 * Data length provided by caller via sendmsg/sendpage that has not
	 * yet been processed.
	 */
	used = ctx->used;

	/*
	 * Make sure sufficient data is present -- note, the same check is
	 * also present in sendmsg/sendpage. The checks in sendpage/sendmsg
	 * shall provide an information to the data sender that something is
	 * wrong, but they are irrelevant to maintain the kernel integrity.
	 * We need this check here too in case user space decides to not honor
	 * the error message in sendmsg/sendpage and still call recvmsg. This
	 * check here protects the kernel integrity.
	 */
	if (!aead_sufficient_data(sk))
		return -EINVAL;

	/*
	 * Calculate the minimum output buffer size holding the result of the
	 * cipher operation. When encrypting data, the receiving buffer is
	 * larger by the tag length compared to the input buffer as the
	 * encryption operation generates the tag. For decryption, the input
	 * buffer provides the tag which is consumed resulting in only the
	 * plaintext without a buffer for the tag returned to the caller.
	 */
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;

	/*
	 * The cipher operation input data is reduced by the associated data
	 * length as this data is processed separately later on.
	 */
	used -= ctx->aead_assoclen;

	/* Allocate cipher request for current operation. */
	areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
				     crypto_aead_reqsize(tfm));
	if (IS_ERR(areq))
		return PTR_ERR(areq);

	/* convert iovecs of output buffers into RX SGL */
	err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages);
	if (err)
		goto free;

	/*
	 * Ensure output buffer is sufficiently large. If the caller provides
	 * less buffer space, only use the relative required input size. This
	 * allows AIO operation where the caller sent all data to be processed
	 * and the AIO operation performs the operation on the different chunks
	 * of the input data.
	 */
	if (usedpages < outlen) {
		size_t less = outlen - usedpages;

		if (used < less) {
			err = -EINVAL;
			goto free;
		}
		used -= less;
		outlen -= less;
	}

	/*
	 * Locate the first non-empty TX scatterlist entry; it is the source
	 * for the AAD/data copy below.
	 */
	processed = used + ctx->aead_assoclen;
	list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
		for (i = 0; i < tsgl->cur; i++) {
			struct scatterlist *process_sg = tsgl->sg + i;

			if (!(process_sg->length) || !sg_page(process_sg))
				continue;
			tsgl_src = process_sg;
			break;
		}
		if (tsgl_src)
			break;
	}
	if (processed && !tsgl_src) {
		err = -EFAULT;
		goto free;
	}

	/*
	 * Copy of AAD from source to destination
	 *
	 * The AAD is copied to the destination buffer without change. Even
	 * when user space uses an in-place cipher operation, the kernel
	 * will copy the data as it does not see whether such in-place operation
	 * is initiated.
	 *
	 * To ensure efficiency, the following implementation ensure that the
	 * ciphers are invoked to perform a crypto operation in-place. This
	 * is achieved by memory management specified as follows.
	 */

	/* Use the RX SGL as source (and destination) for crypto op. */
	rsgl_src = areq->first_rsgl.sgl.sg;

	if (ctx->enc) {
		/*
		 * Encryption operation - The in-place cipher operation is
		 * achieved by the following operation:
		 *
		 * TX SGL: AAD || PT
		 *	    |	   |
		 *	    | copy |
		 *	    v	   v
		 * RX SGL: AAD || PT || Tag
		 */
		err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
					   areq->first_rsgl.sgl.sg, processed);
		if (err)
			goto free;
		af_alg_pull_tsgl(sk, processed, NULL, 0);
	} else {
		/*
		 * Decryption operation - To achieve an in-place cipher
		 * operation, the following  SGL structure is used:
		 *
		 * TX SGL: AAD || CT || Tag
		 *	    |	   |	 ^
		 *	    | copy |	 | Create SGL link.
		 *	    v	   v	 |
		 * RX SGL: AAD || CT ----+
		 */

		/* Copy AAD || CT to RX SGL buffer for in-place operation. */
		err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
					   areq->first_rsgl.sgl.sg, outlen);
		if (err)
			goto free;

		/* Create TX SGL for tag and chain it to RX SGL. */
		areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
						       processed - as);
		if (!areq->tsgl_entries)
			areq->tsgl_entries = 1;
		areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
							 areq->tsgl_entries),
					  GFP_KERNEL);
		if (!areq->tsgl) {
			err = -ENOMEM;
			goto free;
		}
		sg_init_table(areq->tsgl, areq->tsgl_entries);

		/* Release TX SGL, except for tag data and reassign tag data. */
		af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);

		/* chain the areq TX SGL holding the tag with RX SGL */
		if (usedpages) {
			/* RX SGL present */
			struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;

			sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
			sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
				 areq->tsgl);
		} else
			/* no RX SGL present (e.g. authentication only) */
			rsgl_src = areq->tsgl;
	}

	/* Initialize the crypto operation */
	aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
			       areq->first_rsgl.sgl.sg, used, ctx->iv);
	aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
	aead_request_set_tfm(&areq->cra_u.aead_req, tfm);

	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
		/* AIO operation */
		sock_hold(sk);	/* dropped in af_alg_async_cb or below */
		areq->iocb = msg->msg_iocb;

		/* Remember output size that will be generated. */
		areq->outlen = outlen;

		aead_request_set_callback(&areq->cra_u.aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  af_alg_async_cb, areq);
		err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
				 crypto_aead_decrypt(&areq->cra_u.aead_req);

		/* AIO operation in progress */
		if (err == -EINPROGRESS || err == -EBUSY)
			return -EIOCBQUEUED;

		sock_put(sk);
	} else {
		/* Synchronous operation */
		aead_request_set_callback(&areq->cra_u.aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->wait);
		err = crypto_wait_req(ctx->enc ?
				crypto_aead_encrypt(&areq->cra_u.aead_req) :
				crypto_aead_decrypt(&areq->cra_u.aead_req),
				&ctx->wait);
	}


free:
	af_alg_free_resources(areq);

	return err ? err : outlen;
}

/*
 * aead_recvmsg - recvmsg entry point; loop over the caller's buffer space
 *
 * Invokes _aead_recvmsg() repeatedly while iovec space remains, summing the
 * produced output. Serialized by the socket lock.
 */
static int aead_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	int ret = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		int err = _aead_recvmsg(sock, msg, ignored, flags);

		/*
		 * This error covers -EIOCBQUEUED which implies that we can
		 * only handle one AIO request. If the caller wants to have
		 * multiple AIO requests in parallel, he must make multiple
		 * separate AIO calls.
		 *
		 * Also return the error if no data has been processed so far.
		 */
		if (err <= 0) {
			if (err == -EIOCBQUEUED || err == -EBADMSG || !ret)
				ret = err;
			goto out;
		}

		ret += err;
	}

out:
	af_alg_wmem_wakeup(sk);
	release_sock(sk);
	return ret;
}

/* proto_ops for a socket whose transform already has a key set. */
static struct proto_ops algif_aead_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg,
	.sendpage	=	af_alg_sendpage,
	.recvmsg	=	aead_recvmsg,
	.poll		=	af_alg_poll,
};

/*
 * aead_check_key - verify a key has been set before allowing data ops
 *
 * Called from the *_nokey handlers. If the parent transform now has a key,
 * take a reference on the parent socket (once) and mark this child as
 * keyed so subsequent calls short-circuit. Returns 0 or -ENOKEY.
 */
static int aead_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct aead_tfm *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	if (ask->refcnt)
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
		goto unlock;

	if (!pask->refcnt++)
		sock_hold(psk);

	ask->refcnt = 1;
	sock_put(psk);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}

/* sendmsg handler used until a key is set; see aead_check_key(). */
static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
			      size_t size)
{
	int err;

	err = aead_check_key(sock);
	if (err)
		return err;

	return aead_sendmsg(sock, msg, size);
}

/* sendpage handler used until a key is set; see aead_check_key(). */
static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
				   int offset, size_t size, int flags)
{
	int err;

	err = aead_check_key(sock);
	if (err)
		return err;

	return af_alg_sendpage(sock, page, offset, size, flags);
}

/* recvmsg handler used until a key is set; see aead_check_key(). */
static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
			      size_t ignored, int flags)
{
	int err;

	err = aead_check_key(sock);
	if (err)
		return err;

	return aead_recvmsg(sock, msg, ignored, flags);
}

/* proto_ops installed while no key is set; data ops gate on the key. */
static struct proto_ops algif_aead_ops_nokey = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg_nokey,
	.sendpage	=	aead_sendpage_nokey,
	.recvmsg	=	aead_recvmsg_nokey,
	.poll		=	af_alg_poll,
};

/*
 * aead_bind - allocate transform state for bind(2) on the parent socket
 *
 * Allocates the AEAD cipher named by user space plus the shared null
 * skcipher used for SGL copies. Returns the aead_tfm or an ERR_PTR.
 */
static void *aead_bind(const char *name, u32 type, u32 mask)
{
	struct aead_tfm *tfm;
	struct crypto_aead *aead;
	struct crypto_skcipher *null_tfm;

	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
	if (!tfm)
		return ERR_PTR(-ENOMEM);

	aead = crypto_alloc_aead(name, type, mask);
	if (IS_ERR(aead)) {
		kfree(tfm);
		return ERR_CAST(aead);
	}

	null_tfm = crypto_get_default_null_skcipher();
	if (IS_ERR(null_tfm)) {
		crypto_free_aead(aead);
		kfree(tfm);
		return ERR_CAST(null_tfm);
	}

	tfm->aead = aead;
	tfm->null_tfm = null_tfm;

	return tfm;
}

/* Counterpart of aead_bind(): release the ciphers and the state. */
static void aead_release(void *private)
{
	struct aead_tfm *tfm = private;

	crypto_free_aead(tfm->aead);
	crypto_put_default_null_skcipher();
	kfree(tfm);
}

/* Forward the ALG_SET_AEAD_AUTHSIZE setsockopt to the cipher. */
static int aead_setauthsize(void *private, unsigned int authsize)
{
	struct aead_tfm *tfm = private;

	return crypto_aead_setauthsize(tfm->aead, authsize);
}

/* Forward the ALG_SET_KEY setsockopt to the cipher. */
static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
	struct aead_tfm *tfm = private;

	return crypto_aead_setkey(tfm->aead, key, keylen);
}

/*
 * aead_sock_destruct - child socket destructor
 *
 * Drops any unprocessed TX data, zeroizes and frees the IV, frees the
 * context and releases the reference on the parent socket.
 */
static void aead_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int ivlen = crypto_aead_ivsize(tfm);

	af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
	sock_kzfree_s(sk, ctx->iv, ivlen);	/* IV may hold secret state */
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

/*
 * aead_accept_parent_nokey - set up the per-connection context
 *
 * Allocates and zero-initializes the af_alg_ctx and the IV buffer for a
 * freshly accepted child socket. Returns 0 or -ENOMEM.
 */
static int aead_accept_parent_nokey(void *private, struct sock *sk)
{
	struct af_alg_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_tfm *tfm = private;
	struct crypto_aead *aead = tfm->aead;
	unsigned int len = sizeof(*ctx);
	unsigned int ivlen = crypto_aead_ivsize(aead);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, ivlen);

	INIT_LIST_HEAD(&ctx->tsgl_list);
	ctx->len = len;
	ctx->used = 0;
	atomic_set(&ctx->rcvused, 0);
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	ctx->aead_assoclen = 0;
	crypto_init_wait(&ctx->wait);

	ask->private = ctx;

	sk->sk_destruct = aead_sock_destruct;

	return 0;
}

/* accept() path when a key is required: refuse until one is set. */
static int aead_accept_parent(void *private, struct sock *sk)
{
	struct aead_tfm *tfm = private;

	if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return aead_accept_parent_nokey(private, sk);
}

/* AF_ALG type registration for "aead" sockets. */
static const struct af_alg_type algif_type_aead = {
	.bind		=	aead_bind,
	.release	=	aead_release,
	.setkey		=	aead_setkey,
	.setauthsize	=	aead_setauthsize,
	.accept		=	aead_accept_parent,
	.accept_nokey	=	aead_accept_parent_nokey,
	.ops		=	&algif_aead_ops,
	.ops_nokey	=	&algif_aead_ops_nokey,
	.name		=	"aead",
	.owner		=	THIS_MODULE
};

static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);
	BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");