Lines matching "queue-sizes" - HiSilicon SEC crypto driver (sec_algs.c)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017 HiSilicon Limited. */

#include <linux/dma-mapping.h>
In sec_alg_skcipher_init_template():

	req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
	req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
	req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
	req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;
	...
	req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
	req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
In sec_alg_skcipher_init_context():

	ctx->cipher_alg = alg;
	memcpy(ctx->key, key, keylen);
	sec_alg_skcipher_init_template(ctx, &ctx->req_template,
				       ctx->cipher_alg);
In sec_free_hw_sgl():

	sgl_next = sgl_current->next;
	sgl_next_dma = sgl_current->next_sgl;
	...
	dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);
In sec_alloc_and_fill_hw_sgl():

	...
		return -EINVAL;
	...
	sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
				   gfp, &sgl_next_dma);
	if (!sgl_next) {
		ret = -ENOMEM;
		...
	}
	...
	sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
	sgl_current->next_sgl = sgl_next_dma;
	sgl_current->next = sgl_next;
	...
	sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
	sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
	sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
	...
	sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM;
	sgl_current->next_sgl = 0;
	(*sec_sgl)->entry_sum_in_chain = count;
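These fragments outline how the driver builds a chained hardware scatter-gather list: each descriptor holds up to SEC_MAX_SGE_NUM entries and points at its successor through next_sgl, the tail descriptor takes the remainder (count % SEC_MAX_SGE_NUM) with a zero next_sgl, and the head records the chain-wide entry total. A rough sketch of the descriptor layout these field accesses imply - the field names come from the excerpt, but the integer widths, field order, and the SGE count are assumptions, not the driver's actual definition:

	#include <linux/types.h>

	#define SEC_MAX_SGE_NUM	64	/* assumed value, for illustration */

	struct sec_hw_sge {			/* one data segment */
		u64 buf;			/* sg_dma_address() of the segment */
		u32 len;			/* sg_dma_len() of the segment */
	};

	struct sec_hw_sgl {			/* one descriptor in the chain */
		u64 next_sgl;			/* DMA address of next descriptor, 0 at tail */
		u16 entry_sum_in_chain;		/* total SGEs; meaningful in the head only */
		u16 entry_sum_in_sgl;		/* SGEs used in this descriptor */
		u32 data_bytes_in_sgl;		/* payload bytes covered by this descriptor */
		struct sec_hw_sgl *next;	/* CPU-side mirror of next_sgl */
		struct sec_hw_sge sge_entries[SEC_MAX_SGE_NUM];
	};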
In sec_alg_skcipher_setkey():

	struct device *dev = ctx->queue->dev_info->dev;
	...
	mutex_lock(&ctx->lock);
	if (ctx->key) {
		/* rekeying: wipe the old key material */
		memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
	} else {
		ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
					      &ctx->pkey, GFP_KERNEL);
		if (!ctx->key) {
			mutex_unlock(&ctx->lock);
			return -ENOMEM;
		}
	}
	mutex_unlock(&ctx->lock);
In sec_alg_skcipher_setkey_aes_ecb():	return -EINVAL;
In sec_alg_skcipher_setkey_aes_cbc():	return -EINVAL;
In sec_alg_skcipher_setkey_aes_ctr():	return -EINVAL;
In sec_alg_skcipher_setkey_aes_xts():	return -EINVAL;
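Each of these return -EINVAL lines is the default arm of a key-length switch that maps keylen onto a hardware algorithm variant before delegating to sec_alg_skcipher_setkey(). A minimal sketch of that pattern for the CBC case - the enum name and the SEC_C_AES_CBC_* enumerators are assumptions here; only sec_alg_skcipher_setkey() and the -EINVAL return appear in the matched lines:

	#include <crypto/aes.h>		/* AES_KEYSIZE_128/192/256 */
	#include <crypto/skcipher.h>

	static int demo_setkey_aes_cbc(struct crypto_skcipher *tfm,
				       const u8 *key, unsigned int keylen)
	{
		enum sec_cipher_alg alg;		/* assumed enum */

		switch (keylen) {
		case AES_KEYSIZE_128:
			alg = SEC_C_AES_CBC_128;	/* assumed enumerator */
			break;
		case AES_KEYSIZE_192:
			alg = SEC_C_AES_CBC_192;
			break;
		case AES_KEYSIZE_256:
			alg = SEC_C_AES_CBC_256;
			break;
		default:
			return -EINVAL;			/* the matched line */
		}

		return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
	}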
In sec_alg_free_el():

	sec_free_hw_sgl(el->out, el->dma_out, info);
	sec_free_hw_sgl(el->in, el->dma_in, info);
	kfree(el->sgl_in);
	kfree(el->sgl_out);
static int sec_send_request(struct sec_request *sec_req,
			    struct sec_queue *queue)
{
	...
	mutex_lock(&sec_req->lock);
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		/*
		 * Add to hardware queue only under the following circumstances:
		 * 1) Software and hardware queue empty so no chain dependencies
		 * 2) No dependencies as new IV - (check software queue empty
		 *    to maintain order)
		 * ...
		 * In other cases first insert onto the software queue, which
		 * is then emptied as requests complete.
		 */
		if (!queue->havesoftqueue ||
		    (kfifo_is_empty(&queue->softqueue) &&
		     sec_queue_empty(queue))) {
			ret = sec_queue_send(queue, &el->req, sec_req);
			if (ret == -EAGAIN) {
				...
				/* DEAD if here - should not happen */
				ret = -EBUSY;
				...
			}
		} else {
			kfifo_put(&queue->softqueue, el);
		}
	}
	mutex_unlock(&sec_req->lock);
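The branch above is what keeps IV chaining correct: an element may be handed straight to hardware only when both queues are empty, so nothing is in flight ahead of it; otherwise it is parked on the per-queue kfifo and replayed in order from the completion callback. A small self-contained sketch of that kfifo usage, matching the calls seen throughout this file (the demo_* wrappers are hypothetical; the 512-entry size comes from sec_alg_skcipher_init_with_queue() further down):

	#include <linux/kfifo.h>
	#include <linux/slab.h>

	struct sec_request_el;			/* opaque here */

	static DECLARE_KFIFO_PTR(softqueue, struct sec_request_el *);

	static int demo_softqueue_init(void)
	{
		/* 512-entry FIFO of element pointers */
		return kfifo_alloc(&softqueue, 512, GFP_KERNEL);
	}

	static void demo_softqueue_cycle(struct sec_request_el *el)
	{
		struct sec_request_el *next;

		kfifo_put(&softqueue, el);	/* returns 0 (element dropped) when full */
		if (kfifo_get(&softqueue, &next))	/* returns 0 when empty */
			;			/* 'next' would go to sec_queue_send() here */
	}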
In sec_skcipher_alg_callback():

	struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
	...
	struct device *dev = ctx->queue->dev_info->dev;
	...
	sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
				      head);
	icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
			 SEC_BD_W0_ICV_OR_SKEY_EN_S;
	if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
		dev_err(dev, ...,
			sec_resp->w1 & SEC_BD_W1_BD_INVALID,
			...);
		sec_req->err = -EINVAL;
		/*
		 * We need to muddle on to avoid getting stuck with elements
		 * on the queue. The error will be reported to the requester,
		 * which should be able to handle it appropriately.
		 */
	}

	spin_lock_bh(&ctx->queue->queuelock);
	/* Put the IV in place for chained cases */
	switch (ctx->cipher_alg) {
	...
		if (sec_req_el->req.w0 & SEC_BD_W0_DE)
			sg_pcopy_to_buffer(sec_req_el->sgl_out,
					   sg_nents(sec_req_el->sgl_out),
					   skreq->iv, ...,
					   sec_req_el->el_length -
					   ...);
		else
			sg_pcopy_to_buffer(sec_req_el->sgl_in,
					   sg_nents(sec_req_el->sgl_in),
					   skreq->iv, ...,
					   sec_req_el->el_length -
					   ...);
	...
		crypto_inc(skreq->iv, 16);
	...
	}

	if (ctx->queue->havesoftqueue &&
	    !kfifo_is_empty(&ctx->queue->softqueue) &&
	    sec_queue_empty(ctx->queue)) {
		ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
		...
		sec_queue_send(ctx->queue, &nextrequest->req,
			       nextrequest->sec_req);
	} else if (!list_empty(&ctx->backlog)) {
		...
		backlog_req = list_first_entry(&ctx->backlog,
					       typeof(*backlog_req),
					       backlog_head);
		if (sec_queue_can_enqueue(ctx->queue,
					  backlog_req->num_elements) ||
		    (ctx->queue->havesoftqueue &&
		     kfifo_avail(&ctx->queue->softqueue) >
		     backlog_req->num_elements)) {
			sec_send_request(backlog_req, ctx->queue);
			crypto_request_complete(backlog_req->req_base,
						-EINPROGRESS);
			list_del(&backlog_req->backlog_head);
		}
	}
	spin_unlock_bh(&ctx->queue->queuelock);

	mutex_lock(&sec_req->lock);
	list_del(&sec_req_el->head);
	mutex_unlock(&sec_req->lock);
	sec_alg_free_el(sec_req_el, ctx->queue->dev_info);

	...
	mutex_lock(&sec_req->lock);
	done = list_empty(&sec_req->elements);
	mutex_unlock(&sec_req->lock);
	...
	dma_unmap_single(dev, sec_req->dma_iv, ...);
	...
	dma_unmap_sg(dev, skreq->src, sec_req->len_in, ...);
	if (skreq->src != skreq->dst)
		dma_unmap_sg(dev, skreq->dst, sec_req->len_out, ...);
	skcipher_request_complete(skreq, sec_req->err);
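Two IV-update strategies are visible in the switch above: for the block-chained cases the last ciphertext block is copied back into skreq->iv (the sg_pcopy_to_buffer() calls, reading from the destination or the source list depending on the DE bit), while for CTR crypto_inc() steps the IV as a big-endian counter so the next chained element continues the keystream. crypto_inc() is the generic kernel helper; a functionally equivalent sketch for the 16-byte case:

	/* Equivalent of crypto_inc(iv, 16): treat the IV as a big-endian
	 * counter, bump the last byte and ripple the carry leftwards. */
	static void demo_ctr_iv_inc(u8 iv[16])
	{
		int i;

		for (i = 15; i >= 0; i--)
			if (++iv[i] != 0)
				break;		/* no carry out of this byte */
	}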
In sec_alg_callback():

	sec_req->cb(resp, sec_req->req_base);
In sec_alg_alloc_and_calc_split_sizes():

	size_t *sizes;
	...
	sizes = kcalloc(*steps, sizeof(*sizes), gfp);
	if (!sizes)
		return -ENOMEM;

	for (i = 0; i < *steps - 1; i++)
		sizes[i] = SEC_REQ_LIMIT;
	sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
	*split_sizes = sizes;
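The split is simple: every chunk but the last is exactly SEC_REQ_LIMIT bytes and the last takes the remainder. The computation of *steps itself is not among the matched lines; assuming it is the rounded-up quotient, the whole helper reduces to something like this (demo_* name hypothetical):

	#include <linux/kernel.h>	/* DIV_ROUND_UP */
	#include <linux/slab.h>

	/*
	 * Illustration: if SEC_REQ_LIMIT were 0x8000 and length 0x14000,
	 * then steps = 3 and sizes = { 0x8000, 0x8000, 0x4000 }.
	 */
	static int demo_calc_split_sizes(size_t length, size_t **split_sizes,
					 size_t *steps, gfp_t gfp)
	{
		size_t *sizes;
		size_t i;

		*steps = DIV_ROUND_UP(length, SEC_REQ_LIMIT);	/* assumed, not in the match */
		sizes = kcalloc(*steps, sizeof(*sizes), gfp);
		if (!sizes)
			return -ENOMEM;

		for (i = 0; i < *steps - 1; i++)
			sizes[i] = SEC_REQ_LIMIT;
		sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
		*split_sizes = sizes;
		return 0;
	}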
In sec_map_and_split_sg():

	...
		return -EINVAL;
	...
		ret = -ENOMEM;
	...
		ret = -ENOMEM;
	...
		ret = -ENOMEM;
In sec_alg_alloc_and_fill_el():

	...
		return ERR_PTR(-ENOMEM);
	el->el_length = el_size;
	req = &el->req;
	...
	req->w0 &= ~SEC_BD_W0_CIPHER_M;
	if (encrypt)
		req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
	else
		req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
	req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
		   SEC_BD_W0_C_GRAN_SIZE_19_16_M;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
	req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
		   SEC_BD_W0_C_GRAN_SIZE_21_20_M;

	...
	req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
		  ...;
	...
	req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
	req->w1 |= SEC_BD_W1_ADDR_TYPE;

	el->sgl_in = sgl_in;

	ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
					...);
	...
	req->data_addr_lo = lower_32_bits(el->dma_in);
	req->data_addr_hi = upper_32_bits(el->dma_in);

	if (different_dest) {
		el->sgl_out = sgl_out;
		ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
						el->sgl_out,
						...);
		...
		req->w0 |= SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);
	} else {
		req->w0 &= ~SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
	}
	...
	sec_free_hw_sgl(el->in, el->dma_in, info);
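The element length is scattered across the BD words as a granule size: the truncated req->w2 assignment suggests bits 15:0 live in W2, while the two W0 fields above carry bits 19:16 and 21:20, for 22 bits in total. A worked sketch of that packing with illustrative masks (the real shift and mask constants live in the driver header, not in the matched lines):

	u32 el_size  = 0x145678;		/* example 22-bit element length */
	u32 w2_15_0  = el_size & 0xffff;	/* 0x5678 -> W2 C_GRAN_SIZE_15_0 */
	u32 w0_19_16 = (el_size >> 16) & 0xf;	/* 0x4    -> W0 C_GRAN_SIZE_19_16 */
	u32 w0_21_20 = (el_size >> 20) & 0x3;	/* 0x1    -> W0 C_GRAN_SIZE_21_20 */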
In sec_alg_skcipher_crypto():

	struct sec_queue *queue = ctx->queue;
	...
	struct sec_dev_info *info = queue->dev_info;
	...
	bool split = skreq->src != skreq->dst;
	gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

	mutex_init(&sec_req->lock);
	sec_req->req_base = &skreq->base;
	sec_req->err = 0;
	...
	sec_req->len_in = sg_nents(skreq->src);
	...
	ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
						 &steps, gfp);
	...
	sec_req->num_elements = steps;
	ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
				   &splits_in_nents, sec_req->len_in,
				   info->dev, gfp);
	...
	if (split) {
		sec_req->len_out = sg_nents(skreq->dst);
		ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
					   &splits_out, &splits_out_nents,
					   sec_req->len_out, info->dev, gfp);
		...
	}
	/* Shared info stored in sec_req - applies to all BDs */
	sec_req->tfm_ctx = ctx;
	sec_req->cb = sec_skcipher_alg_callback;
	INIT_LIST_HEAD(&sec_req->elements);
	...
	sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
					 ...);
	if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
		ret = -ENOMEM;
		...
	}

	/* Set them all up then queue - cleaner error handling. */
	for (i = 0; i < steps; i++) {
		el = sec_alg_alloc_and_fill_el(&ctx->req_template,
					       ...,
					       skreq->src != skreq->dst,
					       ...);
		...
		el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
		el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
		el->sec_req = sec_req;
		list_add_tail(&el->head, &sec_req->elements);
	}
	/*
	 * Only attempt to queue if the whole lot can fit in the queue -
	 * ...
	 * Big hammer test of both software and hardware queues - could be
	 * more refined.
	 */
	spin_lock_bh(&queue->queuelock);

	/*
	 * Can go on to queue if we have space in either:
	 * 1) The hardware queue and no software queue
	 * 2) The software queue
	 * If neither has space, we
	 * have to only queue to the backlog queue and return busy.
	 */
	if ((!sec_queue_can_enqueue(queue, steps) &&
	     (!queue->havesoftqueue ||
	      kfifo_avail(&queue->softqueue) < steps)) ||
	    !list_empty(&ctx->backlog)) {
		ret = -EBUSY;
		if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
			spin_unlock_bh(&queue->queuelock);
			...
		}
		spin_unlock_bh(&queue->queuelock);
		...
	}
	ret = sec_send_request(sec_req, queue);
	spin_unlock_bh(&queue->queuelock);
	...
	ret = -EINPROGRESS;

	/* Cleanup - all elements in pointer arrays have been copied */
	...

	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		list_del(&el->head);
		...
	}
	...
	dma_unmap_single(info->dev, sec_req->dma_iv,
			 ...);
	...
	sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
			    splits_out_nents, sec_req->len_out,
			    info->dev);
	...
	sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
			    sec_req->len_in, info->dev);
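From the caller's side this function is a standard asynchronous skcipher entry point: -EINPROGRESS means the request was queued, and -EBUSY with CRYPTO_TFM_REQ_MAY_BACKLOG set means it was parked on ctx->backlog and will be completed later. A sketch of the usual way a user of the crypto API drives such an implementation (this is the generic kernel API, not this driver's code; the demo_* name is hypothetical):

	#include <crypto/skcipher.h>
	#include <linux/crypto.h>

	static int demo_encrypt_sync(struct crypto_skcipher *tfm,
				     struct skcipher_request *req)
	{
		DECLARE_CRYPTO_WAIT(wait);

		skcipher_request_set_callback(req,
					      CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP,
					      crypto_req_done, &wait);
		/* crypto_wait_req() turns -EINPROGRESS/-EBUSY into a sleep
		 * until the completion callback fires, then returns status. */
		return crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	}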
In sec_alg_skcipher_init():

	mutex_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->backlog);
	...
	ctx->queue = sec_queue_alloc_start_safe();
	if (IS_ERR(ctx->queue))
		return PTR_ERR(ctx->queue);

	spin_lock_init(&ctx->queue->queuelock);
	ctx->queue->havesoftqueue = false;
In sec_alg_skcipher_exit():

	struct device *dev = ctx->queue->dev_info->dev;

	if (ctx->key) {
		memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
		dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
				  ctx->pkey);
	}

	sec_queue_stop_release(ctx->queue);
In sec_alg_skcipher_init_with_queue():

	INIT_KFIFO(ctx->queue->softqueue);
	ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
	...
	ctx->queue->havesoftqueue = true;
In sec_alg_skcipher_exit_with_queue():

	kfifo_free(&ctx->queue->softqueue);
In sec_algs_register():

	--active_devs;

In sec_algs_unregister():

	if (--active_devs != 0)
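These last two refcount fragments suggest a register-once scheme: the first probed device registers the algorithms, later devices only bump a count, and the count is rolled back on registration failure or on the final unregister. A hedged reconstruction of the register side - algs_lock, the sec_algs table, and the surrounding structure are assumptions; only the --active_devs lines appear in the match:

	#include <crypto/internal/skcipher.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(algs_lock);
	static unsigned int active_devs;
	static struct skcipher_alg sec_algs[1];	/* stand-in for the driver's table */

	int demo_algs_register(void)
	{
		int ret = 0;

		mutex_lock(&algs_lock);
		if (++active_devs != 1)		/* only the first device registers */
			goto unlock;

		ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
		if (ret)
			--active_devs;		/* matched line: roll back on failure */
	unlock:
		mutex_unlock(&algs_lock);
		return ret;
	}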