Lines Matching full:blocks

  29:      int rounds, int blocks);
  31:      int rounds, int blocks);
  34:      int rounds, int blocks, u8 iv[]);
  37:      int rounds, int blocks, u8 iv[], u8 final[]);
  40:      int rounds, int blocks, u8 iv[]);
  42:      int rounds, int blocks, u8 iv[]);
  46:      int rounds, int blocks);
  48:      int rounds, int blocks, u8 iv[]);
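These first hits are the tail lines of two-line asmlinkage prototypes for the bit-sliced NEON AES routines; note how the parameter lists grow from ECB (rounds, blocks) through CBC/XTS (plus iv[]) to CTR (plus a final[] keystream buffer). A hedged reconstruction of the declarations they belong to follows; only the parameter tails above are verbatim, and the leading lines and the aesbs_*/neon_aes_* symbol names are assumptions based on the aes-neonbs glue code:

/* Names and first lines assumed; parameter tails match the hits above. */
asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks);
asmlinkage void aesbs_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks);

asmlinkage void aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]);

/* CTR also returns one extra keystream block via final[] for a ragged tail. */
asmlinkage void aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[], u8 final[]);

asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]);
asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]);

/* The last two hits (46, 48) plausibly belong to the plain NEON
 * fallbacks used when the bit-sliced path does not apply; names assumed: */
asmlinkage void neon_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
				     int rounds, int blocks);
asmlinkage void neon_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
				     int rounds, int blocks, u8 iv[]);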
  99:  int rounds, int blocks))                              in __ecb_crypt() argument
 109:  unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;   in __ecb_crypt() local
 112:  blocks = round_down(blocks,                           in __ecb_crypt()
 117:  ctx->rounds, blocks);                                 in __ecb_crypt()
 120:  walk.nbytes - blocks * AES_BLOCK_SIZE);               in __ecb_crypt()
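Read together, the __ecb_crypt() hits outline a standard skcipher walk: count the whole AES blocks in this step (109), round the count down to a multiple of the walk stride on intermediate steps (112), hand the blocks to the asm callback (117), and return the unconsumed bytes to skcipher_walk_done() (120). A minimal sketch of that loop, assuming the usual kernel_neon_begin()/kernel_neon_end() bracketing and local names:

while (walk.nbytes >= AES_BLOCK_SIZE) {
	unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

	/* Intermediate steps must consume whole strides only, so the
	 * bit-sliced code never sees a ragged tail before the end. */
	if (walk.nbytes < walk.total)
		blocks = round_down(blocks,
				    walk.stride / AES_BLOCK_SIZE);

	kernel_neon_begin();
	fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
	   ctx->rounds, blocks);
	kernel_neon_end();

	/* Report what was not consumed; the walk revisits it next pass. */
	err = skcipher_walk_done(&walk,
				 walk.nbytes - blocks * AES_BLOCK_SIZE);
}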
 169:  unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;   in cbc_encrypt() local
 174:  ctx->enc, ctx->key.rounds, blocks,                    in cbc_encrypt()
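Note there is no round_down() hit in cbc_encrypt(): CBC encryption chains every block on the previous ciphertext, so it cannot run eight blocks at a time, and the glue hands each step's full blocks to a serial NEON fallback instead. A sketch of the loop body, with the fallback's name an assumption:

unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

/* CBC encryption is inherently serial: use the plain NEON helper. */
kernel_neon_begin();
neon_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
		     ctx->enc, ctx->key.rounds, blocks, walk.iv);
kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);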
 192:  unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;   in cbc_decrypt() local
 195:  blocks = round_down(blocks,                           in cbc_decrypt()
 200:  ctx->key.rk, ctx->key.rounds, blocks,                 in cbc_decrypt()
 204:  walk.nbytes - blocks * AES_BLOCK_SIZE);               in cbc_decrypt()
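cbc_decrypt(), by contrast, is parallel-friendly (each plaintext block depends only on two ciphertext blocks), so it reuses the __ecb_crypt() stride-rounding pattern with the IV threaded through. A sketch under the same assumptions:

unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

if (walk.nbytes < walk.total)
	blocks = round_down(blocks,
			    walk.stride / AES_BLOCK_SIZE);

kernel_neon_begin();
aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
		  ctx->key.rk, ctx->key.rounds, blocks, walk.iv);
kernel_neon_end();
err = skcipher_walk_done(&walk,
			 walk.nbytes - blocks * AES_BLOCK_SIZE);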
 240:  unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;       in ctr_encrypt() local
 244:  blocks = round_down(blocks,                               in ctr_encrypt()
 251:  ctx->rk, ctx->rounds, blocks, walk.iv, final);            in ctr_encrypt()
 255:  u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;   in ctr_encrypt()
 256:  u8 *src = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;   in ctr_encrypt()
 265:  walk.nbytes - blocks * AES_BLOCK_SIZE);                   in ctr_encrypt()
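The dst/src hits at 255-256 are ctr_encrypt()'s tail handling: when the request ends in a partial block, the asm routine deposits one extra keystream block in final[], and the glue XORs only the trailing bytes against it. A sketch of that path (the stack buffer buf and the use of crypto_xor_cpy() are assumptions):

unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
u8 buf[AES_BLOCK_SIZE];
u8 *final = (walk.total % AES_BLOCK_SIZE) ? buf : NULL;

if (walk.nbytes < walk.total) {
	blocks = round_down(blocks,
			    walk.stride / AES_BLOCK_SIZE);
	final = NULL;	/* a tail can only exist on the last step */
}

kernel_neon_begin();
aesbs_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
		  ctx->rk, ctx->rounds, blocks, walk.iv, final);
kernel_neon_end();

if (final) {
	/* XOR the extra keystream block over the ragged tail. */
	u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
	u8 *src = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;

	crypto_xor_cpy(dst, src, final, walk.total % AES_BLOCK_SIZE);
	err = skcipher_walk_done(&walk, 0);
} else {
	err = skcipher_walk_done(&walk,
				 walk.nbytes - blocks * AES_BLOCK_SIZE);
}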
 320:  int rounds, int blocks, u8 iv[]))                                in __xts_crypt() argument
 358:  unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;              in __xts_crypt() local
 361:  blocks = round_down(blocks,                                      in __xts_crypt()
 369:  if (likely(blocks > 6)) { /* plain NEON is faster otherwise */   in __xts_crypt()
 376:  fn(out, in, ctx->key.rk, ctx->key.rounds, blocks,                in __xts_crypt()
 379:  out += blocks * AES_BLOCK_SIZE;                                  in __xts_crypt()
 380:  in += blocks * AES_BLOCK_SIZE;                                   in __xts_crypt()
 381:  nbytes -= blocks * AES_BLOCK_SIZE;                               in __xts_crypt()
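The __xts_crypt() hits add a size heuristic on top of the same stride rounding: the bit-sliced kernel processes eight blocks per call, so below the "blocks > 6" threshold the plain NEON code is faster, as the comment at 369 says. After the bulk call the glue advances its cursors past the bytes consumed so any remaining tail handling can pick up from there. A sketch, with the surrounding declarations assumed:

/* Inside a kernel_neon_begin()/kernel_neon_end() section. */
u8 *out = walk.dst.virt.addr;
u8 *in = walk.src.virt.addr;
int nbytes = walk.nbytes;
unsigned int blocks = nbytes / AES_BLOCK_SIZE;

if (walk.nbytes < walk.total)
	blocks = round_down(blocks,
			    walk.stride / AES_BLOCK_SIZE);

if (likely(blocks > 6)) {	/* plain NEON is faster otherwise */
	fn(out, in, ctx->key.rk, ctx->key.rounds, blocks, walk.iv);

	/* Step past the bulk-processed bytes. */
	out += blocks * AES_BLOCK_SIZE;
	in += blocks * AES_BLOCK_SIZE;
	nbytes -= blocks * AES_BLOCK_SIZE;
}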