/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
 */
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/refcount_types.h>
#include <linux/slab.h>
#include <linux/types.h>

/*
 * Algorithm masks and types.
 */
#define CRYPTO_ALG_TYPE_MASK		0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER		0x00000001
#define CRYPTO_ALG_TYPE_AEAD		0x00000003
#define CRYPTO_ALG_TYPE_LSKCIPHER	0x00000004
#define CRYPTO_ALG_TYPE_SKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_AKCIPHER	0x00000006
#define CRYPTO_ALG_TYPE_SIG		0x00000007
#define CRYPTO_ALG_TYPE_KPP		0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS	0x0000000a
#define CRYPTO_ALG_TYPE_SCOMPRESS	0x0000000b
#define CRYPTO_ALG_TYPE_RNG		0x0000000c
#define CRYPTO_ALG_TYPE_HASH		0x0000000e
#define CRYPTO_ALG_TYPE_SHASH		0x0000000e
#define CRYPTO_ALG_TYPE_AHASH		0x0000000f

#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK	0x0000000e

#define CRYPTO_ALG_LARVAL		0x00000010
#define CRYPTO_ALG_DEAD			0x00000020
#define CRYPTO_ALG_DYING		0x00000040
#define CRYPTO_ALG_ASYNC		0x00000080

/*
 * Set if the algorithm (or an algorithm which it uses) requires another
 * algorithm of the same type to handle corner cases.
 */
#define CRYPTO_ALG_NEED_FALLBACK	0x00000100

/*
 * Set if the algorithm data structure should be duplicated into
 * kmalloc memory before registration. This is useful for hardware
 * that can be disconnected at will. Do not use this if the data
 * structure is embedded into a bigger one. Duplicate the overall
 * data structure in the driver in that case.
 */
#define CRYPTO_ALG_DUP_FIRST		0x00000200

/*
 * Set if the algorithm has passed automated run-time testing. Note that
 * if there is no run-time testing for a given algorithm it is considered
 * to have passed.
 */
#define CRYPTO_ALG_TESTED		0x00000400

/*
 * Set if the algorithm is an instance that is built from templates.
 */
#define CRYPTO_ALG_INSTANCE		0x00000800

/*
 * Set this bit if the algorithm is hardware accelerated but not directly
 * available to userspace via an instruction set or similar mechanism.
 */
#define CRYPTO_ALG_KERN_DRIVER_ONLY	0x00001000

/*
 * Mark a cipher as a service implementation only usable by another
 * cipher and never by a normal user of the kernel crypto API.
 */
#define CRYPTO_ALG_INTERNAL		0x00002000

/*
 * Set if the algorithm has a ->setkey() method but can be used without
 * calling it first, i.e. there is a default key.
 */
#define CRYPTO_ALG_OPTIONAL_KEY		0x00004000

/*
 * Don't trigger module loading.
 */
#define CRYPTO_NOLOAD			0x00008000

/*
 * The algorithm may allocate memory during request processing, i.e. during
 * encryption, decryption, or hashing. Users can request an algorithm with this
 * flag unset if they can't handle memory allocation failures.
 *
 * This flag is currently only implemented for algorithms of type "skcipher",
 * "aead", "ahash", "shash", and "cipher". Algorithms of other types might not
 * have this flag set even if they allocate memory.
 *
 * In some edge cases, algorithms can allocate memory regardless of this flag.
 * To avoid these cases, users must obey the following usage constraints:
 * skcipher:
 *	- The IV buffer and all scatterlist elements must be aligned to the
 *	  algorithm's alignmask.
 *	- If the data were to be divided into chunks of size
 *	  crypto_skcipher_walksize() (with any remainder going at the end), no
 *	  chunk can cross a page boundary or a scatterlist element boundary.
 * aead:
 *	- The IV buffer and all scatterlist elements must be aligned to the
 *	  algorithm's alignmask.
 *	- The first scatterlist element must contain all the associated data,
 *	  and its pages must be !PageHighMem.
 *	- If the plaintext/ciphertext were to be divided into chunks of size
 *	  crypto_aead_walksize() (with the remainder going at the end), no
 *	  chunk can cross a page boundary or a scatterlist element boundary.
 * ahash:
 *	- crypto_ahash_finup() must not be used unless the algorithm implements
 *	  ->finup() natively.
 */
#define CRYPTO_ALG_ALLOCATES_MEMORY	0x00010000
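
/*
 * Example: a user that cannot tolerate memory allocation failures can pass
 * CRYPTO_ALG_ALLOCATES_MEMORY in the mask (with the corresponding type bit
 * left clear) so that only algorithms with this flag unset are selected.
 * A minimal sketch, assuming the skcipher API from <crypto/skcipher.h>:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0,
 *				    CRYPTO_ALG_ALLOCATES_MEMORY);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */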

/*
 * Mark an algorithm as a service implementation only usable by a
 * template and never by a normal user of the kernel crypto API.
 * This is intended to be used by algorithms that are themselves
 * not FIPS-approved but may instead be used to implement parts of
 * a FIPS-approved algorithm (e.g., dh vs. ffdhe2048(dh)).
 */
#define CRYPTO_ALG_FIPS_INTERNAL	0x00020000

/* Set if the algorithm supports virtual addresses. */
#define CRYPTO_ALG_REQ_VIRT		0x00040000

/* Set if the algorithm cannot have a fallback (e.g., phmac). */
#define CRYPTO_ALG_NO_FALLBACK		0x00080000

/* The high bits 0xff000000 are reserved for type-specific flags. */

/*
 * Transform masks and values (for crt_flags).
 */
#define CRYPTO_TFM_NEED_KEY		0x00000001

#define CRYPTO_TFM_REQ_MASK		0x000fff00
#define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS	0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP	0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG	0x00000400
#define CRYPTO_TFM_REQ_ON_STACK		0x00000800

/*
 * Miscellaneous stuff.
 */
#define CRYPTO_MAX_ALG_NAME		128

/*
 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
 * declaration) is used to ensure that the crypto_tfm context structure is
 * aligned correctly for the given architecture so that there are no alignment
 * faults for C data types. On architectures that support non-cache coherent
 * DMA, such as ARM or arm64, it also takes into account the minimal alignment
 * that is required to ensure that the context struct member does not share any
 * cachelines with the rest of the struct. This is needed to ensure that cache
 * maintenance for non-coherent DMA (cache invalidation in particular) does not
 * affect data that may be accessed by the CPU concurrently.
 */
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN

#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))

struct crypto_tfm;
struct crypto_type;
struct module;

typedef void (*crypto_completion_t)(void *req, int err);

/**
 * DOC: Block Cipher Context Data Structures
 *
 * These data structures define the operating context for each block cipher
 * type.
 */

struct crypto_async_request {
	struct list_head list;
	crypto_completion_t complete;
	void *data;
	struct crypto_tfm *tfm;

	u32 flags;
};
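
/*
 * Example: the @complete callback runs when an asynchronous request finishes.
 * A minimal sketch of a caller-supplied completion handler (my_op and
 * my_op_done are hypothetical names, not part of this API):
 *
 *	struct my_op {
 *		struct completion done;
 *		int err;
 *	};
 *
 *	static void my_op_done(void *data, int err)
 *	{
 *		struct my_op *op = data;
 *
 *		op->err = err;
 *		complete(&op->done);
 *	}
 *
 * Most users should instead use crypto_req_done() together with struct
 * crypto_wait, declared later in this header, rather than open-coding this
 * pattern.
 */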

/**
 * DOC: Block Cipher Algorithm Definitions
 *
 * These data structures define modular crypto algorithm implementations,
 * managed via crypto_register_alg() and crypto_unregister_alg().
 */

/**
 * struct cipher_alg - single-block symmetric ciphers definition
 * @cia_min_keysize: Minimum key size supported by the transformation. This is
 *                   the smallest key length supported by this transformation
 *                   algorithm. This must be set to one of the pre-defined
 *                   values as this is not hardware specific. Possible values
 *                   for this field can be found via git grep "_MIN_KEY_SIZE"
 *                   include/crypto/
 * @cia_max_keysize: Maximum key size supported by the transformation. This is
 *                   the largest key length supported by this transformation
 *                   algorithm. This must be set to one of the pre-defined
 *                   values as this is not hardware specific. Possible values
 *                   for this field can be found via git grep "_MAX_KEY_SIZE"
 *                   include/crypto/
 * @cia_setkey: Set key for the transformation. This function is used to either
 *              program a supplied key into the hardware or store the key in
 *              the transformation context for programming it later. Note that
 *              this function does modify the transformation context. This
 *              function can be called multiple times during the existence of
 *              the transformation object, so one must make sure the key is
 *              properly reprogrammed into the hardware. This function is also
 *              responsible for checking the key length for validity.
 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
 *               single block of data, which must be @cra_blocksize big. This
 *               always operates on a full @cra_blocksize and it is not
 *               possible to encrypt a block of smaller size. The supplied
 *               buffers must therefore also be at least of @cra_blocksize
 *               size. Both the input and output buffers are always aligned to
 *               @cra_alignmask. In case either the input or the output buffer
 *               supplied by the user of the crypto API is not aligned to
 *               @cra_alignmask, the crypto API will re-align the buffers. The
 *               re-alignment means that a new buffer will be allocated, the
 *               data will be copied into the new buffer, the processing will
 *               happen on the new buffer, then the data will be copied back
 *               into the original buffer and finally the new buffer will be
 *               freed. In case a software fallback was put in place in the
 *               @cra_init call, this function might need to use the fallback
 *               if the algorithm doesn't support all of the key sizes. In
 *               case the key was stored in the transformation context, the
 *               key might need to be re-programmed into the hardware in this
 *               function. This function shall not modify the transformation
 *               context, as this function may be called in parallel with the
 *               same transformation object.
 * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
 *               @cia_encrypt, and the conditions are exactly the same.
 *
 * All fields are mandatory and must be filled.
 */
struct cipher_alg {
	unsigned int cia_min_keysize;
	unsigned int cia_max_keysize;
	int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen);
	void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};
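
/*
 * Example: a driver implementing a single-block cipher provides these
 * callbacks through struct cipher_alg, embedded in struct crypto_alg via the
 * cra_u union below. A minimal sketch under assumed names (my_cipher_ctx,
 * my_cipher_setkey and MY_CIPHER_KEY_SIZE are hypothetical); the context is
 * obtained with crypto_tfm_ctx(), the accessor declared with the driver-facing
 * API in <crypto/algapi.h>:
 *
 *	static int my_cipher_setkey(struct crypto_tfm *tfm, const u8 *key,
 *				    unsigned int keylen)
 *	{
 *		struct my_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
 *
 *		if (keylen != MY_CIPHER_KEY_SIZE)
 *			return -EINVAL;
 *		memcpy(ctx->key, key, keylen);
 *		return 0;
 *	}
 *
 *	static void my_cipher_encrypt(struct crypto_tfm *tfm, u8 *dst,
 *				      const u8 *src)
 *	{
 *		// transform exactly one cra_blocksize-sized block;
 *		// must not modify the transformation context
 *	}
 */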

#define cra_cipher	cra_u.cipher

/**
 * struct crypto_alg - definition of a cryptographic cipher algorithm
 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
 *             CRYPTO_ALG_* flags for the flags which go in here. Those are
 *             used for fine-tuning the description of the transformation
 *             algorithm.
 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
 *                 of the smallest possible unit which can be transformed with
 *                 this algorithm. The users must respect this value.
 *                 In case of a HASH transformation, it is possible for a
 *                 smaller block than @cra_blocksize to be passed to the crypto
 *                 API for transformation; in case of any other transformation
 *                 type, an error will be returned upon any attempt to
 *                 transform smaller than @cra_blocksize chunks.
 * @cra_ctxsize: Size of the operational context of the transformation. This
 *               value informs the kernel crypto API about the memory size
 *               needed to be allocated for the transformation context.
 * @cra_alignmask: For cipher, skcipher, lskcipher, and aead algorithms this is
 *                 1 less than the alignment, in bytes, that the algorithm
 *                 implementation requires for input and output buffers. When
 *                 the crypto API is invoked with buffers that are not aligned
 *                 to this alignment, the crypto API automatically utilizes
 *                 appropriately aligned temporary buffers to comply with what
 *                 the algorithm needs. (For scatterlists this happens only if
 *                 the algorithm uses the skcipher_walk helper functions.) This
 *                 misalignment handling carries a performance penalty, so it
 *                 is preferred that algorithms do not set a nonzero alignmask.
 *                 Also, crypto API users may wish to allocate buffers aligned
 *                 to the alignmask of the algorithm being used, in order to
 *                 avoid the API having to realign them. Note: the alignmask is
 *                 not supported for hash algorithms and is always 0 for them.
 * @cra_reqsize: Size of the request context for this algorithm.
 * @cra_priority: Priority of this transformation implementation. In case
 *                multiple transformations with the same @cra_name are
 *                available to the Crypto API, the kernel will use the one with
 *                the highest @cra_priority.
 * @cra_name: Generic name (usable by multiple implementations) of the
 *            transformation algorithm. This is the name of the transformation
 *            itself. This field is used by the kernel when looking up the
 *            providers of a particular transformation.
 * @cra_driver_name: Unique name of the transformation provider. This is the
 *                   name of the provider of the transformation. This can be
 *                   any arbitrary value, but in the usual case, this contains
 *                   the name of the chip or provider and the name of the
 *                   transformation algorithm.
 * @cra_type: Type of the cryptographic transformation. This is a pointer to
 *            struct crypto_type, which implements callbacks common for all
 *            transformation types. There are multiple options, such as
 *            &crypto_skcipher_type, &crypto_ahash_type, &crypto_rng_type.
 *            This field might be empty. In that case, there are no common
 *            callbacks. This is the case for: cipher.
 * @cra_u: Callbacks implementing the transformation. This is a union of
 *         multiple structures. Depending on the type of transformation
 *         selected by @cra_type and @cra_flags above, the associated structure
 *         must be filled with callbacks. This field might be empty. This is
 *         the case for ahash, shash.
 * @cra_init: Deprecated, do not use.
 * @cra_exit: Deprecated, do not use.
 * @cra_u.cipher: Union member which contains a single-block symmetric cipher
 *                definition. See struct cipher_alg.
 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE.
 * @cra_list: internally used
 * @cra_users: internally used
 * @cra_refcnt: internally used
 * @cra_destroy: internally used
 *
 * The struct crypto_alg describes a generic Crypto API algorithm and is common
 * for all of the transformations. Any variable not documented here shall not
 * be used by a cipher implementation as it is internal to the Crypto API.
 */
struct crypto_alg {
	struct list_head cra_list;
	struct list_head cra_users;

	u32 cra_flags;
	unsigned int cra_blocksize;
	unsigned int cra_ctxsize;
	unsigned int cra_alignmask;
	unsigned int cra_reqsize;

	int cra_priority;
	refcount_t cra_refcnt;

	char cra_name[CRYPTO_MAX_ALG_NAME];
	char cra_driver_name[CRYPTO_MAX_ALG_NAME];

	const struct crypto_type *cra_type;

	union {
		struct cipher_alg cipher;
	} cra_u;

	int (*cra_init)(struct crypto_tfm *tfm);
	void (*cra_exit)(struct crypto_tfm *tfm);
	void (*cra_destroy)(struct crypto_alg *alg);

	struct module *cra_module;
} CRYPTO_MINALIGN_ATTR;
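
/*
 * Example: registering a single-block cipher implementation. A minimal
 * sketch, assuming the hypothetical my_cipher_* callbacks from the sketch
 * above and crypto_register_alg(), which is declared with the driver-facing
 * API in <crypto/algapi.h>:
 *
 *	static struct crypto_alg my_cipher_alg = {
 *		.cra_name		= "mycipher",
 *		.cra_driver_name	= "mycipher-generic",
 *		.cra_priority		= 100,
 *		.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 *		.cra_blocksize		= 16,
 *		.cra_ctxsize		= sizeof(struct my_cipher_ctx),
 *		.cra_module		= THIS_MODULE,
 *		.cra_u = {
 *			.cipher = {
 *				.cia_min_keysize = MY_CIPHER_KEY_SIZE,
 *				.cia_max_keysize = MY_CIPHER_KEY_SIZE,
 *				.cia_setkey	 = my_cipher_setkey,
 *				.cia_encrypt	 = my_cipher_encrypt,
 *				.cia_decrypt	 = my_cipher_decrypt,
 *			},
 *		},
 *	};
 *
 *	// in the module init function:
 *	err = crypto_register_alg(&my_cipher_alg);
 */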

/*
 * A helper struct for waiting for completion of async crypto ops
 */
struct crypto_wait {
	struct completion completion;
	int err;
};

/*
 * Macro for declaring a crypto op async wait object on stack
 */
#define DECLARE_CRYPTO_WAIT(_wait) \
	struct crypto_wait _wait = { \
		COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }

/*
 * Async ops completion helper functions
 */
void crypto_req_done(void *req, int err);

static inline int crypto_wait_req(int err, struct crypto_wait *wait)
{
	switch (err) {
	case -EINPROGRESS:
	case -EBUSY:
		wait_for_completion(&wait->completion);
		reinit_completion(&wait->completion);
		err = wait->err;
		break;
	}

	return err;
}

static inline void crypto_init_wait(struct crypto_wait *wait)
{
	init_completion(&wait->completion);
}
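
/*
 * Example: turning an asynchronous crypto call into a synchronous one. A
 * minimal sketch, assuming an skcipher request prepared via the API in
 * <crypto/skcipher.h> (req and the surrounding setup are not shown):
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				      CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 * crypto_wait_req() sleeps until crypto_req_done() fires if the operation
 * returned -EINPROGRESS or -EBUSY, and passes any other return value through
 * unchanged.
 */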

/*
 * Algorithm query interface.
 */
int crypto_has_alg(const char *name, u32 type, u32 mask);

/*
 * Transforms: user-instantiated objects which encapsulate algorithms
 * and core processing logic. Managed via crypto_alloc_*() and
 * crypto_free_*(), as well as the various helpers below.
 */

struct crypto_tfm {
	refcount_t refcnt;

	u32 crt_flags;

	int node;

	struct crypto_tfm *fb;

	void (*exit)(struct crypto_tfm *tfm);

	struct crypto_alg *__crt_alg;

	void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};

/*
 * Transform user interface.
 */

struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);

static inline void crypto_free_tfm(struct crypto_tfm *tfm)
{
	return crypto_destroy_tfm(tfm, tfm);
}

/*
 * Transform helpers which query the underlying algorithm.
 */
static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_name;
}

static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_driver_name;
}

static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_blocksize;
}

static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_alignmask;
}

static inline unsigned int crypto_tfm_alg_reqsize(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_reqsize;
}

static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
	return tfm->crt_flags;
}

static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags |= flags;
}

static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags &= ~flags;
}

static inline unsigned int crypto_tfm_ctx_alignment(void)
{
	struct crypto_tfm *tfm;
	return __alignof__(tfm->__crt_ctx);
}

static inline bool crypto_tfm_is_async(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
}

static inline bool crypto_req_on_stack(struct crypto_async_request *req)
{
	return req->flags & CRYPTO_TFM_REQ_ON_STACK;
}

static inline void crypto_request_set_callback(
	struct crypto_async_request *req, u32 flags,
	crypto_completion_t compl, void *data)
{
	u32 keep = CRYPTO_TFM_REQ_ON_STACK;

	req->complete = compl;
	req->data = data;
	req->flags &= keep;
	req->flags |= flags & ~keep;
}

static inline void crypto_request_set_tfm(struct crypto_async_request *req,
					  struct crypto_tfm *tfm)
{
	req->tfm = tfm;
	req->flags &= ~CRYPTO_TFM_REQ_ON_STACK;
}

struct crypto_async_request *crypto_request_clone(
	struct crypto_async_request *req, size_t total, gfp_t gfp);

static inline void crypto_stack_request_init(struct crypto_async_request *req,
					     struct crypto_tfm *tfm)
{
	req->flags = 0;
	crypto_request_set_tfm(req, tfm);
	req->flags |= CRYPTO_TFM_REQ_ON_STACK;
}

#endif	/* _LINUX_CRYPTO_H */