// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) …

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-crypto-profile.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

const struct blk_crypto_mode blk_crypto_modes[] = …;

/*
 * This number needs to be at least (the number of threads doing IO
 * concurrently) * (maximum recursive depth of a bio), so that we don't
 * deadlock on crypt_ctx allocations. The default is chosen to be the same
 * as the default number of post read contexts in both EXT4 and F2FS.
 */
static int num_prealloc_crypt_ctxs = …;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(…);

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

static int __init bio_crypt_ctx_init(void)
{
	…
}
subsys_initcall(bio_crypt_ctx_init);

void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	…
}

void __bio_crypt_free_ctx(struct bio *bio)
{
	…
}

int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	…
}

/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	…
}

void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	…
}

/*
 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
 * @next_dun, treating the DUNs as multi-limb integers.
 */
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	…
}

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	…
}

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	…
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 * in the order @bc1 followed by @bc2.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	…
}

/* Check that all I/O segments are data unit aligned. */
static bool bio_crypt_check_alignment(struct bio *bio)
{
	…
}

blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)
{
	…
}

void __blk_crypto_rq_put_keyslot(struct request *rq)
{
	…
}

void __blk_crypto_free_request(struct request *rq)
{
	…
}
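/*
 * Illustrative sketch only, not necessarily the in-tree implementation: the
 * multi-limb integer arithmetic that bio_crypt_dun_increment() above is
 * documented to perform can be realized as a ripple-carry addition across
 * the u64 limbs, least-significant limb first. The helper name
 * example_dun_increment() is hypothetical.
 */
static void __maybe_unused
example_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		/* Add the increment (or the carry) into the current limb. */
		dun[i] += inc;
		/*
		 * If the addition wrapped around (the new limb value is
		 * smaller than what was just added), carry 1 into the next
		 * limb; otherwise the increment is complete.
		 */
		inc = (dun[i] < inc) ? 1 : 0;
	}
}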
/**
 * __blk_crypto_bio_prep - Prepare bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio crypt context provided for the bio is supported by the underlying
 * device's inline encryption hardware, do nothing.
 *
 * Otherwise, try to perform en/decryption for this bio by falling back to the
 * kernel crypto API. When the crypto API fallback is used for encryption,
 * blk-crypto may choose to split the bio into two: the first one continues to
 * be processed, while the second one is resubmitted via submit_bio_noacct().
 * A bounce bio is allocated to encrypt the contents of the first one, and
 * *bio_ptr is updated to point to this bounce bio.
 *
 * Caller must ensure bio has bio_crypt_ctx.
 *
 * Return: true on success; false on error (and bio->bi_status will be set
 *	   appropriately, and bio_endio() will have been called so bio
 *	   submission should abort).
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	…
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	…
}

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key. Must be the correct length for the chosen
 *	     @crypto_mode; see blk_crypto_modes[].
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure. The caller is responsible for
 *	   zeroizing both blk_key and raw_key when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	…
}

bool blk_crypto_config_supported_natively(struct block_device *bdev,
					  const struct blk_crypto_config *cfg)
{
	…
}

/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
 * block_device it's submitted to supports inline crypto, or the
 * blk-crypto-fallback is enabled and supports the cfg).
 */
bool blk_crypto_config_supported(struct block_device *bdev,
				 const struct blk_crypto_config *cfg)
{
	…
}

/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @bdev: block device to operate on
 * @key: A key to use on the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or the crypto API fallback has
 * transforms for the needed mode allocated and ready to go. This function
 * may allocate an skcipher, and *should not* be called from the data path,
 * since that might cause a deadlock.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
 *	   blk-crypto-fallback is either disabled or the needed algorithm
 *	   is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_key(struct block_device *bdev,
			       const struct blk_crypto_key *key)
{
	…
}

/**
 * blk_crypto_evict_key() - Evict a blk_crypto_key from a block_device
 * @bdev: a block_device on which I/O using the key may have been done
 * @key: the key to evict
 *
 * For a given block_device, this function removes the given blk_crypto_key
 * from the keyslot management structures and evicts it from any underlying
 * hardware keyslot(s) or blk-crypto-fallback keyslot it may have been
 * programmed into.
 *
 * Upper layers must call this before freeing the blk_crypto_key. It must be
 * called for every block_device the key may have been used on. The key must
 * no longer be in use by any I/O when this function is called.
 *
 * Context: May sleep.
 */
void blk_crypto_evict_key(struct block_device *bdev,
			  const struct blk_crypto_key *key)
{
	…
}
EXPORT_SYMBOL_GPL(…);
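
/*
 * Usage sketch, illustrative only and not part of this file: the intended
 * lifecycle of a blk_crypto_key from an upper layer's point of view, per the
 * kerneldoc above. The helper name example_key_lifecycle(), the choice of
 * AES-256-XTS, and the dun_bytes/data_unit_size values are assumptions made
 * for illustration; real callers pick values matching their on-disk format.
 */
static int __maybe_unused example_key_lifecycle(struct block_device *bdev,
						const u8 *raw_key)
{
	struct blk_crypto_key key;
	int err;

	/* Initialize the blk-crypto key from the raw key bytes. */
	err = blk_crypto_init_key(&key, raw_key,
				  BLK_ENCRYPTION_MODE_AES_256_XTS,
				  8 /* dun_bytes */,
				  4096 /* data_unit_size */);
	if (err)
		return err;

	/*
	 * Ensure the key can be serviced, either by the device's inline
	 * encryption hardware or by blk-crypto-fallback. Not to be called
	 * from the data path, since it may allocate an skcipher.
	 */
	err = blk_crypto_start_using_key(bdev, &key);
	if (err)
		goto out;

	/* ... submit I/O that carries the key via bio_crypt_set_ctx() ... */

	/* Once no I/O is using the key anymore, evict it before freeing it. */
	blk_crypto_evict_key(bdev, &key);
out:
	/* Per blk_crypto_init_key(), the caller zeroizes the key material. */
	memzero_explicit(&key, sizeof(key));
	return err;
}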