linux/drivers/md/dm-crypt.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Jana Saout <[email protected]>
 * Copyright (C) 2004 Clemens Fruhwirth <[email protected]>
 * Copyright (C) 2006-2020 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2013-2020 Milan Broz <[email protected]>
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <linux/rbtree.h>
#include <linux/ctype.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/skcipher.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/utils.h>
#include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <keys/trusted-type.h>

#include <linux/device-mapper.h>

#include "dm-audit.h"

#define DM_MSG_PREFIX

static DEFINE_IDA(workqueue_ida);

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {};

/*
 * per bio private data
 */
struct dm_crypt_io {} CRYPTO_MINALIGN_ATTR;

struct dm_crypt_request {};

struct crypt_config;

struct crypt_iv_operations {};

struct iv_benbi_private {};

#define LMK_SEED_SIZE
struct iv_lmk_private {};

#define TCW_WHITENING_SIZE
struct iv_tcw_private {};

#define ELEPHANT_MAX_KEY_SIZE
struct iv_elephant_private {};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags {};

enum cipher_flags {};

/*
 * The fields in here must remain read-only after initialization.
 */
struct crypt_config {};

#define MIN_IOS
#define MAX_TAG_SIZE
#define POOL_ENTRY_SIZE

static DEFINE_SPINLOCK(dm_crypt_clients_lock);
static unsigned int dm_crypt_clients_n;
static volatile unsigned long dm_crypt_pages_per_client;
#define DM_CRYPT_MEMORY_PERCENT
#define DM_CRYPT_MIN_PAGES_PER_CLIENT
#define DM_CRYPT_DEFAULT_MAX_READ_SIZE
#define DM_CRYPT_DEFAULT_MAX_WRITE_SIZE

static unsigned int max_read_size =;
module_param(max_read_size, uint, 0644);
MODULE_PARM_DESC();
static unsigned int max_write_size =;
module_param(max_write_size, uint, 0644);
MODULE_PARM_DESC();
static unsigned get_max_request_size(struct crypt_config *cc, bool wrt)
{}

static void crypt_endio(struct bio *clone);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
					     struct scatterlist *sg);

static bool crypt_integrity_aead(struct crypt_config *cc);

/*
 * Use this to access cipher attributes that are independent of the key.
 */
static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
{}

static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
{}
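
/*
 * Example use of the helpers above (illustrative only): key-independent
 * attributes such as the IV size or block size can be queried from any of
 * the allocated tfms, e.g.
 *
 *	cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
 *	block_size  = crypto_skcipher_blocksize(any_tfm(cc));
 */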

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64be: the initial vector is the 64-bit big-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally an extra IV seed.
 *       This means that after decryption the first block
 *       of the sector must be tweaked according to the decrypted data.
 *       Loop-AES can use three encryption schemes:
 *         version 1: plain aes-cbc mode
 *         version 2: uses a 64-key multikey scheme with the lmk IV generator
 *         version 3: the same as version 2 with an additional IV seed
 *                   (it uses 65 keys; the last key is used as the IV seed)
 *
 * tcw:  Compatible implementation of the block chaining mode used
 *       by the TrueCrypt device encryption system (prior to version 4.1).
 *       For more info see: https://gitlab.com/cryptsetup/cryptsetup/wikis/TrueCryptOnDiskFormat
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the initial key and the sector number.
 *       In addition, a whitening value is applied to every sector; the
 *       whitening is calculated from the initial key and the sector number
 *       and mixed using CRC32.
 *       Note that this encryption scheme is vulnerable to watermarking attacks
 *       and should only be used to access old, compatible containers.
 *
 * eboiv: Encrypted byte-offset IV (used in BitLocker in CBC mode)
 *        The IV is the little-endian byte offset of the sector, encrypted
 *        with the same key and cipher as the volume.
 *
 * elephant: An extended version of eboiv with the additional Elephant diffuser
 *           used with BitLocker CBC mode.
 *           This mode was used in older Windows systems:
 *           https://download.microsoft.com/download/0/2/3/0238acaf-d3bf-4a6d-b3d6-0a0be4bbb36e/bitlockercipher200608.pdf
 */

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{}
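
/*
 * A rough, hedged sketch of what plain64-style IV generation involves, not
 * the in-tree generator: assuming the request carries its starting sector
 * in dmreq->iv_sector and the IV length in cc->iv_size (member names
 * assumed for illustration), zero the IV buffer and store the sector
 * number as a 64-bit little-endian value.
 */
static int sketch_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				 struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}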

static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
				  struct dm_crypt_request *dmreq)
{}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{}

static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{}

static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{}

static int crypt_iv_tcw_init(struct crypt_config *cc)
{}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{}

static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{}

static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{}

static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{}

static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{}

static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{}

static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{}

static void crypt_iv_elephant_dtr(struct crypt_config *cc)
{}

static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{}

static void diffuser_disk_to_cpu(u32 *d, size_t n)
{}

static void diffuser_cpu_to_disk(__le32 *d, size_t n)
{}

static void diffuser_a_decrypt(u32 *d, size_t n)
{}

static void diffuser_a_encrypt(u32 *d, size_t n)
{}

static void diffuser_b_decrypt(u32 *d, size_t n)
{}

static void diffuser_b_encrypt(u32 *d, size_t n)
{}

static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
{}

static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{}

static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv,
				  struct dm_crypt_request *dmreq)
{}

static int crypt_iv_elephant_init(struct crypt_config *cc)
{}

static int crypt_iv_elephant_wipe(struct crypt_config *cc)
{}

static const struct crypt_iv_operations crypt_iv_plain_ops =;

static const struct crypt_iv_operations crypt_iv_plain64_ops =;

static const struct crypt_iv_operations crypt_iv_plain64be_ops =;

static const struct crypt_iv_operations crypt_iv_essiv_ops =;

static const struct crypt_iv_operations crypt_iv_benbi_ops =;

static const struct crypt_iv_operations crypt_iv_null_ops =;

static const struct crypt_iv_operations crypt_iv_lmk_ops =;

static const struct crypt_iv_operations crypt_iv_tcw_ops =;

static const struct crypt_iv_operations crypt_iv_random_ops =;

static const struct crypt_iv_operations crypt_iv_eboiv_ops =;

static const struct crypt_iv_operations crypt_iv_elephant_ops =;

/*
 * Integrity extensions
 */
static bool crypt_integrity_aead(struct crypt_config *cc)
{}

static bool crypt_integrity_hmac(struct crypt_config *cc)
{}

/* Get sg containing data */
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
					     struct scatterlist *sg)
{}

static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
{}

static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
{}

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     void *req)
{}

static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
{}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{}

static u8 *org_iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{}

static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{}

static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{}

static void *tag_from_dmreq(struct crypt_config *cc,
				struct dm_crypt_request *dmreq)
{}

static void *iv_tag_from_dmreq(struct crypt_config *cc,
			       struct dm_crypt_request *dmreq)
{}

static int crypt_convert_block_aead(struct crypt_config *cc,
				     struct convert_context *ctx,
				     struct aead_request *req,
				     unsigned int tag_offset)
{}

static int crypt_convert_block_skcipher(struct crypt_config *cc,
					struct convert_context *ctx,
					struct skcipher_request *req,
					unsigned int tag_offset)
{}

static void kcryptd_async_done(void *async_req, int error);

static int crypt_alloc_req_skcipher(struct crypt_config *cc,
				     struct convert_context *ctx)
{}

static int crypt_alloc_req_aead(struct crypt_config *cc,
				 struct convert_context *ctx)
{}

static int crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{}

static void crypt_free_req_skcipher(struct crypt_config *cc,
				    struct skcipher_request *req, struct bio *base_bio)
{}

static void crypt_free_req_aead(struct crypt_config *cc,
				struct aead_request *req, struct bio *base_bio)
{}

static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
{}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static blk_status_t crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx, bool atomic, bool reset_pending)
{}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);

/*
 * Generate a new unfragmented bio with the given size.
 * This should never violate the device limitations (but if it did then block
 * core should split the bio as needed).
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have a
 * mempool of 256 pages and two processes, each wanting 256 pages, allocate
 * from the mempool concurrently, it may deadlock in a situation where both
 * processes have allocated 128 pages each and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * In order to not degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first; on failure we fall back
 * to blocking allocations with a mutex.
 *
 * In order to reduce allocation overhead, we try to allocate compound pages in
 * the first pass. If they are not available, we fall back to the mempool.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
{}
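
/*
 * Hedged sketch of the locking strategy described above, not the body of
 * crypt_alloc_buffer: try an opportunistic non-blocking allocation first,
 * and only on failure take a mutex so that at most one process performs
 * blocking mempool allocations at a time.  The page_pool and
 * bio_alloc_mutex member names are assumptions for illustration.
 */
static struct page *sketch_alloc_buffer_page(struct crypt_config *cc,
					     bool *mutex_held)
{
	struct page *page;

	page = mempool_alloc(&cc->page_pool, GFP_NOWAIT | __GFP_NOWARN);
	if (page)
		return page;			/* fast path, no lock taken */

	if (!*mutex_held) {
		mutex_lock(&cc->bio_alloc_mutex);
		*mutex_held = true;		/* caller unlocks once the bio is built */
	}
	/* may sleep, but is serialized so concurrent callers cannot deadlock */
	return mempool_alloc(&cc->page_pool, GFP_NOIO);
}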

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{}

static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
			  struct bio *bio, sector_t sector)
{}

static void crypt_inc_pending(struct dm_crypt_io *io)
{}

static void kcryptd_queue_read(struct dm_crypt_io *io);

/*
 * One of the bios has finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{}
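
/*
 * Hedged sketch of the pending-reference pattern described above (the
 * io_pending and base_bio member names are assumptions for illustration):
 * every in-flight sub-operation holds a reference and the last one to drop
 * it completes the original bio.
 */
static void sketch_dec_pending(struct dm_crypt_io *io)
{
	if (!atomic_dec_and_test(&io->io_pending))
		return;			/* other sub-requests still in flight */

	/* last reference: report completion upstream */
	bio_endio(io->base_bio);
}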

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated, as otherwise the final stages could be
 * starved by new requests that can block in the first stages due
 * to memory allocation.
 *
 * The work is done per-CPU, globally for all dm-crypt instances.
 * The work items should not depend on each other and must not block.
 */
static void crypt_endio(struct bio *clone)
{}
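
/*
 * Hedged sketch of the workqueue split described in the comment above
 * (workqueue names, flags and the io_queue/crypt_queue members are
 * assumptions for illustration): I/O submission and crypto processing get
 * separate workqueues so that slow crypto work cannot starve completions.
 */
static int sketch_alloc_workqueues(struct crypt_config *cc)
{
	cc->io_queue = alloc_workqueue("kcryptd_io-sketch", WQ_MEM_RECLAIM, 1);
	if (!cc->io_queue)
		return -ENOMEM;

	cc->crypt_queue = alloc_workqueue("kcryptd-sketch",
					  WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
	if (!cc->crypt_queue) {
		destroy_workqueue(cc->io_queue);
		return -ENOMEM;
	}

	return 0;
}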

#define CRYPT_MAP_READ_GFP

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{}

static void kcryptd_io_read_work(struct work_struct *work)
{}

static void kcryptd_queue_read(struct dm_crypt_io *io)
{}

static void kcryptd_io_write(struct dm_crypt_io *io)
{}

#define crypt_io_from_node(node)

static int dmcrypt_write(void *data)
{}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{}

static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
				       struct convert_context *ctx)

{}

static void kcryptd_crypt_write_continue(struct work_struct *work)
{}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{}

static void kcryptd_crypt_read_continue(struct work_struct *work)
{}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{}

static void kcryptd_async_done(void *data, int error)
{}

static void kcryptd_crypt(struct work_struct *work)
{}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{}

static void crypt_free_tfms_aead(struct crypt_config *cc)
{}

static void crypt_free_tfms_skcipher(struct crypt_config *cc)
{}

static void crypt_free_tfms(struct crypt_config *cc)
{}

static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
{}

static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
{}

static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{}

static unsigned int crypt_subkey_size(struct crypt_config *cc)
{}

static unsigned int crypt_authenckey_size(struct crypt_config *cc)
{}

/*
 * If AEAD is composed like authenc(hmac(sha256),xts(aes)),
 * the key must be passed to the crypto API in a special format.
 * This function converts cc->key to that special format.
 */
static void crypt_copy_authenckey(char *p, const void *key,
				  unsigned int enckeylen, unsigned int authkeylen)
{}
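
/*
 * Hedged sketch of the key layout the comment above refers to, not the body
 * of crypt_copy_authenckey: the crypto API's authenc() template takes an
 * rtattr header carrying the encryption key length, followed by the
 * authentication key and then the encryption key.
 */
static void sketch_build_authenc_key(char *p, const u8 *authkey,
				     unsigned int authkeylen,
				     const u8 *enckey, unsigned int enckeylen)
{
	struct rtattr *rta = (struct rtattr *)p;
	struct crypto_authenc_key_param *param;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	p += RTA_SPACE(sizeof(*param));
	memcpy(p, authkey, authkeylen);
	p += authkeylen;
	memcpy(p, enckey, enckeylen);
}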

static int crypt_setkey(struct crypt_config *cc)
{}

#ifdef CONFIG_KEYS

static bool contains_whitespace(const char *str)
{}

static int set_key_user(struct crypt_config *cc, struct key *key)
{}

static int set_key_encrypted(struct crypt_config *cc, struct key *key)
{}

static int set_key_trusted(struct crypt_config *cc, struct key *key)
{}

static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{}

static int get_key_size(char **key_string)
{}

#else

static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{
	return -EINVAL;
}

static int get_key_size(char **key_string)
{
	return (*key_string[0] == ':') ? -EINVAL : (int)(strlen(*key_string) >> 1);
}

#endif /* CONFIG_KEYS */

static int crypt_set_key(struct crypt_config *cc, char *key)
{}

static int crypt_wipe_key(struct crypt_config *cc)
{}

static void crypt_calculate_pages_per_client(void)
{}

static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
{}

static void crypt_page_free(void *page, void *pool_data)
{}

static void crypt_dtr(struct dm_target *ti)
{}

static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
{}

/*
 * Workaround to parse the HMAC algorithm from an AEAD crypto API spec.
 * The HMAC is needed to calculate the tag size (HMAC digest size).
 * This should probably be done by crypto API calls (once available...)
 */
static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
{}
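
/*
 * Hedged sketch of the parsing described in the comment above: pull the
 * inner "hmac(...)" name out of a spec such as "authenc(hmac(sha256),xts(aes))"
 * and ask the crypto API for its digest size.  Simplified illustration only;
 * the digest_size output parameter is an assumption and error handling is
 * minimal.
 */
static int sketch_auth_digest_size(const char *cipher_api,
				   unsigned int *digest_size)
{
	struct crypto_ahash *mac;
	const char *start, *end;
	char *mac_alg;

	start = strchr(cipher_api, '(');	/* opening paren of authenc(...) */
	end = strchr(cipher_api, ',');		/* end of the hmac(...) part */
	if (!start || !end || ++start >= end)
		return -EINVAL;

	mac_alg = kmemdup_nul(start, end - start, GFP_KERNEL);
	if (!mac_alg)
		return -ENOMEM;

	mac = crypto_alloc_ahash(mac_alg, 0, 0);
	kfree(mac_alg);
	if (IS_ERR(mac))
		return PTR_ERR(mac);

	*digest_size = crypto_ahash_digestsize(mac);
	crypto_free_ahash(mac);

	return 0;
}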

static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{}

static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{}

static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
{}

static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
{}

#ifdef CONFIG_BLK_DEV_ZONED
static int crypt_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{}
#else
#define crypt_report_zones
#endif

/*
 * Construct an encryption mapping:
 * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{}
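
/*
 * Example table lines for the constructor parameters documented above
 * (illustrative values only):
 *
 *   # key given directly on the table line as hex
 *   0 2097152 crypt aes-xts-plain64 <64-hex-digit-key> 0 /dev/sdb 0
 *
 *   # key taken from the kernel keyring instead of the table line
 *   0 2097152 crypt aes-xts-plain64 :32:logon:cryptkey0 0 /dev/sdb 0
 *
 * loaded with e.g. "dmsetup create cryptdev --table '<line>'".
 */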

static int crypt_map(struct dm_target *ti, struct bio *bio)
{}

static char hex2asc(unsigned char c)
{}

static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned int status_flags, char *result, unsigned int maxlen)
{}

static void crypt_postsuspend(struct dm_target *ti)
{}

static int crypt_preresume(struct dm_target *ti)
{}

static void crypt_resume(struct dm_target *ti)
{}

/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned int argc, char **argv,
			 char *result, unsigned int maxlen)
{}
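
/*
 * Example of driving the message interface above from user space
 * (illustrative device name):
 *
 *   dmsetup message cryptdev 0 key wipe
 *   dmsetup message cryptdev 0 key set <new-key>
 */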

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{}

static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
{}

static struct target_type crypt_target =;
module_dm(crypt);

MODULE_AUTHOR();
MODULE_DESCRIPTION();
MODULE_LICENSE();