whiterose

linux unikernel
Log | Files | Refs | README | LICENSE | git clone https://git.ne02ptzero.me/git/whiterose

commit 63bdf4284c38a48af21745ceb148a087b190cd21
parent 6456300356433873309a1cae6aa05e77d6b59153
Author: Linus Torvalds <torvalds@linux-foundation.org>
Date:   Tue,  5 Mar 2019 09:09:55 -0800

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto update from Herbert Xu:
 "API:
   - Add helper for simple skcipher modes.
   - Add helper to register multiple templates.
   - Set CRYPTO_TFM_NEED_KEY when setkey fails.
   - Require neither or both of export/import in shash.
   - AEAD decryption test vectors are now generated from encryption
     ones.
   - New option CONFIG_CRYPTO_MANAGER_EXTRA_TESTS that includes random
     fuzzing.

  Algorithms:
   - Conversions to skcipher and helper for many templates.
   - Add more test vectors for nhpoly1305 and adiantum.

  Drivers:
   - Add crypto4xx prng support.
   - Add xcbc/cmac/ecb support in caam.
   - Add AES support for Exynos5433 in s5p.
   - Remove sha384/sha512 from artpec7 as hardware cannot do partial
     hash"

[ There is a merge of the Freescale SoC tree in order to pull in changes
  required by patches to the caam/qi2 driver. ]
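Of the API changes listed above, the template-registration helper is the most visible to authors of new modes: a module that provides several templates can now register them as an array in a single call, with automatic unwinding if any registration fails. Below is a minimal sketch, assuming the helpers added in this cycle are crypto_register_templates()/crypto_unregister_templates(); the "mymode" template names and the stub create callback are hypothetical.

#include <crypto/algapi.h>
#include <linux/errno.h>
#include <linux/module.h>

/* Hypothetical stub; a real template would build an skcipher instance here. */
static int mymode_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	return -ENOSYS;
}

static struct crypto_template mymode_tmpls[] = {
	{
		.name	= "mymode",
		.create	= mymode_create,
		.module	= THIS_MODULE,
	}, {
		.name	= "mymode2",
		.create	= mymode_create,
		.module	= THIS_MODULE,
	},
};

static int __init mymode_init(void)
{
	/* Registers the whole array; entries already registered are
	 * unregistered again if a later one fails. */
	return crypto_register_templates(mymode_tmpls,
					 ARRAY_SIZE(mymode_tmpls));
}

static void __exit mymode_exit(void)
{
	crypto_unregister_templates(mymode_tmpls, ARRAY_SIZE(mymode_tmpls));
}

module_init(mymode_init);
module_exit(mymode_exit);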

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (174 commits)
  crypto: s5p - add AES support for Exynos5433
  dt-bindings: crypto: document Exynos5433 SlimSSS
  crypto: crypto4xx - add missing of_node_put after of_device_is_available
  crypto: cavium/zip - fix collision with generic cra_driver_name
  crypto: af_alg - use struct_size() in sock_kfree_s()
  crypto: caam - remove redundant likely/unlikely annotation
  crypto: s5p - update iv after AES-CBC op end
  crypto: x86/poly1305 - Clear key material from stack in SSE2 variant
  crypto: caam - generate hash keys in-place
  crypto: caam - fix DMA mapping xcbc key twice
  crypto: caam - fix hash context DMA unmap size
  hwrng: bcm2835 - fix probe as platform device
  crypto: s5p-sss - Use AES_BLOCK_SIZE define instead of number
  crypto: stm32 - drop pointless static qualifier in stm32_hash_remove()
  crypto: chelsio - Fixed Traffic Stall
  crypto: marvell - Remove set but not used variable 'ivsize'
  crypto: ccp - Update driver messages to remove some confusion
  crypto: adiantum - add 1536 and 4096-byte test vectors
  crypto: nhpoly1305 - add a test vector with len % 16 != 0
  crypto: arm/aes-ce - update IV after partial final CTR block
  ...
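The af_alg change in the list above ("use struct_size() in sock_kfree_s()") applies the standard flexible-array sizing idiom from <linux/overflow.h>: struct_size(p, member, n) replaces open-coded sizeof(*p) + n * sizeof(elem) arithmetic and saturates to SIZE_MAX on overflow instead of silently wrapping. A sketch of the idiom with a hypothetical structure (not the actual af_alg types):

#include <linux/overflow.h>
#include <linux/slab.h>

/* Hypothetical structure ending in a flexible array member. */
struct pkt {
	unsigned int	len;
	u8		data[];
};

static struct pkt *pkt_alloc(unsigned int n)
{
	/* Equivalent to kzalloc(sizeof(*p) + n, ...), except that an
	 * overflowing size computation saturates, so the allocation
	 * fails instead of coming back undersized. */
	struct pkt *p = kzalloc(struct_size(p, data, n), GFP_KERNEL);

	if (p)
		p->len = n;
	return p;
}

/* Freeing mirrors the allocation size:
 *	sock_kfree_s(sk, p, struct_size(p, data, p->len));
 */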

Diffstat:
A Documentation/devicetree/bindings/crypto/samsung-slimsss.txt | 19 +++++++++++++++++++
M MAINTAINERS | 1 -
M arch/arm/crypto/aes-ce-core.S | 26 +++++++++++++-------------
M arch/arm/crypto/crct10dif-ce-core.S | 568 ++++++++++++++++++++++++++++++++++++-------------------------------------
M arch/arm/crypto/crct10dif-ce-glue.c | 25 +++++++------------------
M arch/arm64/crypto/aes-ce-ccm-core.S | 5 +++--
M arch/arm64/crypto/aes-ce-ccm-glue.c | 8 +++-----
M arch/arm64/crypto/aes-modes.S | 3 +--
M arch/arm64/crypto/aes-neonbs-core.S | 8 ++++++--
M arch/arm64/crypto/crct10dif-ce-core.S | 513 ++++++++++++++++++++++++++++++++++++-------------------------------------
M arch/arm64/crypto/crct10dif-ce-glue.c | 75 ++++++++++++++++++++++++++++++++++++++++++++++-----------------------------
M arch/arm64/crypto/ghash-ce-glue.c | 118 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------
M arch/s390/crypto/des_s390.c | 4 ++--
M arch/sparc/crypto/des_glue.c | 4 ++--
M arch/x86/crypto/aegis128-aesni-glue.c | 38 +++++++++++++++-----------------------
M arch/x86/crypto/aegis128l-aesni-glue.c | 38 +++++++++++++++-----------------------
M arch/x86/crypto/aegis256-aesni-glue.c | 38 +++++++++++++++-----------------------
M arch/x86/crypto/aesni-intel_glue.c | 47 +++++++++++++++++++++--------------------------
M arch/x86/crypto/crct10dif-pcl-asm_64.S | 782 ++++++++++++++++++++++++-------------------------------------------------
M arch/x86/crypto/crct10dif-pclmul_glue.c | 12 +++---------
M arch/x86/crypto/morus1280_glue.c | 40 ++++++++++++++++------------------------
M arch/x86/crypto/morus640_glue.c | 39 +++++++++++++++------------------------
M arch/x86/crypto/poly1305-sse2-x86_64.S | 4 ++++
M crypto/Kconfig | 14 ++++++++++++--
M crypto/aead.c | 4 +++-
M crypto/aegis.h | 7 +------
M crypto/aegis128.c | 20 ++++++++------------
M crypto/aegis128l.c | 20 ++++++++------------
M crypto/aegis256.c | 20 ++++++++------------
M crypto/af_alg.c | 36 +++++++++++++-----------------------
M crypto/ahash.c | 42 ++++++++++++++++++++++++++----------------
M crypto/algapi.c | 63 ++++++++++++++++++++++++++++++++-------------------------------
M crypto/arc4.c | 87 +++++++++++++++++++++++++++++++++++++++++--------------------------------------
M crypto/cbc.c | 131 ++++++++-----------------------------------------------------------------------
M crypto/ccm.c | 78 +++++++++++++++++++++++-------------------------------------------------------
M crypto/cfb.c | 139 +++++++++++--------------------------------------------------------------------
M crypto/chacha20poly1305.c | 37 ++++++++++++++-----------------------
M crypto/crypto_null.c | 57 ++++++++++++++++++++++++++++++++-------------------------
M crypto/crypto_user_stat.c | 4 ----
M crypto/ctr.c | 200 ++++++++++++++++++++++---------------------------------------------------------
M crypto/des_generic.c | 4 ++--
M crypto/ecb.c | 151 +++++++++++++++++++------------------------------------------------------------
M crypto/gcm.c | 75 ++++++++++++++++++++++++---------------------------------------------------
M crypto/keywrap.c | 198 ++++++++++++++++++++++++++-----------------------------------------------------
M crypto/morus1280.c | 19 ++++++++-----------
M crypto/morus640.c | 19 ++++++++-----------
M crypto/ofb.c | 202 ++++++++++++++++---------------------------------------------------------------
M crypto/pcbc.c | 143 +++++++++----------------------------------------------------------------------
M crypto/rsa-pkcs1pad.c | 1 +
M crypto/seqiv.c | 7 +++----
M crypto/shash.c | 27 ++++++++++++++++++---------
M crypto/skcipher.c | 158 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----
M crypto/streebog_generic.c | 2 +-
M crypto/testmgr.c | 2760 +++++++++++++++++++++++++++++++++++++++++++------------------------------------
M crypto/testmgr.h | 15649 ++++++++++++++++++++++++++++++++++---------------------------------------------
M crypto/tgr192.c | 6 +++---
M drivers/bus/fsl-mc/fsl-mc-allocator.c | 11 +++++++++++
M drivers/bus/fsl-mc/mc-io.c | 13 +++++++++++++
M drivers/char/hw_random/bcm2835-rng.c | 18 ++++++++++--------
M drivers/char/hw_random/virtio-rng.c | 2 +-
M drivers/crypto/amcc/crypto4xx_core.c | 87 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
M drivers/crypto/amcc/crypto4xx_core.h | 4 ++++
M drivers/crypto/amcc/crypto4xx_reg_def.h | 1 +
M drivers/crypto/amcc/crypto4xx_trng.c | 4 +++-
M drivers/crypto/amcc/crypto4xx_trng.h | 4 ++--
M drivers/crypto/atmel-tdes.c | 2 +-
M drivers/crypto/axis/artpec6_crypto.c | 326 +++++++++++++++++--------------------------------------------------------------
M drivers/crypto/bcm/Makefile | 2 --
M drivers/crypto/bcm/cipher.c | 10 +++-------
M drivers/crypto/bcm/cipher.h | 4 +---
M drivers/crypto/bcm/util.c | 40 ----------------------------------------
M drivers/crypto/bcm/util.h | 6 ------
M drivers/crypto/caam/Kconfig | 1 +
M drivers/crypto/caam/caamalg.c | 238 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------
M drivers/crypto/caam/caamalg_desc.c | 18 +++++++++++-------
M drivers/crypto/caam/caamalg_qi.c | 29 ++++++++++++++++++-----------
M drivers/crypto/caam/caamalg_qi2.c | 85 +++++++++++++++++++++++++++++++++++++++++++------------------------------------
M drivers/crypto/caam/caamalg_qi2.h | 2 ++
M drivers/crypto/caam/caamhash.c | 429 +++++++++++++++++++++++++++++++++++++++++++++++++++----------------------
M drivers/crypto/caam/caamhash_desc.c | 68 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
M drivers/crypto/caam/caamhash_desc.h | 8 ++++++++
M drivers/crypto/caam/compat.h | 1 +
M drivers/crypto/caam/ctrl.c | 25 ++++++-------------------
M drivers/crypto/caam/error.c | 6 ++++++
M drivers/crypto/caam/intern.h | 1 -
M drivers/crypto/caam/key_gen.c | 30 ++++++++++--------------------
M drivers/crypto/caam/qi.c | 4 ++--
M drivers/crypto/cavium/nitrox/nitrox_debugfs.c | 27 +++++----------------------
M drivers/crypto/cavium/nitrox/nitrox_debugfs.h | 5 ++---
M drivers/crypto/cavium/nitrox/nitrox_main.c | 4 +---
M drivers/crypto/cavium/zip/zip_main.c | 58 +++++++++++++++-------------------------------------------
M drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 2 +-
M drivers/crypto/ccp/ccp-crypto-des3.c | 2 +-
M drivers/crypto/ccp/ccp-crypto-sha.c | 2 +-
M drivers/crypto/ccp/ccp-debugfs.c | 36 +++++++-----------------------------
M drivers/crypto/ccp/ccp-ops.c | 2 +-
M drivers/crypto/ccp/psp-dev.c | 37 +++++++++++++++++++++++++++++++------
M drivers/crypto/ccp/psp-dev.h | 2 +-
M drivers/crypto/ccp/sp-dev.c | 2 +-
M drivers/crypto/ccp/sp-dev.h | 2 +-
M drivers/crypto/ccp/sp-pci.c | 6 +-----
M drivers/crypto/ccp/sp-platform.c | 2 +-
M drivers/crypto/ccree/cc_buffer_mgr.c | 87 +++++++++++++++++++++++++++++++++++++++----------------------------------------
M drivers/crypto/ccree/cc_cipher.c | 10 +++++++---
M drivers/crypto/ccree/cc_debugfs.c | 22 +++-------------------
M drivers/crypto/ccree/cc_debugfs.h | 8 ++------
M drivers/crypto/ccree/cc_driver.c | 13 ++++---------
M drivers/crypto/ccree/cc_driver.h | 2 --
M drivers/crypto/chelsio/Makefile | 2 +-
M drivers/crypto/chelsio/chcr_algo.c | 12 ++++++------
M drivers/crypto/chelsio/chcr_core.h | 2 +-
M drivers/crypto/chelsio/chcr_ipsec.c | 42 +++++++++++++++++++++++++-----------------
M drivers/crypto/chelsio/chtls/Makefile | 3 ++-
M drivers/crypto/chelsio/chtls/chtls_io.c | 12 +-----------
M drivers/crypto/chelsio/chtls/chtls_main.c | 1 -
M drivers/crypto/hifn_795x.c | 3 ++-
M drivers/crypto/inside-secure/safexcel_cipher.c | 2 +-
M drivers/crypto/ixp4xx_crypto.c | 4 ++--
M drivers/crypto/marvell/cipher.c | 4 +---
M drivers/crypto/n2_core.c | 2 +-
M drivers/crypto/omap-des.c | 2 +-
M drivers/crypto/picoxcell_crypto.c | 3 ++-
M drivers/crypto/qat/qat_c3xxx/Makefile | 2 +-
M drivers/crypto/qat/qat_c3xxx/adf_drv.c | 5 -----
M drivers/crypto/qat/qat_c3xxxvf/Makefile | 2 +-
M drivers/crypto/qat/qat_c3xxxvf/adf_drv.c | 5 -----
M drivers/crypto/qat/qat_c62x/Makefile | 2 +-
M drivers/crypto/qat/qat_c62x/adf_drv.c | 5 -----
M drivers/crypto/qat/qat_c62xvf/Makefile | 2 +-
M drivers/crypto/qat/qat_c62xvf/adf_drv.c | 5 -----
M drivers/crypto/qat/qat_common/adf_cfg.c | 7 -------
M drivers/crypto/qat/qat_common/adf_transport.c | 7 -------
M drivers/crypto/qat/qat_common/adf_transport_debug.c | 15 ---------------
M drivers/crypto/qat/qat_dh895xcc/Makefile | 2 +-
M drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 5 -----
M drivers/crypto/qat/qat_dh895xccvf/Makefile | 2 +-
M drivers/crypto/qat/qat_dh895xccvf/adf_drv.c | 5 -----
M drivers/crypto/qce/ablkcipher.c | 4 ++--
M drivers/crypto/rockchip/rk3288_crypto.c | 2 +-
M drivers/crypto/rockchip/rk3288_crypto.h | 4 +++-
M drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c | 41 ++++++++++++++++++++++++++++++++++++++---
M drivers/crypto/rockchip/rk3288_crypto_ahash.c | 2 +-
M drivers/crypto/s5p-sss.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++++++------
M drivers/crypto/stm32/stm32-hash.c | 2 +-
M drivers/crypto/sunxi-ss/sun4i-ss-cipher.c | 2 +-
M drivers/crypto/talitos.c | 2 +-
M drivers/crypto/ux500/cryp/cryp_core.c | 26 +++++++++++++++++---------
M drivers/crypto/virtio/virtio_crypto_algs.c | 2 +-
M drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 9 +++++----
M drivers/soc/fsl/dpio/dpio-cmd.h | 1 +
M drivers/soc/fsl/dpio/dpio-driver.c | 41 ++++++++++++++++++++++++-----------------
M drivers/soc/fsl/dpio/dpio-service.c | 41 ++++++++++++++++++++++++++++++++++++++---
M drivers/soc/fsl/dpio/dpio.c | 23 +++++++++++++++++++++++
M drivers/soc/fsl/dpio/dpio.h | 4 ++++
M fs/crypto/keyinfo.c | 4 ++--
M fs/ecryptfs/crypto.c | 5 +++--
M include/crypto/algapi.h | 8 ++++----
A include/crypto/arc4.h | 13 +++++++++++++
M include/crypto/if_alg.h | 7 -------
M include/crypto/internal/cryptouser.h | 2 ++
M include/crypto/internal/hash.h | 6 +++---
M include/crypto/internal/skcipher.h | 15 +++++++++++++++
M include/crypto/morus1280_glue.h | 7 +------
M include/crypto/morus640_glue.h | 7 +------
M include/crypto/morus_common.h | 7 +------
M include/crypto/streebog.h | 2 +-
M include/crypto/xts.h | 4 ++--
M include/linux/crypto.h | 10 +---------
M include/linux/fsl/mc.h | 1 +
M include/soc/fsl/dpaa2-io.h | 11 ++++++++---
170 files changed, 11316 insertions(+), 13833 deletions(-)
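Most of the diff below is the rewrite of the ARM and arm64 CRC-T10DIF PMULL code. For orientation: everything the 128-byte folding loops and the final Barrett reduction compute is the ordinary CRC-T10DIF, a non-reflected CRC-16 over G(x) = 0x18bb7 with a zero initial value; the PMULL code reaches the same result by folding 128 input bytes per iteration with carryless multiplies and reducing the final remainder with the precomputed x^48-scaled constants in the .rodata tables. A bit-at-a-time reference in C (a sketch for comparison, not the kernel's implementation):

#include <stddef.h>
#include <stdint.h>

/* Reference CRC-T10DIF: polynomial 0x18bb7, init 0, no reflection.
 * crc_t10dif_ref(0, (const uint8_t *)"123456789", 9) == 0xd0db,
 * the standard check value for this CRC. */
static uint16_t crc_t10dif_ref(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= (uint16_t)*buf++ << 8;	/* feed next byte, MSB first */
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7
					     : crc << 1;
	}
	return crc;
}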

diff --git a/Documentation/devicetree/bindings/crypto/samsung-slimsss.txt b/Documentation/devicetree/bindings/crypto/samsung-slimsss.txt @@ -0,0 +1,19 @@ +Samsung SoC SlimSSS (Slim Security SubSystem) module + +The SlimSSS module in Exynos5433 SoC supports the following: +-- Feeder (FeedCtrl) +-- Advanced Encryption Standard (AES) with ECB,CBC,CTR,XTS and (CBC/XTS)/CTS +-- SHA-1/SHA-256 and (SHA-1/SHA-256)/HMAC + +Required properties: + +- compatible : Should contain entry for slimSSS version: + - "samsung,exynos5433-slim-sss" for Exynos5433 SoC. +- reg : Offset and length of the register set for the module +- interrupts : interrupt specifiers of SlimSSS module interrupts (one feed + control interrupt). + +- clocks : list of clock phandle and specifier pairs for all clocks listed in + clock-names property. +- clock-names : list of device clock input names; should contain "pclk" and + "aclk" for slim-sss in Exynos5433. diff --git a/MAINTAINERS b/MAINTAINERS @@ -3529,7 +3529,6 @@ F: include/linux/spi/cc2520.h F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt CCREE ARM TRUSTZONE CRYPTOCELL REE DRIVER -M: Yael Chemla <yael.chemla@foss.arm.com> M: Gilad Ben-Yossef <gilad@benyossef.com> L: linux-crypto@vger.kernel.org S: Supported diff --git a/arch/arm/crypto/aes-ce-core.S b/arch/arm/crypto/aes-ce-core.S @@ -317,25 +317,27 @@ ENTRY(ce_aes_ctr_encrypt) .Lctrloop: vmov q0, q6 bl aes_encrypt - subs r4, r4, #1 - bmi .Lctrtailblock @ blocks < 0 means tail block - vld1.8 {q3}, [r1]! - veor q3, q0, q3 - vst1.8 {q3}, [r0]! adds r6, r6, #1 @ increment BE ctr rev ip, r6 vmov s27, ip bcs .Lctrcarry - teq r4, #0 + +.Lctrcarrydone: + subs r4, r4, #1 + bmi .Lctrtailblock @ blocks < 0 means tail block + vld1.8 {q3}, [r1]! + veor q3, q0, q3 + vst1.8 {q3}, [r0]! bne .Lctrloop + .Lctrout: - vst1.8 {q6}, [r5] + vst1.8 {q6}, [r5] @ return next CTR value pop {r4-r6, pc} .Lctrtailblock: - vst1.8 {q0}, [r0, :64] @ return just the key stream - pop {r4-r6, pc} + vst1.8 {q0}, [r0, :64] @ return the key stream + b .Lctrout .Lctrcarry: .irp sreg, s26, s25, s24 @@ -344,11 +346,9 @@ ENTRY(ce_aes_ctr_encrypt) adds ip, ip, #1 rev ip, ip vmov \sreg, ip - bcc 0f + bcc .Lctrcarrydone .endr -0: teq r4, #0 - beq .Lctrout - b .Lctrloop + b .Lctrcarrydone ENDPROC(ce_aes_ctr_encrypt) /* diff --git a/arch/arm/crypto/crct10dif-ce-core.S b/arch/arm/crypto/crct10dif-ce-core.S @@ -2,12 +2,14 @@ // Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions // // Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org> +// Copyright (C) 2019 Google LLC <ebiggers@google.com> // // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License version 2 as // published by the Free Software Foundation. // +// Derived from the x86 version: // // Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions // @@ -54,19 +56,11 @@ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// -// Function API: -// UINT16 crc_t10dif_pcl( -// UINT16 init_crc, //initial CRC value, 16 bits -// const unsigned char *buf, //buffer pointer to calculate CRC on -// UINT64 len //buffer length in bytes (64-bit data) -// ); -// // Reference paper titled "Fast CRC Computation for Generic // Polynomials Using PCLMULQDQ Instruction" // URL: http://www.intel.com/content/dam/www/public/us/en/documents // /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf // -// #include <linux/linkage.h> #include <asm/assembler.h> @@ -78,13 +72,14 @@ #endif .text + .arch armv7-a .fpu crypto-neon-fp-armv8 - arg1_low32 .req r0 - arg2 .req r1 - arg3 .req r2 + init_crc .req r0 + buf .req r1 + len .req r2 - qzr .req q13 + fold_consts_ptr .req ip q0l .req d0 q0h .req d1 @@ -102,82 +97,35 @@ q6h .req d13 q7l .req d14 q7h .req d15 - -ENTRY(crc_t10dif_pmull) - vmov.i8 qzr, #0 // init zero register - - // adjust the 16-bit initial_crc value, scale it to 32 bits - lsl arg1_low32, arg1_low32, #16 - - // check if smaller than 256 - cmp arg3, #256 - - // for sizes less than 128, we can't fold 64B at a time... - blt _less_than_128 - - // load the initial crc value - // crc value does not need to be byte-reflected, but it needs - // to be moved to the high part of the register. - // because data will be byte-reflected and will align with - // initial crc at correct place. - vmov s0, arg1_low32 // initial crc - vext.8 q10, qzr, q0, #4 - - // receive the initial 64B data, xor the initial crc value - vld1.64 {q0-q1}, [arg2, :128]! - vld1.64 {q2-q3}, [arg2, :128]! - vld1.64 {q4-q5}, [arg2, :128]! - vld1.64 {q6-q7}, [arg2, :128]! -CPU_LE( vrev64.8 q0, q0 ) -CPU_LE( vrev64.8 q1, q1 ) -CPU_LE( vrev64.8 q2, q2 ) -CPU_LE( vrev64.8 q3, q3 ) -CPU_LE( vrev64.8 q4, q4 ) -CPU_LE( vrev64.8 q5, q5 ) -CPU_LE( vrev64.8 q6, q6 ) -CPU_LE( vrev64.8 q7, q7 ) - - vswp d0, d1 - vswp d2, d3 - vswp d4, d5 - vswp d6, d7 - vswp d8, d9 - vswp d10, d11 - vswp d12, d13 - vswp d14, d15 - - // XOR the initial_crc value - veor.8 q0, q0, q10 - - adr ip, rk3 - vld1.64 {q10}, [ip, :128] // xmm10 has rk3 and rk4 - - // - // we subtract 256 instead of 128 to save one instruction from the loop - // - sub arg3, arg3, #256 - - // at this section of the code, there is 64*x+y (0<=y<64) bytes of - // buffer. The _fold_64_B_loop will fold 64B at a time - // until we have 64+y Bytes of buffer - - - // fold 64B at a time. This section of the code folds 4 vector - // registers in parallel -_fold_64_B_loop: - - .macro fold64, reg1, reg2 - vld1.64 {q11-q12}, [arg2, :128]! - - vmull.p64 q8, \reg1\()h, d21 - vmull.p64 \reg1, \reg1\()l, d20 - vmull.p64 q9, \reg2\()h, d21 - vmull.p64 \reg2, \reg2\()l, d20 - -CPU_LE( vrev64.8 q11, q11 ) -CPU_LE( vrev64.8 q12, q12 ) - vswp d22, d23 - vswp d24, d25 + q8l .req d16 + q8h .req d17 + q9l .req d18 + q9h .req d19 + q10l .req d20 + q10h .req d21 + q11l .req d22 + q11h .req d23 + q12l .req d24 + q12h .req d25 + + FOLD_CONSTS .req q10 + FOLD_CONST_L .req q10l + FOLD_CONST_H .req q10h + + // Fold reg1, reg2 into the next 32 data bytes, storing the result back + // into reg1, reg2. + .macro fold_32_bytes, reg1, reg2 + vld1.64 {q11-q12}, [buf]! 
+ + vmull.p64 q8, \reg1\()h, FOLD_CONST_H + vmull.p64 \reg1, \reg1\()l, FOLD_CONST_L + vmull.p64 q9, \reg2\()h, FOLD_CONST_H + vmull.p64 \reg2, \reg2\()l, FOLD_CONST_L + +CPU_LE( vrev64.8 q11, q11 ) +CPU_LE( vrev64.8 q12, q12 ) + vswp q11l, q11h + vswp q12l, q12h veor.8 \reg1, \reg1, q8 veor.8 \reg2, \reg2, q9 @@ -185,242 +133,248 @@ CPU_LE( vrev64.8 q12, q12 ) veor.8 \reg2, \reg2, q12 .endm - fold64 q0, q1 - fold64 q2, q3 - fold64 q4, q5 - fold64 q6, q7 - - subs arg3, arg3, #128 - - // check if there is another 64B in the buffer to be able to fold - bge _fold_64_B_loop - - // at this point, the buffer pointer is pointing at the last y Bytes - // of the buffer the 64B of folded data is in 4 of the vector - // registers: v0, v1, v2, v3 - - // fold the 8 vector registers to 1 vector register with different - // constants - - adr ip, rk9 - vld1.64 {q10}, [ip, :128]! - - .macro fold16, reg, rk - vmull.p64 q8, \reg\()l, d20 - vmull.p64 \reg, \reg\()h, d21 - .ifnb \rk - vld1.64 {q10}, [ip, :128]! + // Fold src_reg into dst_reg, optionally loading the next fold constants + .macro fold_16_bytes, src_reg, dst_reg, load_next_consts + vmull.p64 q8, \src_reg\()l, FOLD_CONST_L + vmull.p64 \src_reg, \src_reg\()h, FOLD_CONST_H + .ifnb \load_next_consts + vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]! .endif - veor.8 q7, q7, q8 - veor.8 q7, q7, \reg + veor.8 \dst_reg, \dst_reg, q8 + veor.8 \dst_reg, \dst_reg, \src_reg .endm - fold16 q0, rk11 - fold16 q1, rk13 - fold16 q2, rk15 - fold16 q3, rk17 - fold16 q4, rk19 - fold16 q5, rk1 - fold16 q6 - - // instead of 64, we add 48 to the loop counter to save 1 instruction - // from the loop instead of a cmp instruction, we use the negative - // flag with the jl instruction - adds arg3, arg3, #(128-16) - blt _final_reduction_for_128 - - // now we have 16+y bytes left to reduce. 16 Bytes is in register v7 - // and the rest is in memory. We can fold 16 bytes at a time if y>=16 - // continue folding 16B at a time - -_16B_reduction_loop: - vmull.p64 q8, d14, d20 - vmull.p64 q7, d15, d21 - veor.8 q7, q7, q8 + .macro __adrl, out, sym + movw \out, #:lower16:\sym + movt \out, #:upper16:\sym + .endm - vld1.64 {q0}, [arg2, :128]! -CPU_LE( vrev64.8 q0, q0 ) - vswp d0, d1 - veor.8 q7, q7, q0 - subs arg3, arg3, #16 - - // instead of a cmp instruction, we utilize the flags with the - // jge instruction equivalent of: cmp arg3, 16-16 - // check if there is any more 16B in the buffer to be able to fold - bge _16B_reduction_loop - - // now we have 16+z bytes left to reduce, where 0<= z < 16. - // first, we reduce the data in the xmm7 register - -_final_reduction_for_128: - // check if any more data to fold. If not, compute the CRC of - // the final 128 bits - adds arg3, arg3, #16 - beq _128_done - - // here we are getting data that is less than 16 bytes. - // since we know that there was data before the pointer, we can - // offset the input pointer before the actual point, to receive - // exactly 16 bytes. after that the registers need to be adjusted. 
-_get_last_two_regs: - add arg2, arg2, arg3 - sub arg2, arg2, #16 - vld1.64 {q1}, [arg2] -CPU_LE( vrev64.8 q1, q1 ) - vswp d2, d3 - - // get rid of the extra data that was loaded before - // load the shift constant - adr ip, tbl_shf_table + 16 - sub ip, ip, arg3 - vld1.8 {q0}, [ip] - - // shift v2 to the left by arg3 bytes - vtbl.8 d4, {d14-d15}, d0 - vtbl.8 d5, {d14-d15}, d1 - - // shift v7 to the right by 16-arg3 bytes - vmov.i8 q9, #0x80 - veor.8 q0, q0, q9 - vtbl.8 d18, {d14-d15}, d0 - vtbl.8 d19, {d14-d15}, d1 - - // blend - vshr.s8 q0, q0, #7 // convert to 8-bit mask - vbsl.8 q0, q2, q1 - - // fold 16 Bytes - vmull.p64 q8, d18, d20 - vmull.p64 q7, d19, d21 +// +// u16 crc_t10dif_pmull(u16 init_crc, const u8 *buf, size_t len); +// +// Assumes len >= 16. +// +ENTRY(crc_t10dif_pmull) + + // For sizes less than 256 bytes, we can't fold 128 bytes at a time. + cmp len, #256 + blt .Lless_than_256_bytes + + __adrl fold_consts_ptr, .Lfold_across_128_bytes_consts + + // Load the first 128 data bytes. Byte swapping is necessary to make + // the bit order match the polynomial coefficient order. + vld1.64 {q0-q1}, [buf]! + vld1.64 {q2-q3}, [buf]! + vld1.64 {q4-q5}, [buf]! + vld1.64 {q6-q7}, [buf]! +CPU_LE( vrev64.8 q0, q0 ) +CPU_LE( vrev64.8 q1, q1 ) +CPU_LE( vrev64.8 q2, q2 ) +CPU_LE( vrev64.8 q3, q3 ) +CPU_LE( vrev64.8 q4, q4 ) +CPU_LE( vrev64.8 q5, q5 ) +CPU_LE( vrev64.8 q6, q6 ) +CPU_LE( vrev64.8 q7, q7 ) + vswp q0l, q0h + vswp q1l, q1h + vswp q2l, q2h + vswp q3l, q3h + vswp q4l, q4h + vswp q5l, q5h + vswp q6l, q6h + vswp q7l, q7h + + // XOR the first 16 data *bits* with the initial CRC value. + vmov.i8 q8h, #0 + vmov.u16 q8h[3], init_crc + veor q0h, q0h, q8h + + // Load the constants for folding across 128 bytes. + vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]! + + // Subtract 128 for the 128 data bytes just consumed. Subtract another + // 128 to simplify the termination condition of the following loop. + sub len, len, #256 + + // While >= 128 data bytes remain (not counting q0-q7), fold the 128 + // bytes q0-q7 into them, storing the result back into q0-q7. +.Lfold_128_bytes_loop: + fold_32_bytes q0, q1 + fold_32_bytes q2, q3 + fold_32_bytes q4, q5 + fold_32_bytes q6, q7 + subs len, len, #128 + bge .Lfold_128_bytes_loop + + // Now fold the 112 bytes in q0-q6 into the 16 bytes in q7. + + // Fold across 64 bytes. + vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]! + fold_16_bytes q0, q4 + fold_16_bytes q1, q5 + fold_16_bytes q2, q6 + fold_16_bytes q3, q7, 1 + // Fold across 32 bytes. + fold_16_bytes q4, q6 + fold_16_bytes q5, q7, 1 + // Fold across 16 bytes. + fold_16_bytes q6, q7 + + // Add 128 to get the correct number of data bytes remaining in 0...127 + // (not counting q7), following the previous extra subtraction by 128. + // Then subtract 16 to simplify the termination condition of the + // following loop. + adds len, len, #(128-16) + + // While >= 16 data bytes remain (not counting q7), fold the 16 bytes q7 + // into them, storing the result back into q7. + blt .Lfold_16_bytes_loop_done +.Lfold_16_bytes_loop: + vmull.p64 q8, q7l, FOLD_CONST_L + vmull.p64 q7, q7h, FOLD_CONST_H veor.8 q7, q7, q8 + vld1.64 {q0}, [buf]! 
+CPU_LE( vrev64.8 q0, q0 ) + vswp q0l, q0h veor.8 q7, q7, q0 - -_128_done: - // compute crc of a 128-bit value - vldr d20, rk5 - vldr d21, rk6 // rk5 and rk6 in xmm10 - - // 64b fold - vext.8 q0, qzr, q7, #8 - vmull.p64 q7, d15, d20 + subs len, len, #16 + bge .Lfold_16_bytes_loop + +.Lfold_16_bytes_loop_done: + // Add 16 to get the correct number of data bytes remaining in 0...15 + // (not counting q7), following the previous extra subtraction by 16. + adds len, len, #16 + beq .Lreduce_final_16_bytes + +.Lhandle_partial_segment: + // Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first + // 16 bytes are in q7 and the rest are the remaining data in 'buf'. To + // do this without needing a fold constant for each possible 'len', + // redivide the bytes into a first chunk of 'len' bytes and a second + // chunk of 16 bytes, then fold the first chunk into the second. + + // q0 = last 16 original data bytes + add buf, buf, len + sub buf, buf, #16 + vld1.64 {q0}, [buf] +CPU_LE( vrev64.8 q0, q0 ) + vswp q0l, q0h + + // q1 = high order part of second chunk: q7 left-shifted by 'len' bytes. + __adrl r3, .Lbyteshift_table + 16 + sub r3, r3, len + vld1.8 {q2}, [r3] + vtbl.8 q1l, {q7l-q7h}, q2l + vtbl.8 q1h, {q7l-q7h}, q2h + + // q3 = first chunk: q7 right-shifted by '16-len' bytes. + vmov.i8 q3, #0x80 + veor.8 q2, q2, q3 + vtbl.8 q3l, {q7l-q7h}, q2l + vtbl.8 q3h, {q7l-q7h}, q2h + + // Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes. + vshr.s8 q2, q2, #7 + + // q2 = second chunk: 'len' bytes from q0 (low-order bytes), + // then '16-len' bytes from q1 (high-order bytes). + vbsl.8 q2, q1, q0 + + // Fold the first chunk into the second chunk, storing the result in q7. + vmull.p64 q0, q3l, FOLD_CONST_L + vmull.p64 q7, q3h, FOLD_CONST_H veor.8 q7, q7, q0 + veor.8 q7, q7, q2 + +.Lreduce_final_16_bytes: + // Reduce the 128-bit value M(x), stored in q7, to the final 16-bit CRC. + + // Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'. + vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]! + + // Fold the high 64 bits into the low 64 bits, while also multiplying by + // x^64. This produces a 128-bit value congruent to x^64 * M(x) and + // whose low 48 bits are 0. + vmull.p64 q0, q7h, FOLD_CONST_H // high bits * x^48 * (x^80 mod G(x)) + veor.8 q0h, q0h, q7l // + low bits * x^64 + + // Fold the high 32 bits into the low 96 bits. This produces a 96-bit + // value congruent to x^64 * M(x) and whose low 48 bits are 0. + vmov.i8 q1, #0 + vmov s4, s3 // extract high 32 bits + vmov s3, s5 // zero high 32 bits + vmull.p64 q1, q1l, FOLD_CONST_L // high 32 bits * x^48 * (x^48 mod G(x)) + veor.8 q0, q0, q1 // + low bits + + // Load G(x) and floor(x^48 / G(x)). + vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128] + + // Use Barrett reduction to compute the final CRC value. + vmull.p64 q1, q0h, FOLD_CONST_H // high 32 bits * floor(x^48 / G(x)) + vshr.u64 q1l, q1l, #32 // /= x^32 + vmull.p64 q1, q1l, FOLD_CONST_L // *= G(x) + vshr.u64 q0l, q0l, #48 + veor.8 q0l, q0l, q1l // + low 16 nonzero bits + // Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of q0. 
+ + vmov.u16 r0, q0l[0] + bx lr - // 32b fold - vext.8 q0, q7, qzr, #12 - vmov s31, s3 - vmull.p64 q0, d0, d21 - veor.8 q7, q0, q7 - - // barrett reduction -_barrett: - vldr d20, rk7 - vldr d21, rk8 - - vmull.p64 q0, d15, d20 - vext.8 q0, qzr, q0, #12 - vmull.p64 q0, d1, d21 - vext.8 q0, qzr, q0, #12 - veor.8 q7, q7, q0 - vmov r0, s29 +.Lless_than_256_bytes: + // Checksumming a buffer of length 16...255 bytes -_cleanup: - // scale the result back to 16 bits - lsr r0, r0, #16 - bx lr + __adrl fold_consts_ptr, .Lfold_across_16_bytes_consts -_less_than_128: - teq arg3, #0 - beq _cleanup + // Load the first 16 data bytes. + vld1.64 {q7}, [buf]! +CPU_LE( vrev64.8 q7, q7 ) + vswp q7l, q7h - vmov.i8 q0, #0 - vmov s3, arg1_low32 // get the initial crc value + // XOR the first 16 data *bits* with the initial CRC value. + vmov.i8 q0h, #0 + vmov.u16 q0h[3], init_crc + veor.8 q7h, q7h, q0h - vld1.64 {q7}, [arg2, :128]! -CPU_LE( vrev64.8 q7, q7 ) - vswp d14, d15 - veor.8 q7, q7, q0 + // Load the fold-across-16-bytes constants. + vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]! - cmp arg3, #16 - beq _128_done // exactly 16 left - blt _less_than_16_left - - // now if there is, load the constants - vldr d20, rk1 - vldr d21, rk2 // rk1 and rk2 in xmm10 - - // check if there is enough buffer to be able to fold 16B at a time - subs arg3, arg3, #32 - addlt arg3, arg3, #16 - blt _get_last_two_regs - b _16B_reduction_loop - -_less_than_16_left: - // shl r9, 4 - adr ip, tbl_shf_table + 16 - sub ip, ip, arg3 - vld1.8 {q0}, [ip] - vmov.i8 q9, #0x80 - veor.8 q0, q0, q9 - vtbl.8 d18, {d14-d15}, d0 - vtbl.8 d15, {d14-d15}, d1 - vmov d14, d18 - b _128_done + cmp len, #16 + beq .Lreduce_final_16_bytes // len == 16 + subs len, len, #32 + addlt len, len, #16 + blt .Lhandle_partial_segment // 17 <= len <= 31 + b .Lfold_16_bytes_loop // 32 <= len <= 255 ENDPROC(crc_t10dif_pmull) -// precomputed constants -// these constants are precomputed from the poly: -// 0x8bb70000 (0x8bb7 scaled to 32 bits) + .section ".rodata", "a" .align 4 -// Q = 0x18BB70000 -// rk1 = 2^(32*3) mod Q << 32 -// rk2 = 2^(32*5) mod Q << 32 -// rk3 = 2^(32*15) mod Q << 32 -// rk4 = 2^(32*17) mod Q << 32 -// rk5 = 2^(32*3) mod Q << 32 -// rk6 = 2^(32*2) mod Q << 32 -// rk7 = floor(2^64/Q) -// rk8 = Q - -rk3: .quad 0x9d9d000000000000 -rk4: .quad 0x7cf5000000000000 -rk5: .quad 0x2d56000000000000 -rk6: .quad 0x1368000000000000 -rk7: .quad 0x00000001f65a57f8 -rk8: .quad 0x000000018bb70000 -rk9: .quad 0xceae000000000000 -rk10: .quad 0xbfd6000000000000 -rk11: .quad 0x1e16000000000000 -rk12: .quad 0x713c000000000000 -rk13: .quad 0xf7f9000000000000 -rk14: .quad 0x80a6000000000000 -rk15: .quad 0x044c000000000000 -rk16: .quad 0xe658000000000000 -rk17: .quad 0xad18000000000000 -rk18: .quad 0xa497000000000000 -rk19: .quad 0x6ee3000000000000 -rk20: .quad 0xe7b5000000000000 -rk1: .quad 0x2d56000000000000 -rk2: .quad 0x06df000000000000 - -tbl_shf_table: -// use these values for shift constants for the tbl/tbx instruction -// different alignments result in values as shown: -// DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1 -// DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-3) / shr2 -// DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-4) / shr3 -// DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4 -// DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5 -// DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6 -// DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7 -// DDQ 0x07060504030201008f8e8d8c8b8a8988 # 
shl 8 (16-8) / shr8 -// DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9 -// DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10 -// DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11 -// DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12 -// DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13 -// DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14 -// DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15 +// Fold constants precomputed from the polynomial 0x18bb7 +// G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0 +.Lfold_across_128_bytes_consts: + .quad 0x0000000000006123 // x^(8*128) mod G(x) + .quad 0x0000000000002295 // x^(8*128+64) mod G(x) +// .Lfold_across_64_bytes_consts: + .quad 0x0000000000001069 // x^(4*128) mod G(x) + .quad 0x000000000000dd31 // x^(4*128+64) mod G(x) +// .Lfold_across_32_bytes_consts: + .quad 0x000000000000857d // x^(2*128) mod G(x) + .quad 0x0000000000007acc // x^(2*128+64) mod G(x) +.Lfold_across_16_bytes_consts: + .quad 0x000000000000a010 // x^(1*128) mod G(x) + .quad 0x0000000000001faa // x^(1*128+64) mod G(x) +// .Lfinal_fold_consts: + .quad 0x1368000000000000 // x^48 * (x^48 mod G(x)) + .quad 0x2d56000000000000 // x^48 * (x^80 mod G(x)) +// .Lbarrett_reduction_consts: + .quad 0x0000000000018bb7 // G(x) + .quad 0x00000001f65a57f8 // floor(x^48 / G(x)) + +// For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 - +// len] is the index vector to shift left by 'len' bytes, and is also {0x80, +// ..., 0x80} XOR the index vector to shift right by '16 - len' bytes. +.Lbyteshift_table: .byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87 .byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 diff --git a/arch/arm/crypto/crct10dif-ce-glue.c b/arch/arm/crypto/crct10dif-ce-glue.c @@ -21,7 +21,7 @@ #define CRC_T10DIF_PMULL_CHUNK_SIZE 16U -asmlinkage u16 crc_t10dif_pmull(u16 init_crc, const u8 buf[], u32 len); +asmlinkage u16 crc_t10dif_pmull(u16 init_crc, const u8 *buf, size_t len); static int crct10dif_init(struct shash_desc *desc) { @@ -35,26 +35,15 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data, unsigned int length) { u16 *crc = shash_desc_ctx(desc); - unsigned int l; - if (!may_use_simd()) { - *crc = crc_t10dif_generic(*crc, data, length); + if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) { + kernel_neon_begin(); + *crc = crc_t10dif_pmull(*crc, data, length); + kernel_neon_end(); } else { - if (unlikely((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) { - l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE - - ((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE)); - - *crc = crc_t10dif_generic(*crc, data, l); - - length -= l; - data += l; - } - if (length > 0) { - kernel_neon_begin(); - *crc = crc_t10dif_pmull(*crc, data, length); - kernel_neon_end(); - } + *crc = crc_t10dif_generic(*crc, data, length); } + return 0; } diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S @@ -74,12 +74,13 @@ ENTRY(ce_aes_ccm_auth_data) beq 10f ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */ b 7b -8: mov w7, w8 +8: cbz w8, 91f + mov w7, w8 add w8, w8, #16 9: ext v1.16b, v1.16b, v1.16b, #1 adds w7, w7, #1 bne 9b - eor v0.16b, v0.16b, v1.16b +91: eor v0.16b, v0.16b, v1.16b st1 {v0.16b}, [x0] 10: str w8, [x3] ret diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c @@ -125,7 +125,7 @@ static void ccm_update_mac(struct 
crypto_aes_ctx *key, u8 mac[], u8 const in[], abytes -= added; } - while (abytes > AES_BLOCK_SIZE) { + while (abytes >= AES_BLOCK_SIZE) { __aes_arm64_encrypt(key->key_enc, mac, mac, num_rounds(key)); crypto_xor(mac, in, AES_BLOCK_SIZE); @@ -139,8 +139,6 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[], num_rounds(key)); crypto_xor(mac, in, abytes); *macp = abytes; - } else { - *macp = 0; } } } @@ -255,7 +253,7 @@ static int ccm_encrypt(struct aead_request *req) /* preserve the original iv for the final round */ memcpy(buf, req->iv, AES_BLOCK_SIZE); - err = skcipher_walk_aead_encrypt(&walk, req, true); + err = skcipher_walk_aead_encrypt(&walk, req, false); if (may_use_simd()) { while (walk.nbytes) { @@ -313,7 +311,7 @@ static int ccm_decrypt(struct aead_request *req) /* preserve the original iv for the final round */ memcpy(buf, req->iv, AES_BLOCK_SIZE); - err = skcipher_walk_aead_decrypt(&walk, req, true); + err = skcipher_walk_aead_decrypt(&walk, req, false); if (may_use_simd()) { while (walk.nbytes) { diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S @@ -320,8 +320,7 @@ AES_ENTRY(aes_ctr_encrypt) .Lctrtailblock: st1 {v0.16b}, [x0] - ldp x29, x30, [sp], #16 - ret + b .Lctrout .Lctrcarry: umov x7, v4.d[0] /* load upper word of ctr */ diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S @@ -971,18 +971,22 @@ CPU_LE( rev x8, x8 ) 8: next_ctr v0 st1 {v0.16b}, [x24] - cbz x23, 0f + cbz x23, .Lctr_done cond_yield_neon 98b b 99b -0: frame_pop +.Lctr_done: + frame_pop ret /* * If we are handling the tail of the input (x6 != NULL), return the * final keystream block back to the caller. */ +0: cbz x25, 8b + st1 {v0.16b}, [x25] + b 8b 1: cbz x25, 8b st1 {v1.16b}, [x25] b 8b diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S @@ -2,12 +2,14 @@ // Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions // // Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org> +// Copyright (C) 2019 Google LLC <ebiggers@google.com> // // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License version 2 as // published by the Free Software Foundation. // +// Derived from the x86 version: // // Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions // @@ -54,19 +56,11 @@ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// -// Function API: -// UINT16 crc_t10dif_pcl( -// UINT16 init_crc, //initial CRC value, 16 bits -// const unsigned char *buf, //buffer pointer to calculate CRC on -// UINT64 len //buffer length in bytes (64-bit data) -// ); -// // Reference paper titled "Fast CRC Computation for Generic // Polynomials Using PCLMULQDQ Instruction" // URL: http://www.intel.com/content/dam/www/public/us/en/documents // /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf // -// #include <linux/linkage.h> #include <asm/assembler.h> @@ -74,14 +68,14 @@ .text .cpu generic+crypto - arg1_low32 .req w19 - arg2 .req x20 - arg3 .req x21 + init_crc .req w19 + buf .req x20 + len .req x21 + fold_consts_ptr .req x22 - vzr .req v13 + fold_consts .req v10 ad .req v14 - bd .req v10 k00_16 .req v15 k32_48 .req v16 @@ -143,11 +137,11 @@ __pmull_p8_core: ext t5.8b, ad.8b, ad.8b, #2 // A2 ext t6.8b, ad.8b, ad.8b, #3 // A3 - pmull t4.8h, t4.8b, bd.8b // F = A1*B + pmull t4.8h, t4.8b, fold_consts.8b // F = A1*B pmull t8.8h, ad.8b, bd1.8b // E = A*B1 - pmull t5.8h, t5.8b, bd.8b // H = A2*B + pmull t5.8h, t5.8b, fold_consts.8b // H = A2*B pmull t7.8h, ad.8b, bd2.8b // G = A*B2 - pmull t6.8h, t6.8b, bd.8b // J = A3*B + pmull t6.8h, t6.8b, fold_consts.8b // J = A3*B pmull t9.8h, ad.8b, bd3.8b // I = A*B3 pmull t3.8h, ad.8b, bd4.8b // K = A*B4 b 0f @@ -157,11 +151,11 @@ __pmull_p8_core: tbl t5.16b, {ad.16b}, perm2.16b // A2 tbl t6.16b, {ad.16b}, perm3.16b // A3 - pmull2 t4.8h, t4.16b, bd.16b // F = A1*B + pmull2 t4.8h, t4.16b, fold_consts.16b // F = A1*B pmull2 t8.8h, ad.16b, bd1.16b // E = A*B1 - pmull2 t5.8h, t5.16b, bd.16b // H = A2*B + pmull2 t5.8h, t5.16b, fold_consts.16b // H = A2*B pmull2 t7.8h, ad.16b, bd2.16b // G = A*B2 - pmull2 t6.8h, t6.16b, bd.16b // J = A3*B + pmull2 t6.8h, t6.16b, fold_consts.16b // J = A3*B pmull2 t9.8h, ad.16b, bd3.16b // I = A*B3 pmull2 t3.8h, ad.16b, bd4.16b // K = A*B4 @@ -203,14 +197,14 @@ __pmull_p8_core: ENDPROC(__pmull_p8_core) .macro __pmull_p8, rq, ad, bd, i - .ifnc \bd, v10 + .ifnc \bd, fold_consts .err .endif mov ad.16b, \ad\().16b .ifb \i - pmull \rq\().8h, \ad\().8b, bd.8b // D = A*B + pmull \rq\().8h, \ad\().8b, \bd\().8b // D = A*B .else - pmull2 \rq\().8h, \ad\().16b, bd.16b // D = A*B + pmull2 \rq\().8h, \ad\().16b, \bd\().16b // D = A*B .endif bl .L__pmull_p8_core\i @@ -219,17 +213,19 @@ ENDPROC(__pmull_p8_core) eor \rq\().16b, \rq\().16b, t6.16b .endm - .macro fold64, p, reg1, reg2 - ldp q11, q12, [arg2], #0x20 + // Fold reg1, reg2 into the next 32 data bytes, storing the result back + // into reg1, reg2. 
+ .macro fold_32_bytes, p, reg1, reg2 + ldp q11, q12, [buf], #0x20 - __pmull_\p v8, \reg1, v10, 2 - __pmull_\p \reg1, \reg1, v10 + __pmull_\p v8, \reg1, fold_consts, 2 + __pmull_\p \reg1, \reg1, fold_consts CPU_LE( rev64 v11.16b, v11.16b ) CPU_LE( rev64 v12.16b, v12.16b ) - __pmull_\p v9, \reg2, v10, 2 - __pmull_\p \reg2, \reg2, v10 + __pmull_\p v9, \reg2, fold_consts, 2 + __pmull_\p \reg2, \reg2, fold_consts CPU_LE( ext v11.16b, v11.16b, v11.16b, #8 ) CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 ) @@ -240,15 +236,16 @@ CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 ) eor \reg2\().16b, \reg2\().16b, v12.16b .endm - .macro fold16, p, reg, rk - __pmull_\p v8, \reg, v10 - __pmull_\p \reg, \reg, v10, 2 - .ifnb \rk - ldr_l q10, \rk, x8 - __pmull_pre_\p v10 + // Fold src_reg into dst_reg, optionally loading the next fold constants + .macro fold_16_bytes, p, src_reg, dst_reg, load_next_consts + __pmull_\p v8, \src_reg, fold_consts + __pmull_\p \src_reg, \src_reg, fold_consts, 2 + .ifnb \load_next_consts + ld1 {fold_consts.2d}, [fold_consts_ptr], #16 + __pmull_pre_\p fold_consts .endif - eor v7.16b, v7.16b, v8.16b - eor v7.16b, v7.16b, \reg\().16b + eor \dst_reg\().16b, \dst_reg\().16b, v8.16b + eor \dst_reg\().16b, \dst_reg\().16b, \src_reg\().16b .endm .macro __pmull_p64, rd, rn, rm, n @@ -260,40 +257,27 @@ CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 ) .endm .macro crc_t10dif_pmull, p - frame_push 3, 128 + frame_push 4, 128 - mov arg1_low32, w0 - mov arg2, x1 - mov arg3, x2 - - movi vzr.16b, #0 // init zero register + mov init_crc, w0 + mov buf, x1 + mov len, x2 __pmull_init_\p - // adjust the 16-bit initial_crc value, scale it to 32 bits - lsl arg1_low32, arg1_low32, #16 - - // check if smaller than 256 - cmp arg3, #256 - - // for sizes less than 128, we can't fold 64B at a time... - b.lt .L_less_than_128_\@ + // For sizes less than 256 bytes, we can't fold 128 bytes at a time. + cmp len, #256 + b.lt .Lless_than_256_bytes_\@ - // load the initial crc value - // crc value does not need to be byte-reflected, but it needs - // to be moved to the high part of the register. - // because data will be byte-reflected and will align with - // initial crc at correct place. - movi v10.16b, #0 - mov v10.s[3], arg1_low32 // initial crc - - // receive the initial 64B data, xor the initial crc value - ldp q0, q1, [arg2] - ldp q2, q3, [arg2, #0x20] - ldp q4, q5, [arg2, #0x40] - ldp q6, q7, [arg2, #0x60] - add arg2, arg2, #0x80 + adr_l fold_consts_ptr, .Lfold_across_128_bytes_consts + // Load the first 128 data bytes. Byte swapping is necessary to make + // the bit order match the polynomial coefficient order. + ldp q0, q1, [buf] + ldp q2, q3, [buf, #0x20] + ldp q4, q5, [buf, #0x40] + ldp q6, q7, [buf, #0x60] + add buf, buf, #0x80 CPU_LE( rev64 v0.16b, v0.16b ) CPU_LE( rev64 v1.16b, v1.16b ) CPU_LE( rev64 v2.16b, v2.16b ) @@ -302,7 +286,6 @@ CPU_LE( rev64 v4.16b, v4.16b ) CPU_LE( rev64 v5.16b, v5.16b ) CPU_LE( rev64 v6.16b, v6.16b ) CPU_LE( rev64 v7.16b, v7.16b ) - CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 ) CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 ) CPU_LE( ext v2.16b, v2.16b, v2.16b, #8 ) @@ -312,36 +295,29 @@ CPU_LE( ext v5.16b, v5.16b, v5.16b, #8 ) CPU_LE( ext v6.16b, v6.16b, v6.16b, #8 ) CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 ) - // XOR the initial_crc value - eor v0.16b, v0.16b, v10.16b - - ldr_l q10, rk3, x8 // xmm10 has rk3 and rk4 - // type of pmull instruction - // will determine which constant to use - __pmull_pre_\p v10 + // XOR the first 16 data *bits* with the initial CRC value. 
+ movi v8.16b, #0 + mov v8.h[7], init_crc + eor v0.16b, v0.16b, v8.16b - // - // we subtract 256 instead of 128 to save one instruction from the loop - // - sub arg3, arg3, #256 + // Load the constants for folding across 128 bytes. + ld1 {fold_consts.2d}, [fold_consts_ptr] + __pmull_pre_\p fold_consts - // at this section of the code, there is 64*x+y (0<=y<64) bytes of - // buffer. The _fold_64_B_loop will fold 64B at a time - // until we have 64+y Bytes of buffer + // Subtract 128 for the 128 data bytes just consumed. Subtract another + // 128 to simplify the termination condition of the following loop. + sub len, len, #256 - // fold 64B at a time. This section of the code folds 4 vector - // registers in parallel -.L_fold_64_B_loop_\@: + // While >= 128 data bytes remain (not counting v0-v7), fold the 128 + // bytes v0-v7 into them, storing the result back into v0-v7. +.Lfold_128_bytes_loop_\@: + fold_32_bytes \p, v0, v1 + fold_32_bytes \p, v2, v3 + fold_32_bytes \p, v4, v5 + fold_32_bytes \p, v6, v7 - fold64 \p, v0, v1 - fold64 \p, v2, v3 - fold64 \p, v4, v5 - fold64 \p, v6, v7 - - subs arg3, arg3, #128 - - // check if there is another 64B in the buffer to be able to fold - b.lt .L_fold_64_B_end_\@ + subs len, len, #128 + b.lt .Lfold_128_bytes_loop_done_\@ if_will_cond_yield_neon stp q0, q1, [sp, #.Lframe_local_offset] @@ -353,228 +329,207 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 ) ldp q2, q3, [sp, #.Lframe_local_offset + 32] ldp q4, q5, [sp, #.Lframe_local_offset + 64] ldp q6, q7, [sp, #.Lframe_local_offset + 96] - ldr_l q10, rk3, x8 - movi vzr.16b, #0 // init zero register + ld1 {fold_consts.2d}, [fold_consts_ptr] __pmull_init_\p - __pmull_pre_\p v10 + __pmull_pre_\p fold_consts endif_yield_neon - b .L_fold_64_B_loop_\@ - -.L_fold_64_B_end_\@: - // at this point, the buffer pointer is pointing at the last y Bytes - // of the buffer the 64B of folded data is in 4 of the vector - // registers: v0, v1, v2, v3 - - // fold the 8 vector registers to 1 vector register with different - // constants - - ldr_l q10, rk9, x8 - __pmull_pre_\p v10 - - fold16 \p, v0, rk11 - fold16 \p, v1, rk13 - fold16 \p, v2, rk15 - fold16 \p, v3, rk17 - fold16 \p, v4, rk19 - fold16 \p, v5, rk1 - fold16 \p, v6 - - // instead of 64, we add 48 to the loop counter to save 1 instruction - // from the loop instead of a cmp instruction, we use the negative - // flag with the jl instruction - adds arg3, arg3, #(128-16) - b.lt .L_final_reduction_for_128_\@ - - // now we have 16+y bytes left to reduce. 16 Bytes is in register v7 - // and the rest is in memory. We can fold 16 bytes at a time if y>=16 - // continue folding 16B at a time - -.L_16B_reduction_loop_\@: - __pmull_\p v8, v7, v10 - __pmull_\p v7, v7, v10, 2 + b .Lfold_128_bytes_loop_\@ + +.Lfold_128_bytes_loop_done_\@: + + // Now fold the 112 bytes in v0-v6 into the 16 bytes in v7. + + // Fold across 64 bytes. + add fold_consts_ptr, fold_consts_ptr, #16 + ld1 {fold_consts.2d}, [fold_consts_ptr], #16 + __pmull_pre_\p fold_consts + fold_16_bytes \p, v0, v4 + fold_16_bytes \p, v1, v5 + fold_16_bytes \p, v2, v6 + fold_16_bytes \p, v3, v7, 1 + // Fold across 32 bytes. + fold_16_bytes \p, v4, v6 + fold_16_bytes \p, v5, v7, 1 + // Fold across 16 bytes. + fold_16_bytes \p, v6, v7 + + // Add 128 to get the correct number of data bytes remaining in 0...127 + // (not counting v7), following the previous extra subtraction by 128. + // Then subtract 16 to simplify the termination condition of the + // following loop. 
+ adds len, len, #(128-16) + + // While >= 16 data bytes remain (not counting v7), fold the 16 bytes v7 + // into them, storing the result back into v7. + b.lt .Lfold_16_bytes_loop_done_\@ +.Lfold_16_bytes_loop_\@: + __pmull_\p v8, v7, fold_consts + __pmull_\p v7, v7, fold_consts, 2 eor v7.16b, v7.16b, v8.16b - - ldr q0, [arg2], #16 + ldr q0, [buf], #16 CPU_LE( rev64 v0.16b, v0.16b ) CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 ) eor v7.16b, v7.16b, v0.16b - subs arg3, arg3, #16 - - // instead of a cmp instruction, we utilize the flags with the - // jge instruction equivalent of: cmp arg3, 16-16 - // check if there is any more 16B in the buffer to be able to fold - b.ge .L_16B_reduction_loop_\@ - - // now we have 16+z bytes left to reduce, where 0<= z < 16. - // first, we reduce the data in the xmm7 register - -.L_final_reduction_for_128_\@: - // check if any more data to fold. If not, compute the CRC of - // the final 128 bits - adds arg3, arg3, #16 - b.eq .L_128_done_\@ - - // here we are getting data that is less than 16 bytes. - // since we know that there was data before the pointer, we can - // offset the input pointer before the actual point, to receive - // exactly 16 bytes. after that the registers need to be adjusted. -.L_get_last_two_regs_\@: - add arg2, arg2, arg3 - ldr q1, [arg2, #-16] -CPU_LE( rev64 v1.16b, v1.16b ) -CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 ) - - // get rid of the extra data that was loaded before - // load the shift constant - adr_l x4, tbl_shf_table + 16 - sub x4, x4, arg3 - ld1 {v0.16b}, [x4] - - // shift v2 to the left by arg3 bytes - tbl v2.16b, {v7.16b}, v0.16b - - // shift v7 to the right by 16-arg3 bytes - movi v9.16b, #0x80 - eor v0.16b, v0.16b, v9.16b - tbl v7.16b, {v7.16b}, v0.16b - - // blend - sshr v0.16b, v0.16b, #7 // convert to 8-bit mask - bsl v0.16b, v2.16b, v1.16b - - // fold 16 Bytes - __pmull_\p v8, v7, v10 - __pmull_\p v7, v7, v10, 2 - eor v7.16b, v7.16b, v8.16b - eor v7.16b, v7.16b, v0.16b + subs len, len, #16 + b.ge .Lfold_16_bytes_loop_\@ + +.Lfold_16_bytes_loop_done_\@: + // Add 16 to get the correct number of data bytes remaining in 0...15 + // (not counting v7), following the previous extra subtraction by 16. + adds len, len, #16 + b.eq .Lreduce_final_16_bytes_\@ + +.Lhandle_partial_segment_\@: + // Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first + // 16 bytes are in v7 and the rest are the remaining data in 'buf'. To + // do this without needing a fold constant for each possible 'len', + // redivide the bytes into a first chunk of 'len' bytes and a second + // chunk of 16 bytes, then fold the first chunk into the second. + + // v0 = last 16 original data bytes + add buf, buf, len + ldr q0, [buf, #-16] +CPU_LE( rev64 v0.16b, v0.16b ) +CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 ) -.L_128_done_\@: - // compute crc of a 128-bit value - ldr_l q10, rk5, x8 // rk5 and rk6 in xmm10 - __pmull_pre_\p v10 + // v1 = high order part of second chunk: v7 left-shifted by 'len' bytes. + adr_l x4, .Lbyteshift_table + 16 + sub x4, x4, len + ld1 {v2.16b}, [x4] + tbl v1.16b, {v7.16b}, v2.16b - // 64b fold - ext v0.16b, vzr.16b, v7.16b, #8 - mov v7.d[0], v7.d[1] - __pmull_\p v7, v7, v10 - eor v7.16b, v7.16b, v0.16b + // v3 = first chunk: v7 right-shifted by '16-len' bytes. 
+ movi v3.16b, #0x80 + eor v2.16b, v2.16b, v3.16b + tbl v3.16b, {v7.16b}, v2.16b - // 32b fold - ext v0.16b, v7.16b, vzr.16b, #4 - mov v7.s[3], vzr.s[0] - __pmull_\p v0, v0, v10, 2 - eor v7.16b, v7.16b, v0.16b + // Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes. + sshr v2.16b, v2.16b, #7 - // barrett reduction - ldr_l q10, rk7, x8 - __pmull_pre_\p v10 - mov v0.d[0], v7.d[1] + // v2 = second chunk: 'len' bytes from v0 (low-order bytes), + // then '16-len' bytes from v1 (high-order bytes). + bsl v2.16b, v1.16b, v0.16b - __pmull_\p v0, v0, v10 - ext v0.16b, vzr.16b, v0.16b, #12 - __pmull_\p v0, v0, v10, 2 - ext v0.16b, vzr.16b, v0.16b, #12 + // Fold the first chunk into the second chunk, storing the result in v7. + __pmull_\p v0, v3, fold_consts + __pmull_\p v7, v3, fold_consts, 2 eor v7.16b, v7.16b, v0.16b - mov w0, v7.s[1] - -.L_cleanup_\@: - // scale the result back to 16 bits - lsr x0, x0, #16 + eor v7.16b, v7.16b, v2.16b + +.Lreduce_final_16_bytes_\@: + // Reduce the 128-bit value M(x), stored in v7, to the final 16-bit CRC. + + movi v2.16b, #0 // init zero register + + // Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'. + ld1 {fold_consts.2d}, [fold_consts_ptr], #16 + __pmull_pre_\p fold_consts + + // Fold the high 64 bits into the low 64 bits, while also multiplying by + // x^64. This produces a 128-bit value congruent to x^64 * M(x) and + // whose low 48 bits are 0. + ext v0.16b, v2.16b, v7.16b, #8 + __pmull_\p v7, v7, fold_consts, 2 // high bits * x^48 * (x^80 mod G(x)) + eor v0.16b, v0.16b, v7.16b // + low bits * x^64 + + // Fold the high 32 bits into the low 96 bits. This produces a 96-bit + // value congruent to x^64 * M(x) and whose low 48 bits are 0. + ext v1.16b, v0.16b, v2.16b, #12 // extract high 32 bits + mov v0.s[3], v2.s[0] // zero high 32 bits + __pmull_\p v1, v1, fold_consts // high 32 bits * x^48 * (x^48 mod G(x)) + eor v0.16b, v0.16b, v1.16b // + low bits + + // Load G(x) and floor(x^48 / G(x)). + ld1 {fold_consts.2d}, [fold_consts_ptr] + __pmull_pre_\p fold_consts + + // Use Barrett reduction to compute the final CRC value. + __pmull_\p v1, v0, fold_consts, 2 // high 32 bits * floor(x^48 / G(x)) + ushr v1.2d, v1.2d, #32 // /= x^32 + __pmull_\p v1, v1, fold_consts // *= G(x) + ushr v0.2d, v0.2d, #48 + eor v0.16b, v0.16b, v1.16b // + low 16 nonzero bits + // Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of v0. + + umov w0, v0.h[0] frame_pop ret -.L_less_than_128_\@: - cbz arg3, .L_cleanup_\@ +.Lless_than_256_bytes_\@: + // Checksumming a buffer of length 16...255 bytes - movi v0.16b, #0 - mov v0.s[3], arg1_low32 // get the initial crc value + adr_l fold_consts_ptr, .Lfold_across_16_bytes_consts - ldr q7, [arg2], #0x10 + // Load the first 16 data bytes. + ldr q7, [buf], #0x10 CPU_LE( rev64 v7.16b, v7.16b ) CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 ) - eor v7.16b, v7.16b, v0.16b // xor the initial crc value - - cmp arg3, #16 - b.eq .L_128_done_\@ // exactly 16 left - b.lt .L_less_than_16_left_\@ - - ldr_l q10, rk1, x8 // rk1 and rk2 in xmm10 - __pmull_pre_\p v10 - - // update the counter. subtract 32 instead of 16 to save one - // instruction from the loop - subs arg3, arg3, #32 - b.ge .L_16B_reduction_loop_\@ - - add arg3, arg3, #16 - b .L_get_last_two_regs_\@ - -.L_less_than_16_left_\@: - // shl r9, 4 - adr_l x0, tbl_shf_table + 16 - sub x0, x0, arg3 - ld1 {v0.16b}, [x0] - movi v9.16b, #0x80 - eor v0.16b, v0.16b, v9.16b - tbl v7.16b, {v7.16b}, v0.16b - b .L_128_done_\@ + + // XOR the first 16 data *bits* with the initial CRC value. 
+ movi v0.16b, #0 + mov v0.h[7], init_crc + eor v7.16b, v7.16b, v0.16b + + // Load the fold-across-16-bytes constants. + ld1 {fold_consts.2d}, [fold_consts_ptr], #16 + __pmull_pre_\p fold_consts + + cmp len, #16 + b.eq .Lreduce_final_16_bytes_\@ // len == 16 + subs len, len, #32 + b.ge .Lfold_16_bytes_loop_\@ // 32 <= len <= 255 + add len, len, #16 + b .Lhandle_partial_segment_\@ // 17 <= len <= 31 .endm +// +// u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 *buf, size_t len); +// +// Assumes len >= 16. +// ENTRY(crc_t10dif_pmull_p8) crc_t10dif_pmull p8 ENDPROC(crc_t10dif_pmull_p8) .align 5 +// +// u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 *buf, size_t len); +// +// Assumes len >= 16. +// ENTRY(crc_t10dif_pmull_p64) crc_t10dif_pmull p64 ENDPROC(crc_t10dif_pmull_p64) -// precomputed constants -// these constants are precomputed from the poly: -// 0x8bb70000 (0x8bb7 scaled to 32 bits) .section ".rodata", "a" .align 4 -// Q = 0x18BB70000 -// rk1 = 2^(32*3) mod Q << 32 -// rk2 = 2^(32*5) mod Q << 32 -// rk3 = 2^(32*15) mod Q << 32 -// rk4 = 2^(32*17) mod Q << 32 -// rk5 = 2^(32*3) mod Q << 32 -// rk6 = 2^(32*2) mod Q << 32 -// rk7 = floor(2^64/Q) -// rk8 = Q - -rk1: .octa 0x06df0000000000002d56000000000000 -rk3: .octa 0x7cf50000000000009d9d000000000000 -rk5: .octa 0x13680000000000002d56000000000000 -rk7: .octa 0x000000018bb7000000000001f65a57f8 -rk9: .octa 0xbfd6000000000000ceae000000000000 -rk11: .octa 0x713c0000000000001e16000000000000 -rk13: .octa 0x80a6000000000000f7f9000000000000 -rk15: .octa 0xe658000000000000044c000000000000 -rk17: .octa 0xa497000000000000ad18000000000000 -rk19: .octa 0xe7b50000000000006ee3000000000000 - -tbl_shf_table: -// use these values for shift constants for the tbl/tbx instruction -// different alignments result in values as shown: -// DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1 -// DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-3) / shr2 -// DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-4) / shr3 -// DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4 -// DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5 -// DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6 -// DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7 -// DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8 -// DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9 -// DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10 -// DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11 -// DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12 -// DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13 -// DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14 -// DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15 +// Fold constants precomputed from the polynomial 0x18bb7 +// G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0 +.Lfold_across_128_bytes_consts: + .quad 0x0000000000006123 // x^(8*128) mod G(x) + .quad 0x0000000000002295 // x^(8*128+64) mod G(x) +// .Lfold_across_64_bytes_consts: + .quad 0x0000000000001069 // x^(4*128) mod G(x) + .quad 0x000000000000dd31 // x^(4*128+64) mod G(x) +// .Lfold_across_32_bytes_consts: + .quad 0x000000000000857d // x^(2*128) mod G(x) + .quad 0x0000000000007acc // x^(2*128+64) mod G(x) +.Lfold_across_16_bytes_consts: + .quad 0x000000000000a010 // x^(1*128) mod G(x) + .quad 0x0000000000001faa // x^(1*128+64) mod G(x) +// .Lfinal_fold_consts: + .quad 0x1368000000000000 // x^48 * (x^48 mod G(x)) + .quad 
0x2d56000000000000 // x^48 * (x^80 mod G(x)) +// .Lbarrett_reduction_consts: + .quad 0x0000000000018bb7 // G(x) + .quad 0x00000001f65a57f8 // floor(x^48 / G(x)) + +// For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 - +// len] is the index vector to shift left by 'len' bytes, and is also {0x80, +// ..., 0x80} XOR the index vector to shift right by '16 - len' bytes. +.Lbyteshift_table: .byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87 .byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c @@ -22,10 +22,8 @@ #define CRC_T10DIF_PMULL_CHUNK_SIZE 16U -asmlinkage u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 buf[], u64 len); -asmlinkage u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 buf[], u64 len); - -static u16 (*crc_t10dif_pmull)(u16 init_crc, const u8 buf[], u64 len); +asmlinkage u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 *buf, size_t len); +asmlinkage u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 *buf, size_t len); static int crct10dif_init(struct shash_desc *desc) { @@ -35,30 +33,33 @@ static int crct10dif_init(struct shash_desc *desc) return 0; } -static int crct10dif_update(struct shash_desc *desc, const u8 *data, +static int crct10dif_update_pmull_p8(struct shash_desc *desc, const u8 *data, unsigned int length) { u16 *crc = shash_desc_ctx(desc); - unsigned int l; - if (unlikely((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) { - l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE - - ((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE)); + if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) { + kernel_neon_begin(); + *crc = crc_t10dif_pmull_p8(*crc, data, length); + kernel_neon_end(); + } else { + *crc = crc_t10dif_generic(*crc, data, length); + } - *crc = crc_t10dif_generic(*crc, data, l); + return 0; +} - length -= l; - data += l; - } +static int crct10dif_update_pmull_p64(struct shash_desc *desc, const u8 *data, + unsigned int length) +{ + u16 *crc = shash_desc_ctx(desc); - if (length > 0) { - if (may_use_simd()) { - kernel_neon_begin(); - *crc = crc_t10dif_pmull(*crc, data, length); - kernel_neon_end(); - } else { - *crc = crc_t10dif_generic(*crc, data, length); - } + if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) { + kernel_neon_begin(); + *crc = crc_t10dif_pmull_p64(*crc, data, length); + kernel_neon_end(); + } else { + *crc = crc_t10dif_generic(*crc, data, length); } return 0; @@ -72,10 +73,22 @@ static int crct10dif_final(struct shash_desc *desc, u8 *out) return 0; } -static struct shash_alg crc_t10dif_alg = { +static struct shash_alg crc_t10dif_alg[] = {{ .digestsize = CRC_T10DIF_DIGEST_SIZE, .init = crct10dif_init, - .update = crct10dif_update, + .update = crct10dif_update_pmull_p8, + .final = crct10dif_final, + .descsize = CRC_T10DIF_DIGEST_SIZE, + + .base.cra_name = "crct10dif", + .base.cra_driver_name = "crct10dif-arm64-neon", + .base.cra_priority = 100, + .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, +}, { + .digestsize = CRC_T10DIF_DIGEST_SIZE, + .init = crct10dif_init, + .update = crct10dif_update_pmull_p64, .final = crct10dif_final, .descsize = CRC_T10DIF_DIGEST_SIZE, @@ -84,21 +97,25 @@ static struct shash_alg crc_t10dif_alg = { .base.cra_priority = 200, .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE, .base.cra_module = THIS_MODULE, -}; +}}; static int __init crc_t10dif_mod_init(void) { if (elf_hwcap & HWCAP_PMULL) - crc_t10dif_pmull = crc_t10dif_pmull_p64; + return 
crypto_register_shashes(crc_t10dif_alg, + ARRAY_SIZE(crc_t10dif_alg)); else - crc_t10dif_pmull = crc_t10dif_pmull_p8; - - return crypto_register_shash(&crc_t10dif_alg); + /* only register the first array element */ + return crypto_register_shash(crc_t10dif_alg); } static void __exit crc_t10dif_mod_exit(void) { - crypto_unregister_shash(&crc_t10dif_alg); + if (elf_hwcap & HWCAP_PMULL) + crypto_unregister_shashes(crc_t10dif_alg, + ARRAY_SIZE(crc_t10dif_alg)); + else + crypto_unregister_shash(crc_t10dif_alg); } module_cpu_feature_match(ASIMD, crc_t10dif_mod_init); diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c @@ -60,10 +60,6 @@ asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src, struct ghash_key const *k, const char *head); -static void (*pmull_ghash_update)(int blocks, u64 dg[], const char *src, - struct ghash_key const *k, - const char *head); - asmlinkage void pmull_gcm_encrypt(int blocks, u64 dg[], u8 dst[], const u8 src[], struct ghash_key const *k, u8 ctr[], u32 const rk[], int rounds, @@ -87,11 +83,15 @@ static int ghash_init(struct shash_desc *desc) } static void ghash_do_update(int blocks, u64 dg[], const char *src, - struct ghash_key *key, const char *head) + struct ghash_key *key, const char *head, + void (*simd_update)(int blocks, u64 dg[], + const char *src, + struct ghash_key const *k, + const char *head)) { if (likely(may_use_simd())) { kernel_neon_begin(); - pmull_ghash_update(blocks, dg, src, key, head); + simd_update(blocks, dg, src, key, head); kernel_neon_end(); } else { be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) }; @@ -119,8 +119,12 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src, /* avoid hogging the CPU for too long */ #define MAX_BLOCKS (SZ_64K / GHASH_BLOCK_SIZE) -static int ghash_update(struct shash_desc *desc, const u8 *src, - unsigned int len) +static int __ghash_update(struct shash_desc *desc, const u8 *src, + unsigned int len, + void (*simd_update)(int blocks, u64 dg[], + const char *src, + struct ghash_key const *k, + const char *head)) { struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; @@ -146,7 +150,8 @@ static int ghash_update(struct shash_desc *desc, const u8 *src, int chunk = min(blocks, MAX_BLOCKS); ghash_do_update(chunk, ctx->digest, src, key, - partial ? ctx->buf : NULL); + partial ? 
ctx->buf : NULL, + simd_update); blocks -= chunk; src += chunk * GHASH_BLOCK_SIZE; @@ -158,7 +163,19 @@ static int ghash_update(struct shash_desc *desc, const u8 *src, return 0; } -static int ghash_final(struct shash_desc *desc, u8 *dst) +static int ghash_update_p8(struct shash_desc *desc, const u8 *src, + unsigned int len) +{ + return __ghash_update(desc, src, len, pmull_ghash_update_p8); +} + +static int ghash_update_p64(struct shash_desc *desc, const u8 *src, + unsigned int len) +{ + return __ghash_update(desc, src, len, pmull_ghash_update_p64); +} + +static int ghash_final_p8(struct shash_desc *desc, u8 *dst) { struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; @@ -168,7 +185,28 @@ static int ghash_final(struct shash_desc *desc, u8 *dst) memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial); - ghash_do_update(1, ctx->digest, ctx->buf, key, NULL); + ghash_do_update(1, ctx->digest, ctx->buf, key, NULL, + pmull_ghash_update_p8); + } + put_unaligned_be64(ctx->digest[1], dst); + put_unaligned_be64(ctx->digest[0], dst + 8); + + *ctx = (struct ghash_desc_ctx){}; + return 0; +} + +static int ghash_final_p64(struct shash_desc *desc, u8 *dst) +{ + struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); + unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; + + if (partial) { + struct ghash_key *key = crypto_shash_ctx(desc->tfm); + + memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial); + + ghash_do_update(1, ctx->digest, ctx->buf, key, NULL, + pmull_ghash_update_p64); } put_unaligned_be64(ctx->digest[1], dst); put_unaligned_be64(ctx->digest[0], dst + 8); @@ -224,7 +262,21 @@ static int ghash_setkey(struct crypto_shash *tfm, return __ghash_setkey(key, inkey, keylen); } -static struct shash_alg ghash_alg = { +static struct shash_alg ghash_alg[] = {{ + .base.cra_name = "ghash", + .base.cra_driver_name = "ghash-neon", + .base.cra_priority = 100, + .base.cra_blocksize = GHASH_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct ghash_key), + .base.cra_module = THIS_MODULE, + + .digestsize = GHASH_DIGEST_SIZE, + .init = ghash_init, + .update = ghash_update_p8, + .final = ghash_final_p8, + .setkey = ghash_setkey, + .descsize = sizeof(struct ghash_desc_ctx), +}, { .base.cra_name = "ghash", .base.cra_driver_name = "ghash-ce", .base.cra_priority = 200, @@ -234,11 +286,11 @@ static struct shash_alg ghash_alg = { .digestsize = GHASH_DIGEST_SIZE, .init = ghash_init, - .update = ghash_update, - .final = ghash_final, + .update = ghash_update_p64, + .final = ghash_final_p64, .setkey = ghash_setkey, .descsize = sizeof(struct ghash_desc_ctx), -}; +}}; static int num_rounds(struct crypto_aes_ctx *ctx) { @@ -301,7 +353,8 @@ static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[], int blocks = count / GHASH_BLOCK_SIZE; ghash_do_update(blocks, dg, src, &ctx->ghash_key, - *buf_count ? buf : NULL); + *buf_count ? 
buf : NULL, + pmull_ghash_update_p64); src += blocks * GHASH_BLOCK_SIZE; count %= GHASH_BLOCK_SIZE; @@ -345,7 +398,8 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[]) if (buf_count) { memset(&buf[buf_count], 0, GHASH_BLOCK_SIZE - buf_count); - ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL); + ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL, + pmull_ghash_update_p64); } } @@ -358,7 +412,8 @@ static void gcm_final(struct aead_request *req, struct gcm_aes_ctx *ctx, lengths.a = cpu_to_be64(req->assoclen * 8); lengths.b = cpu_to_be64(cryptlen * 8); - ghash_do_update(1, dg, (void *)&lengths, &ctx->ghash_key, NULL); + ghash_do_update(1, dg, (void *)&lengths, &ctx->ghash_key, NULL, + pmull_ghash_update_p64); put_unaligned_be64(dg[1], mac); put_unaligned_be64(dg[0], mac + 8); @@ -434,7 +489,7 @@ static int gcm_encrypt(struct aead_request *req) ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg, walk.dst.virt.addr, &ctx->ghash_key, - NULL); + NULL, pmull_ghash_update_p64); err = skcipher_walk_done(&walk, walk.nbytes % (2 * AES_BLOCK_SIZE)); @@ -469,7 +524,8 @@ static int gcm_encrypt(struct aead_request *req) memcpy(buf, dst, nbytes); memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes); - ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head); + ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head, + pmull_ghash_update_p64); err = skcipher_walk_done(&walk, 0); } @@ -558,7 +614,8 @@ static int gcm_decrypt(struct aead_request *req) u8 *src = walk.src.virt.addr; ghash_do_update(blocks, dg, walk.src.virt.addr, - &ctx->ghash_key, NULL); + &ctx->ghash_key, NULL, + pmull_ghash_update_p64); do { __aes_arm64_encrypt(ctx->aes_key.key_enc, @@ -602,7 +659,8 @@ static int gcm_decrypt(struct aead_request *req) memcpy(buf, src, nbytes); memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes); - ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head); + ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head, + pmull_ghash_update_p64); crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, iv, walk.nbytes); @@ -650,26 +708,30 @@ static int __init ghash_ce_mod_init(void) return -ENODEV; if (elf_hwcap & HWCAP_PMULL) - pmull_ghash_update = pmull_ghash_update_p64; - + ret = crypto_register_shashes(ghash_alg, + ARRAY_SIZE(ghash_alg)); else - pmull_ghash_update = pmull_ghash_update_p8; + /* only register the first array element */ + ret = crypto_register_shash(ghash_alg); - ret = crypto_register_shash(&ghash_alg); if (ret) return ret; if (elf_hwcap & HWCAP_PMULL) { ret = crypto_register_aead(&gcm_aes_alg); if (ret) - crypto_unregister_shash(&ghash_alg); + crypto_unregister_shashes(ghash_alg, + ARRAY_SIZE(ghash_alg)); } return ret; } static void __exit ghash_ce_mod_exit(void) { - crypto_unregister_shash(&ghash_alg); + if (elf_hwcap & HWCAP_PMULL) + crypto_unregister_shashes(ghash_alg, ARRAY_SIZE(ghash_alg)); + else + crypto_unregister_shash(ghash_alg); crypto_unregister_aead(&gcm_aes_alg); } diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c @@ -38,7 +38,7 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key, /* check for weak keys */ if (!des_ekey(tmp, key) && - (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } @@ -228,7 +228,7 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key, if (!(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) && crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2], DES_KEY_SIZE)) && - (tfm->crt_flags & 
CRYPTO_TFM_REQ_WEAK_KEY)) { + (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c @@ -53,7 +53,7 @@ static int des_set_key(struct crypto_tfm *tfm, const u8 *key, * weak key detection code. */ ret = des_ekey(tmp, key); - if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } @@ -209,7 +209,7 @@ static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key, if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && - (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c @@ -119,31 +119,20 @@ static void crypto_aegis128_aesni_process_ad( } static void crypto_aegis128_aesni_process_crypt( - struct aegis_state *state, struct aead_request *req, + struct aegis_state *state, struct skcipher_walk *walk, const struct aegis_crypt_ops *ops) { - struct skcipher_walk walk; - u8 *src, *dst; - unsigned int chunksize, base; - - ops->skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops->crypt_blocks(state, chunksize, src, dst); - - base = chunksize & ~(AEGIS128_BLOCK_SIZE - 1); - src += base; - dst += base; - chunksize &= AEGIS128_BLOCK_SIZE - 1; - - if (chunksize > 0) - ops->crypt_tail(state, chunksize, src, dst); + while (walk->nbytes >= AEGIS128_BLOCK_SIZE) { + ops->crypt_blocks(state, + round_down(walk->nbytes, AEGIS128_BLOCK_SIZE), + walk->src.virt.addr, walk->dst.virt.addr); + skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE); + } - skcipher_walk_done(&walk, 0); + if (walk->nbytes) { + ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr, + walk->dst.virt.addr); + skcipher_walk_done(walk, 0); } } @@ -186,13 +175,16 @@ static void crypto_aegis128_aesni_crypt(struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm); + struct skcipher_walk walk; struct aegis_state state; + ops->skcipher_walk_init(&walk, req, true); + kernel_fpu_begin(); crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv); crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen); - crypto_aegis128_aesni_process_crypt(&state, req, ops); + crypto_aegis128_aesni_process_crypt(&state, &walk, ops); crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen); kernel_fpu_end(); diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c @@ -119,31 +119,20 @@ static void crypto_aegis128l_aesni_process_ad( } static void crypto_aegis128l_aesni_process_crypt( - struct aegis_state *state, struct aead_request *req, + struct aegis_state *state, struct skcipher_walk *walk, const struct aegis_crypt_ops *ops) { - struct skcipher_walk walk; - u8 *src, *dst; - unsigned int chunksize, base; - - ops->skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops->crypt_blocks(state, chunksize, src, dst); - - base = chunksize & ~(AEGIS128L_BLOCK_SIZE - 1); - src += base; - dst += base; - chunksize &= AEGIS128L_BLOCK_SIZE - 1; - - if (chunksize > 0) 
- ops->crypt_tail(state, chunksize, src, dst); + while (walk->nbytes >= AEGIS128L_BLOCK_SIZE) { + ops->crypt_blocks(state, round_down(walk->nbytes, + AEGIS128L_BLOCK_SIZE), + walk->src.virt.addr, walk->dst.virt.addr); + skcipher_walk_done(walk, walk->nbytes % AEGIS128L_BLOCK_SIZE); + } - skcipher_walk_done(&walk, 0); + if (walk->nbytes) { + ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr, + walk->dst.virt.addr); + skcipher_walk_done(walk, 0); } } @@ -186,13 +175,16 @@ static void crypto_aegis128l_aesni_crypt(struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(tfm); + struct skcipher_walk walk; struct aegis_state state; + ops->skcipher_walk_init(&walk, req, true); + kernel_fpu_begin(); crypto_aegis128l_aesni_init(&state, ctx->key.bytes, req->iv); crypto_aegis128l_aesni_process_ad(&state, req->src, req->assoclen); - crypto_aegis128l_aesni_process_crypt(&state, req, ops); + crypto_aegis128l_aesni_process_crypt(&state, &walk, ops); crypto_aegis128l_aesni_final(&state, tag_xor, req->assoclen, cryptlen); kernel_fpu_end(); diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c @@ -119,31 +119,20 @@ static void crypto_aegis256_aesni_process_ad( } static void crypto_aegis256_aesni_process_crypt( - struct aegis_state *state, struct aead_request *req, + struct aegis_state *state, struct skcipher_walk *walk, const struct aegis_crypt_ops *ops) { - struct skcipher_walk walk; - u8 *src, *dst; - unsigned int chunksize, base; - - ops->skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops->crypt_blocks(state, chunksize, src, dst); - - base = chunksize & ~(AEGIS256_BLOCK_SIZE - 1); - src += base; - dst += base; - chunksize &= AEGIS256_BLOCK_SIZE - 1; - - if (chunksize > 0) - ops->crypt_tail(state, chunksize, src, dst); + while (walk->nbytes >= AEGIS256_BLOCK_SIZE) { + ops->crypt_blocks(state, + round_down(walk->nbytes, AEGIS256_BLOCK_SIZE), + walk->src.virt.addr, walk->dst.virt.addr); + skcipher_walk_done(walk, walk->nbytes % AEGIS256_BLOCK_SIZE); + } - skcipher_walk_done(&walk, 0); + if (walk->nbytes) { + ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr, + walk->dst.virt.addr); + skcipher_walk_done(walk, 0); } } @@ -186,13 +175,16 @@ static void crypto_aegis256_aesni_crypt(struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(tfm); + struct skcipher_walk walk; struct aegis_state state; + ops->skcipher_walk_init(&walk, req, true); + kernel_fpu_begin(); crypto_aegis256_aesni_init(&state, ctx->key, req->iv); crypto_aegis256_aesni_process_ad(&state, req->src, req->assoclen); - crypto_aegis256_aesni_process_crypt(&state, req, ops); + crypto_aegis256_aesni_process_crypt(&state, &walk, ops); crypto_aegis256_aesni_final(&state, tag_xor, req->assoclen, cryptlen); kernel_fpu_end(); diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c @@ -175,26 +175,18 @@ asmlinkage void aesni_gcm_finalize(void *ctx, struct gcm_context_data *gdata, u8 *auth_tag, unsigned long auth_tag_len); -static struct aesni_gcm_tfm_s { -void (*init)(void *ctx, - struct gcm_context_data *gdata, - u8 *iv, - u8 *hash_subkey, const u8 *aad, - unsigned long aad_len); -void (*enc_update)(void *ctx, - struct gcm_context_data *gdata, u8 *out, - const u8 *in, - unsigned long plaintext_len); -void (*dec_update)(void *ctx, 
- struct gcm_context_data *gdata, u8 *out, - const u8 *in, - unsigned long ciphertext_len); -void (*finalize)(void *ctx, - struct gcm_context_data *gdata, - u8 *auth_tag, unsigned long auth_tag_len); +static const struct aesni_gcm_tfm_s { + void (*init)(void *ctx, struct gcm_context_data *gdata, u8 *iv, + u8 *hash_subkey, const u8 *aad, unsigned long aad_len); + void (*enc_update)(void *ctx, struct gcm_context_data *gdata, u8 *out, + const u8 *in, unsigned long plaintext_len); + void (*dec_update)(void *ctx, struct gcm_context_data *gdata, u8 *out, + const u8 *in, unsigned long ciphertext_len); + void (*finalize)(void *ctx, struct gcm_context_data *gdata, + u8 *auth_tag, unsigned long auth_tag_len); } *aesni_gcm_tfm; -struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = { +static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = { .init = &aesni_gcm_init, .enc_update = &aesni_gcm_enc_update, .dec_update = &aesni_gcm_dec_update, @@ -243,7 +235,7 @@ asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len); -struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = { +static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = { .init = &aesni_gcm_init_avx_gen2, .enc_update = &aesni_gcm_enc_update_avx_gen2, .dec_update = &aesni_gcm_dec_update_avx_gen2, @@ -288,7 +280,7 @@ asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len); -struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = { +static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = { .init = &aesni_gcm_init_avx_gen4, .enc_update = &aesni_gcm_enc_update_avx_gen4, .dec_update = &aesni_gcm_dec_update_avx_gen4, @@ -778,7 +770,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); unsigned long auth_tag_len = crypto_aead_authsize(tfm); - struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm; + const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm; struct gcm_context_data data AESNI_ALIGN_ATTR; struct scatter_walk dst_sg_walk = {}; unsigned long left = req->cryptlen; @@ -821,11 +813,14 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0); } - src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen); - scatterwalk_start(&src_sg_walk, src_sg); - if (req->src != req->dst) { - dst_sg = scatterwalk_ffwd(dst_start, req->dst, req->assoclen); - scatterwalk_start(&dst_sg_walk, dst_sg); + if (left) { + src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen); + scatterwalk_start(&src_sg_walk, src_sg); + if (req->src != req->dst) { + dst_sg = scatterwalk_ffwd(dst_start, req->dst, + req->assoclen); + scatterwalk_start(&dst_sg_walk, dst_sg); + } } kernel_fpu_begin(); diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S @@ -43,609 +43,291 @@ # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
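+#
+# For reference, the CRC computed here (polynomial 0x8bb7, zero seed, no bit
+# reflection) can be written as a bit-at-a-time C sketch. This is illustrative
+# only and the function name is not a kernel symbol; the kernel's actual
+# portable fallback is the table-driven crc_t10dif_generic():
+#
+#	u16 crc_t10dif_bitwise(u16 crc, const u8 *buf, size_t len)
+#	{
+#		while (len--) {
+#			crc ^= (u16)*buf++ << 8;
+#			for (int i = 0; i < 8; i++)
+#				crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7
+#						     : crc << 1;
+#		}
+#		return crc;
+#	}
+#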
-########################################################################
-# Function API:
-# UINT16 crc_t10dif_pcl(
-# UINT16 init_crc, //initial CRC value, 16 bits
-# const unsigned char *buf, //buffer pointer to calculate CRC on
-# UINT64 len //buffer length in bytes (64-bit data)
-# );
#
# Reference paper titled "Fast CRC Computation for Generic
# Polynomials Using PCLMULQDQ Instruction"
# URL: http://www.intel.com/content/dam/www/public/us/en/documents
# /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
#
-#

#include <linux/linkage.h>

.text

-#define arg1 %rdi
-#define arg2 %rsi
-#define arg3 %rdx
-
-#define arg1_low32 %edi
+#define init_crc %edi
+#define buf %rsi
+#define len %rdx
+
+#define FOLD_CONSTS %xmm10
+#define BSWAP_MASK %xmm11
+
+# Fold reg1, reg2 into the next 32 data bytes, storing the result back into
+# reg1, reg2.
+.macro fold_32_bytes offset, reg1, reg2
+	movdqu	\offset(buf), %xmm9
+	movdqu	\offset+16(buf), %xmm12
+	pshufb	BSWAP_MASK, %xmm9
+	pshufb	BSWAP_MASK, %xmm12
+	movdqa	\reg1, %xmm8
+	movdqa	\reg2, %xmm13
+	pclmulqdq	$0x00, FOLD_CONSTS, \reg1
+	pclmulqdq	$0x11, FOLD_CONSTS, %xmm8
+	pclmulqdq	$0x00, FOLD_CONSTS, \reg2
+	pclmulqdq	$0x11, FOLD_CONSTS, %xmm13
+	pxor	%xmm9 , \reg1
+	xorps	%xmm8 , \reg1
+	pxor	%xmm12, \reg2
+	xorps	%xmm13, \reg2
+.endm
+
+# Fold src_reg into dst_reg.
+.macro fold_16_bytes src_reg, dst_reg
+	movdqa	\src_reg, %xmm8
+	pclmulqdq	$0x11, FOLD_CONSTS, \src_reg
+	pclmulqdq	$0x00, FOLD_CONSTS, %xmm8
+	pxor	%xmm8, \dst_reg
+	xorps	\src_reg, \dst_reg
+.endm

-ENTRY(crc_t10dif_pcl)
+
+#
+# u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len);
+#
+# Assumes len >= 16.
+#
.align 16
+ENTRY(crc_t10dif_pcl)

-	# adjust the 16-bit initial_crc value, scale it to 32 bits
-	shl	$16, arg1_low32
-
-	# Allocate Stack Space
-	mov	%rsp, %rcx
-	sub	$16*2, %rsp
-	# align stack to 16 byte boundary
-	and	$~(0x10 - 1), %rsp
-
-	# check if smaller than 256
-	cmp	$256, arg3
-
-	# for sizes less than 128, we can't fold 64B at a time...
-	jl	_less_than_128
-
-
-	# load the initial crc value
-	movd	arg1_low32, %xmm10	# initial crc
-
-	# crc value does not need to be byte-reflected, but it needs
-	# to be moved to the high part of the register.
-	# because data will be byte-reflected and will align with
-	# initial crc at correct place.
-	pslldq	$12, %xmm10
-
-	movdqa	SHUF_MASK(%rip), %xmm11
-	# receive the initial 64B data, xor the initial crc value
-	movdqu	16*0(arg2), %xmm0
-	movdqu	16*1(arg2), %xmm1
-	movdqu	16*2(arg2), %xmm2
-	movdqu	16*3(arg2), %xmm3
-	movdqu	16*4(arg2), %xmm4
-	movdqu	16*5(arg2), %xmm5
-	movdqu	16*6(arg2), %xmm6
-	movdqu	16*7(arg2), %xmm7
-
-	pshufb	%xmm11, %xmm0
-	# XOR the initial_crc value
-	pxor	%xmm10, %xmm0
-	pshufb	%xmm11, %xmm1
-	pshufb	%xmm11, %xmm2
-	pshufb	%xmm11, %xmm3
-	pshufb	%xmm11, %xmm4
-	pshufb	%xmm11, %xmm5
-	pshufb	%xmm11, %xmm6
-	pshufb	%xmm11, %xmm7
-
-	movdqa	rk3(%rip), %xmm10	#xmm10 has rk3 and rk4
-					#imm value of pclmulqdq instruction
-					#will determine which constant to use
-
-	#################################################################
-	# we subtract 256 instead of 128 to save one instruction from the loop
-	sub	$256, arg3
-
-	# at this section of the code, there is 64*x+y (0<=y<64) bytes of
-	# buffer. The _fold_64_B_loop will fold 64B at a time
-	# until we have 64+y Bytes of buffer
-
-
-	# fold 64B at a time. 
This section of the code folds 4 xmm - # registers in parallel -_fold_64_B_loop: - - # update the buffer pointer - add $128, arg2 # buf += 64# - - movdqu 16*0(arg2), %xmm9 - movdqu 16*1(arg2), %xmm12 - pshufb %xmm11, %xmm9 - pshufb %xmm11, %xmm12 - movdqa %xmm0, %xmm8 - movdqa %xmm1, %xmm13 - pclmulqdq $0x0 , %xmm10, %xmm0 - pclmulqdq $0x11, %xmm10, %xmm8 - pclmulqdq $0x0 , %xmm10, %xmm1 - pclmulqdq $0x11, %xmm10, %xmm13 - pxor %xmm9 , %xmm0 - xorps %xmm8 , %xmm0 - pxor %xmm12, %xmm1 - xorps %xmm13, %xmm1 - - movdqu 16*2(arg2), %xmm9 - movdqu 16*3(arg2), %xmm12 - pshufb %xmm11, %xmm9 - pshufb %xmm11, %xmm12 - movdqa %xmm2, %xmm8 - movdqa %xmm3, %xmm13 - pclmulqdq $0x0, %xmm10, %xmm2 - pclmulqdq $0x11, %xmm10, %xmm8 - pclmulqdq $0x0, %xmm10, %xmm3 - pclmulqdq $0x11, %xmm10, %xmm13 - pxor %xmm9 , %xmm2 - xorps %xmm8 , %xmm2 - pxor %xmm12, %xmm3 - xorps %xmm13, %xmm3 - - movdqu 16*4(arg2), %xmm9 - movdqu 16*5(arg2), %xmm12 - pshufb %xmm11, %xmm9 - pshufb %xmm11, %xmm12 - movdqa %xmm4, %xmm8 - movdqa %xmm5, %xmm13 - pclmulqdq $0x0, %xmm10, %xmm4 - pclmulqdq $0x11, %xmm10, %xmm8 - pclmulqdq $0x0, %xmm10, %xmm5 - pclmulqdq $0x11, %xmm10, %xmm13 - pxor %xmm9 , %xmm4 - xorps %xmm8 , %xmm4 - pxor %xmm12, %xmm5 - xorps %xmm13, %xmm5 - - movdqu 16*6(arg2), %xmm9 - movdqu 16*7(arg2), %xmm12 - pshufb %xmm11, %xmm9 - pshufb %xmm11, %xmm12 - movdqa %xmm6 , %xmm8 - movdqa %xmm7 , %xmm13 - pclmulqdq $0x0 , %xmm10, %xmm6 - pclmulqdq $0x11, %xmm10, %xmm8 - pclmulqdq $0x0 , %xmm10, %xmm7 - pclmulqdq $0x11, %xmm10, %xmm13 - pxor %xmm9 , %xmm6 - xorps %xmm8 , %xmm6 - pxor %xmm12, %xmm7 - xorps %xmm13, %xmm7 - - sub $128, arg3 - - # check if there is another 64B in the buffer to be able to fold - jge _fold_64_B_loop - ################################################################## - - - add $128, arg2 - # at this point, the buffer pointer is pointing at the last y Bytes - # of the buffer the 64B of folded data is in 4 of the xmm - # registers: xmm0, xmm1, xmm2, xmm3 - - - # fold the 8 xmm registers to 1 xmm register with different constants - - movdqa rk9(%rip), %xmm10 - movdqa %xmm0, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm0 - pclmulqdq $0x0 , %xmm10, %xmm8 - pxor %xmm8, %xmm7 - xorps %xmm0, %xmm7 - - movdqa rk11(%rip), %xmm10 - movdqa %xmm1, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm1 - pclmulqdq $0x0 , %xmm10, %xmm8 - pxor %xmm8, %xmm7 - xorps %xmm1, %xmm7 - - movdqa rk13(%rip), %xmm10 - movdqa %xmm2, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm2 - pclmulqdq $0x0 , %xmm10, %xmm8 - pxor %xmm8, %xmm7 - pxor %xmm2, %xmm7 - - movdqa rk15(%rip), %xmm10 - movdqa %xmm3, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm3 - pclmulqdq $0x0 , %xmm10, %xmm8 - pxor %xmm8, %xmm7 - xorps %xmm3, %xmm7 - - movdqa rk17(%rip), %xmm10 - movdqa %xmm4, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm4 - pclmulqdq $0x0 , %xmm10, %xmm8 - pxor %xmm8, %xmm7 - pxor %xmm4, %xmm7 - - movdqa rk19(%rip), %xmm10 - movdqa %xmm5, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm5 - pclmulqdq $0x0 , %xmm10, %xmm8 - pxor %xmm8, %xmm7 - xorps %xmm5, %xmm7 - - movdqa rk1(%rip), %xmm10 #xmm10 has rk1 and rk2 - #imm value of pclmulqdq instruction - #will determine which constant to use - movdqa %xmm6, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm6 - pclmulqdq $0x0 , %xmm10, %xmm8 - pxor %xmm8, %xmm7 - pxor %xmm6, %xmm7 - - - # instead of 64, we add 48 to the loop counter to save 1 instruction - # from the loop instead of a cmp instruction, we use the negative - # flag with the jl instruction - add $128-16, arg3 - jl _final_reduction_for_128 - - # now we have 16+y bytes left to reduce. 
16 Bytes is in register xmm7 - # and the rest is in memory. We can fold 16 bytes at a time if y>=16 - # continue folding 16B at a time - -_16B_reduction_loop: + movdqa .Lbswap_mask(%rip), BSWAP_MASK + + # For sizes less than 256 bytes, we can't fold 128 bytes at a time. + cmp $256, len + jl .Lless_than_256_bytes + + # Load the first 128 data bytes. Byte swapping is necessary to make the + # bit order match the polynomial coefficient order. + movdqu 16*0(buf), %xmm0 + movdqu 16*1(buf), %xmm1 + movdqu 16*2(buf), %xmm2 + movdqu 16*3(buf), %xmm3 + movdqu 16*4(buf), %xmm4 + movdqu 16*5(buf), %xmm5 + movdqu 16*6(buf), %xmm6 + movdqu 16*7(buf), %xmm7 + add $128, buf + pshufb BSWAP_MASK, %xmm0 + pshufb BSWAP_MASK, %xmm1 + pshufb BSWAP_MASK, %xmm2 + pshufb BSWAP_MASK, %xmm3 + pshufb BSWAP_MASK, %xmm4 + pshufb BSWAP_MASK, %xmm5 + pshufb BSWAP_MASK, %xmm6 + pshufb BSWAP_MASK, %xmm7 + + # XOR the first 16 data *bits* with the initial CRC value. + pxor %xmm8, %xmm8 + pinsrw $7, init_crc, %xmm8 + pxor %xmm8, %xmm0 + + movdqa .Lfold_across_128_bytes_consts(%rip), FOLD_CONSTS + + # Subtract 128 for the 128 data bytes just consumed. Subtract another + # 128 to simplify the termination condition of the following loop. + sub $256, len + + # While >= 128 data bytes remain (not counting xmm0-7), fold the 128 + # bytes xmm0-7 into them, storing the result back into xmm0-7. +.Lfold_128_bytes_loop: + fold_32_bytes 0, %xmm0, %xmm1 + fold_32_bytes 32, %xmm2, %xmm3 + fold_32_bytes 64, %xmm4, %xmm5 + fold_32_bytes 96, %xmm6, %xmm7 + add $128, buf + sub $128, len + jge .Lfold_128_bytes_loop + + # Now fold the 112 bytes in xmm0-xmm6 into the 16 bytes in xmm7. + + # Fold across 64 bytes. + movdqa .Lfold_across_64_bytes_consts(%rip), FOLD_CONSTS + fold_16_bytes %xmm0, %xmm4 + fold_16_bytes %xmm1, %xmm5 + fold_16_bytes %xmm2, %xmm6 + fold_16_bytes %xmm3, %xmm7 + # Fold across 32 bytes. + movdqa .Lfold_across_32_bytes_consts(%rip), FOLD_CONSTS + fold_16_bytes %xmm4, %xmm6 + fold_16_bytes %xmm5, %xmm7 + # Fold across 16 bytes. + movdqa .Lfold_across_16_bytes_consts(%rip), FOLD_CONSTS + fold_16_bytes %xmm6, %xmm7 + + # Add 128 to get the correct number of data bytes remaining in 0...127 + # (not counting xmm7), following the previous extra subtraction by 128. + # Then subtract 16 to simplify the termination condition of the + # following loop. + add $128-16, len + + # While >= 16 data bytes remain (not counting xmm7), fold the 16 bytes + # xmm7 into them, storing the result back into xmm7. + jl .Lfold_16_bytes_loop_done +.Lfold_16_bytes_loop: movdqa %xmm7, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm7 - pclmulqdq $0x0 , %xmm10, %xmm8 + pclmulqdq $0x11, FOLD_CONSTS, %xmm7 + pclmulqdq $0x00, FOLD_CONSTS, %xmm8 pxor %xmm8, %xmm7 - movdqu (arg2), %xmm0 - pshufb %xmm11, %xmm0 + movdqu (buf), %xmm0 + pshufb BSWAP_MASK, %xmm0 pxor %xmm0 , %xmm7 - add $16, arg2 - sub $16, arg3 - # instead of a cmp instruction, we utilize the flags with the - # jge instruction equivalent of: cmp arg3, 16-16 - # check if there is any more 16B in the buffer to be able to fold - jge _16B_reduction_loop - - #now we have 16+z bytes left to reduce, where 0<= z < 16. - #first, we reduce the data in the xmm7 register - - -_final_reduction_for_128: - # check if any more data to fold. If not, compute the CRC of - # the final 128 bits - add $16, arg3 - je _128_done - - # here we are getting data that is less than 16 bytes. 
- # since we know that there was data before the pointer, we can - # offset the input pointer before the actual point, to receive - # exactly 16 bytes. after that the registers need to be adjusted. -_get_last_two_xmms: + add $16, buf + sub $16, len + jge .Lfold_16_bytes_loop + +.Lfold_16_bytes_loop_done: + # Add 16 to get the correct number of data bytes remaining in 0...15 + # (not counting xmm7), following the previous extra subtraction by 16. + add $16, len + je .Lreduce_final_16_bytes + +.Lhandle_partial_segment: + # Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first 16 + # bytes are in xmm7 and the rest are the remaining data in 'buf'. To do + # this without needing a fold constant for each possible 'len', redivide + # the bytes into a first chunk of 'len' bytes and a second chunk of 16 + # bytes, then fold the first chunk into the second. + movdqa %xmm7, %xmm2 - movdqu -16(arg2, arg3), %xmm1 - pshufb %xmm11, %xmm1 + # xmm1 = last 16 original data bytes + movdqu -16(buf, len), %xmm1 + pshufb BSWAP_MASK, %xmm1 - # get rid of the extra data that was loaded before - # load the shift constant - lea pshufb_shf_table+16(%rip), %rax - sub arg3, %rax + # xmm2 = high order part of second chunk: xmm7 left-shifted by 'len' bytes. + lea .Lbyteshift_table+16(%rip), %rax + sub len, %rax movdqu (%rax), %xmm0 - - # shift xmm2 to the left by arg3 bytes pshufb %xmm0, %xmm2 - # shift xmm7 to the right by 16-arg3 bytes - pxor mask1(%rip), %xmm0 + # xmm7 = first chunk: xmm7 right-shifted by '16-len' bytes. + pxor .Lmask1(%rip), %xmm0 pshufb %xmm0, %xmm7 + + # xmm1 = second chunk: 'len' bytes from xmm1 (low-order bytes), + # then '16-len' bytes from xmm2 (high-order bytes). pblendvb %xmm2, %xmm1 #xmm0 is implicit - # fold 16 Bytes - movdqa %xmm1, %xmm2 + # Fold the first chunk into the second chunk, storing the result in xmm7. movdqa %xmm7, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm7 - pclmulqdq $0x0 , %xmm10, %xmm8 + pclmulqdq $0x11, FOLD_CONSTS, %xmm7 + pclmulqdq $0x00, FOLD_CONSTS, %xmm8 pxor %xmm8, %xmm7 - pxor %xmm2, %xmm7 + pxor %xmm1, %xmm7 -_128_done: - # compute crc of a 128-bit value - movdqa rk5(%rip), %xmm10 # rk5 and rk6 in xmm10 - movdqa %xmm7, %xmm0 +.Lreduce_final_16_bytes: + # Reduce the 128-bit value M(x), stored in xmm7, to the final 16-bit CRC - #64b fold - pclmulqdq $0x1, %xmm10, %xmm7 - pslldq $8 , %xmm0 - pxor %xmm0, %xmm7 + # Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'. + movdqa .Lfinal_fold_consts(%rip), FOLD_CONSTS - #32b fold + # Fold the high 64 bits into the low 64 bits, while also multiplying by + # x^64. This produces a 128-bit value congruent to x^64 * M(x) and + # whose low 48 bits are 0. movdqa %xmm7, %xmm0 + pclmulqdq $0x11, FOLD_CONSTS, %xmm7 # high bits * x^48 * (x^80 mod G(x)) + pslldq $8, %xmm0 + pxor %xmm0, %xmm7 # + low bits * x^64 - pand mask2(%rip), %xmm0 - - psrldq $12, %xmm7 - pclmulqdq $0x10, %xmm10, %xmm7 - pxor %xmm0, %xmm7 - - #barrett reduction -_barrett: - movdqa rk7(%rip), %xmm10 # rk7 and rk8 in xmm10 + # Fold the high 32 bits into the low 96 bits. This produces a 96-bit + # value congruent to x^64 * M(x) and whose low 48 bits are 0. 
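+	# (After this fold only bits 48..95 can be nonzero, so the Barrett
+	# step below has just a 48-bit quantity left to reduce mod G(x).)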
movdqa %xmm7, %xmm0 - pclmulqdq $0x01, %xmm10, %xmm7 - pslldq $4, %xmm7 - pclmulqdq $0x11, %xmm10, %xmm7 + pand .Lmask2(%rip), %xmm0 # zero high 32 bits + psrldq $12, %xmm7 # extract high 32 bits + pclmulqdq $0x00, FOLD_CONSTS, %xmm7 # high 32 bits * x^48 * (x^48 mod G(x)) + pxor %xmm0, %xmm7 # + low bits - pslldq $4, %xmm7 - pxor %xmm0, %xmm7 - pextrd $1, %xmm7, %eax + # Load G(x) and floor(x^48 / G(x)). + movdqa .Lbarrett_reduction_consts(%rip), FOLD_CONSTS -_cleanup: - # scale the result back to 16 bits - shr $16, %eax - mov %rcx, %rsp + # Use Barrett reduction to compute the final CRC value. + movdqa %xmm7, %xmm0 + pclmulqdq $0x11, FOLD_CONSTS, %xmm7 # high 32 bits * floor(x^48 / G(x)) + psrlq $32, %xmm7 # /= x^32 + pclmulqdq $0x00, FOLD_CONSTS, %xmm7 # *= G(x) + psrlq $48, %xmm0 + pxor %xmm7, %xmm0 # + low 16 nonzero bits + # Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of xmm0. + + pextrw $0, %xmm0, %eax ret -######################################################################## - .align 16 -_less_than_128: - - # check if there is enough buffer to be able to fold 16B at a time - cmp $32, arg3 - jl _less_than_32 - movdqa SHUF_MASK(%rip), %xmm11 +.Lless_than_256_bytes: + # Checksumming a buffer of length 16...255 bytes - # now if there is, load the constants - movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10 + # Load the first 16 data bytes. + movdqu (buf), %xmm7 + pshufb BSWAP_MASK, %xmm7 + add $16, buf - movd arg1_low32, %xmm0 # get the initial crc value - pslldq $12, %xmm0 # align it to its correct place - movdqu (arg2), %xmm7 # load the plaintext - pshufb %xmm11, %xmm7 # byte-reflect the plaintext + # XOR the first 16 data *bits* with the initial CRC value. + pxor %xmm0, %xmm0 + pinsrw $7, init_crc, %xmm0 pxor %xmm0, %xmm7 - - # update the buffer pointer - add $16, arg2 - - # update the counter. subtract 32 instead of 16 to save one - # instruction from the loop - sub $32, arg3 - - jmp _16B_reduction_loop - - -.align 16 -_less_than_32: - # mov initial crc to the return value. this is necessary for - # zero-length buffers. - mov arg1_low32, %eax - test arg3, arg3 - je _cleanup - - movdqa SHUF_MASK(%rip), %xmm11 - - movd arg1_low32, %xmm0 # get the initial crc value - pslldq $12, %xmm0 # align it to its correct place - - cmp $16, arg3 - je _exact_16_left - jl _less_than_16_left - - movdqu (arg2), %xmm7 # load the plaintext - pshufb %xmm11, %xmm7 # byte-reflect the plaintext - pxor %xmm0 , %xmm7 # xor the initial crc value - add $16, arg2 - sub $16, arg3 - movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10 - jmp _get_last_two_xmms - - -.align 16 -_less_than_16_left: - # use stack space to load data less than 16 bytes, zero-out - # the 16B in memory first. 
- - pxor %xmm1, %xmm1 - mov %rsp, %r11 - movdqa %xmm1, (%r11) - - cmp $4, arg3 - jl _only_less_than_4 - - # backup the counter value - mov arg3, %r9 - cmp $8, arg3 - jl _less_than_8_left - - # load 8 Bytes - mov (arg2), %rax - mov %rax, (%r11) - add $8, %r11 - sub $8, arg3 - add $8, arg2 -_less_than_8_left: - - cmp $4, arg3 - jl _less_than_4_left - - # load 4 Bytes - mov (arg2), %eax - mov %eax, (%r11) - add $4, %r11 - sub $4, arg3 - add $4, arg2 -_less_than_4_left: - - cmp $2, arg3 - jl _less_than_2_left - - # load 2 Bytes - mov (arg2), %ax - mov %ax, (%r11) - add $2, %r11 - sub $2, arg3 - add $2, arg2 -_less_than_2_left: - cmp $1, arg3 - jl _zero_left - - # load 1 Byte - mov (arg2), %al - mov %al, (%r11) -_zero_left: - movdqa (%rsp), %xmm7 - pshufb %xmm11, %xmm7 - pxor %xmm0 , %xmm7 # xor the initial crc value - - # shl r9, 4 - lea pshufb_shf_table+16(%rip), %rax - sub %r9, %rax - movdqu (%rax), %xmm0 - pxor mask1(%rip), %xmm0 - - pshufb %xmm0, %xmm7 - jmp _128_done - -.align 16 -_exact_16_left: - movdqu (arg2), %xmm7 - pshufb %xmm11, %xmm7 - pxor %xmm0 , %xmm7 # xor the initial crc value - - jmp _128_done - -_only_less_than_4: - cmp $3, arg3 - jl _only_less_than_3 - - # load 3 Bytes - mov (arg2), %al - mov %al, (%r11) - - mov 1(arg2), %al - mov %al, 1(%r11) - - mov 2(arg2), %al - mov %al, 2(%r11) - - movdqa (%rsp), %xmm7 - pshufb %xmm11, %xmm7 - pxor %xmm0 , %xmm7 # xor the initial crc value - - psrldq $5, %xmm7 - - jmp _barrett -_only_less_than_3: - cmp $2, arg3 - jl _only_less_than_2 - - # load 2 Bytes - mov (arg2), %al - mov %al, (%r11) - - mov 1(arg2), %al - mov %al, 1(%r11) - - movdqa (%rsp), %xmm7 - pshufb %xmm11, %xmm7 - pxor %xmm0 , %xmm7 # xor the initial crc value - - psrldq $6, %xmm7 - - jmp _barrett -_only_less_than_2: - - # load 1 Byte - mov (arg2), %al - mov %al, (%r11) - - movdqa (%rsp), %xmm7 - pshufb %xmm11, %xmm7 - pxor %xmm0 , %xmm7 # xor the initial crc value - - psrldq $7, %xmm7 - - jmp _barrett - + movdqa .Lfold_across_16_bytes_consts(%rip), FOLD_CONSTS + cmp $16, len + je .Lreduce_final_16_bytes # len == 16 + sub $32, len + jge .Lfold_16_bytes_loop # 32 <= len <= 255 + add $16, len + jmp .Lhandle_partial_segment # 17 <= len <= 31 ENDPROC(crc_t10dif_pcl) .section .rodata, "a", @progbits .align 16 -# precomputed constants -# these constants are precomputed from the poly: -# 0x8bb70000 (0x8bb7 scaled to 32 bits) -# Q = 0x18BB70000 -# rk1 = 2^(32*3) mod Q << 32 -# rk2 = 2^(32*5) mod Q << 32 -# rk3 = 2^(32*15) mod Q << 32 -# rk4 = 2^(32*17) mod Q << 32 -# rk5 = 2^(32*3) mod Q << 32 -# rk6 = 2^(32*2) mod Q << 32 -# rk7 = floor(2^64/Q) -# rk8 = Q -rk1: -.quad 0x2d56000000000000 -rk2: -.quad 0x06df000000000000 -rk3: -.quad 0x9d9d000000000000 -rk4: -.quad 0x7cf5000000000000 -rk5: -.quad 0x2d56000000000000 -rk6: -.quad 0x1368000000000000 -rk7: -.quad 0x00000001f65a57f8 -rk8: -.quad 0x000000018bb70000 - -rk9: -.quad 0xceae000000000000 -rk10: -.quad 0xbfd6000000000000 -rk11: -.quad 0x1e16000000000000 -rk12: -.quad 0x713c000000000000 -rk13: -.quad 0xf7f9000000000000 -rk14: -.quad 0x80a6000000000000 -rk15: -.quad 0x044c000000000000 -rk16: -.quad 0xe658000000000000 -rk17: -.quad 0xad18000000000000 -rk18: -.quad 0xa497000000000000 -rk19: -.quad 0x6ee3000000000000 -rk20: -.quad 0xe7b5000000000000 - +# Fold constants precomputed from the polynomial 0x18bb7 +# G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0 +.Lfold_across_128_bytes_consts: + .quad 0x0000000000006123 # x^(8*128) mod G(x) + .quad 0x0000000000002295 # x^(8*128+64) mod G(x) 
+.Lfold_across_64_bytes_consts: + .quad 0x0000000000001069 # x^(4*128) mod G(x) + .quad 0x000000000000dd31 # x^(4*128+64) mod G(x) +.Lfold_across_32_bytes_consts: + .quad 0x000000000000857d # x^(2*128) mod G(x) + .quad 0x0000000000007acc # x^(2*128+64) mod G(x) +.Lfold_across_16_bytes_consts: + .quad 0x000000000000a010 # x^(1*128) mod G(x) + .quad 0x0000000000001faa # x^(1*128+64) mod G(x) +.Lfinal_fold_consts: + .quad 0x1368000000000000 # x^48 * (x^48 mod G(x)) + .quad 0x2d56000000000000 # x^48 * (x^80 mod G(x)) +.Lbarrett_reduction_consts: + .quad 0x0000000000018bb7 # G(x) + .quad 0x00000001f65a57f8 # floor(x^48 / G(x)) .section .rodata.cst16.mask1, "aM", @progbits, 16 .align 16 -mask1: -.octa 0x80808080808080808080808080808080 +.Lmask1: + .octa 0x80808080808080808080808080808080 .section .rodata.cst16.mask2, "aM", @progbits, 16 .align 16 -mask2: -.octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF +.Lmask2: + .octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF + +.section .rodata.cst16.bswap_mask, "aM", @progbits, 16 +.align 16 +.Lbswap_mask: + .octa 0x000102030405060708090A0B0C0D0E0F -.section .rodata.cst16.SHUF_MASK, "aM", @progbits, 16 +.section .rodata.cst32.byteshift_table, "aM", @progbits, 32 .align 16 -SHUF_MASK: -.octa 0x000102030405060708090A0B0C0D0E0F - -.section .rodata.cst32.pshufb_shf_table, "aM", @progbits, 32 -.align 32 -pshufb_shf_table: -# use these values for shift constants for the pshufb instruction -# different alignments result in values as shown: -# DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1 -# DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-3) / shr2 -# DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-4) / shr3 -# DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4 -# DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5 -# DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6 -# DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7 -# DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8 -# DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9 -# DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10 -# DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11 -# DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12 -# DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13 -# DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14 -# DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15 -.octa 0x8f8e8d8c8b8a89888786858483828100 -.octa 0x000e0d0c0b0a09080706050403020100 +# For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 - len] +# is the index vector to shift left by 'len' bytes, and is also {0x80, ..., +# 0x80} XOR the index vector to shift right by '16 - len' bytes. +.Lbyteshift_table: + .byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87 + .byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f + .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 + .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe , 0x0 diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c @@ -33,18 +33,12 @@ #include <asm/cpufeatures.h> #include <asm/cpu_device_id.h> -asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf, - size_t len); +asmlinkage u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len); struct chksum_desc_ctx { __u16 crc; }; -/* - * Steps through buffer one byte at at time, calculates reflected - * crc using table. 
- */ - static int chksum_init(struct shash_desc *desc) { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); @@ -59,7 +53,7 @@ static int chksum_update(struct shash_desc *desc, const u8 *data, { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - if (irq_fpu_usable()) { + if (length >= 16 && irq_fpu_usable()) { kernel_fpu_begin(); ctx->crc = crc_t10dif_pcl(ctx->crc, data, length); kernel_fpu_end(); @@ -79,7 +73,7 @@ static int chksum_final(struct shash_desc *desc, u8 *out) static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len, u8 *out) { - if (irq_fpu_usable()) { + if (len >= 16 && irq_fpu_usable()) { kernel_fpu_begin(); *(__u16 *)out = crc_t10dif_pcl(*crcp, data, len); kernel_fpu_end(); diff --git a/arch/x86/crypto/morus1280_glue.c b/arch/x86/crypto/morus1280_glue.c @@ -85,31 +85,20 @@ static void crypto_morus1280_glue_process_ad( static void crypto_morus1280_glue_process_crypt(struct morus1280_state *state, struct morus1280_ops ops, - struct aead_request *req) + struct skcipher_walk *walk) { - struct skcipher_walk walk; - u8 *cursor_src, *cursor_dst; - unsigned int chunksize, base; - - ops.skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - cursor_src = walk.src.virt.addr; - cursor_dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize); - - base = chunksize & ~(MORUS1280_BLOCK_SIZE - 1); - cursor_src += base; - cursor_dst += base; - chunksize &= MORUS1280_BLOCK_SIZE - 1; - - if (chunksize > 0) - ops.crypt_tail(state, cursor_src, cursor_dst, - chunksize); + while (walk->nbytes >= MORUS1280_BLOCK_SIZE) { + ops.crypt_blocks(state, walk->src.virt.addr, + walk->dst.virt.addr, + round_down(walk->nbytes, + MORUS1280_BLOCK_SIZE)); + skcipher_walk_done(walk, walk->nbytes % MORUS1280_BLOCK_SIZE); + } - skcipher_walk_done(&walk, 0); + if (walk->nbytes) { + ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr, + walk->nbytes); + skcipher_walk_done(walk, 0); } } @@ -147,12 +136,15 @@ static void crypto_morus1280_glue_crypt(struct aead_request *req, struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct morus1280_ctx *ctx = crypto_aead_ctx(tfm); struct morus1280_state state; + struct skcipher_walk walk; + + ops.skcipher_walk_init(&walk, req, true); kernel_fpu_begin(); ctx->ops->init(&state, &ctx->key, req->iv); crypto_morus1280_glue_process_ad(&state, ctx->ops, req->src, req->assoclen); - crypto_morus1280_glue_process_crypt(&state, ops, req); + crypto_morus1280_glue_process_crypt(&state, ops, &walk); ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen); kernel_fpu_end(); diff --git a/arch/x86/crypto/morus640_glue.c b/arch/x86/crypto/morus640_glue.c @@ -85,31 +85,19 @@ static void crypto_morus640_glue_process_ad( static void crypto_morus640_glue_process_crypt(struct morus640_state *state, struct morus640_ops ops, - struct aead_request *req) + struct skcipher_walk *walk) { - struct skcipher_walk walk; - u8 *cursor_src, *cursor_dst; - unsigned int chunksize, base; - - ops.skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - cursor_src = walk.src.virt.addr; - cursor_dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize); - - base = chunksize & ~(MORUS640_BLOCK_SIZE - 1); - cursor_src += base; - cursor_dst += base; - chunksize &= MORUS640_BLOCK_SIZE - 1; - - if (chunksize > 0) - ops.crypt_tail(state, cursor_src, cursor_dst, - chunksize); + while (walk->nbytes >= MORUS640_BLOCK_SIZE) { + ops.crypt_blocks(state, 
walk->src.virt.addr, + walk->dst.virt.addr, + round_down(walk->nbytes, MORUS640_BLOCK_SIZE)); + skcipher_walk_done(walk, walk->nbytes % MORUS640_BLOCK_SIZE); + } - skcipher_walk_done(&walk, 0); + if (walk->nbytes) { + ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr, + walk->nbytes); + skcipher_walk_done(walk, 0); } } @@ -143,12 +131,15 @@ static void crypto_morus640_glue_crypt(struct aead_request *req, struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct morus640_ctx *ctx = crypto_aead_ctx(tfm); struct morus640_state state; + struct skcipher_walk walk; + + ops.skcipher_walk_init(&walk, req, true); kernel_fpu_begin(); ctx->ops->init(&state, &ctx->key, req->iv); crypto_morus640_glue_process_ad(&state, ctx->ops, req->src, req->assoclen); - crypto_morus640_glue_process_crypt(&state, ops, req); + crypto_morus640_glue_process_crypt(&state, ops, &walk); ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen); kernel_fpu_end(); diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S @@ -272,6 +272,10 @@ ENTRY(poly1305_block_sse2) dec %rcx jnz .Ldoblock + # Zeroing of key material + mov %rcx,0x00(%rsp) + mov %rcx,0x08(%rsp) + add $0x10,%rsp pop %r12 pop %rbx diff --git a/crypto/Kconfig b/crypto/Kconfig @@ -168,6 +168,16 @@ config CRYPTO_MANAGER_DISABLE_TESTS Disable run-time self tests that normally take place at algorithm registration. +config CRYPTO_MANAGER_EXTRA_TESTS + bool "Enable extra run-time crypto self tests" + depends on DEBUG_KERNEL && !CRYPTO_MANAGER_DISABLE_TESTS + help + Enable extra run-time self tests of registered crypto algorithms, + including randomized fuzz tests. + + This is intended for developer use only, as these tests take much + longer to run than the normal self tests. + config CRYPTO_GF128MUL tristate "GF(2^128) multiplication functions" help @@ -642,7 +652,7 @@ config CRYPTO_CRC32_PCLMUL From Intel Westmere and AMD Bulldozer processor with SSE4.2 and PCLMULQDQ supported, the processor will support CRC32 PCLMULQDQ implementation using hardware accelerated PCLMULQDQ - instruction. This option will create 'crc32-plcmul' module, + instruction. This option will create 'crc32-pclmul' module, which will enable any routine to use the CRC-32-IEEE 802.3 checksum and gain better performance as compared with the table implementation. @@ -671,7 +681,7 @@ config CRYPTO_CRCT10DIF_PCLMUL For x86_64 processors with SSE4.2 and PCLMULQDQ supported, CRC T10 DIF PCLMULQDQ computation can be hardware accelerated PCLMULQDQ instruction. This option will create - 'crct10dif-plcmul' module, which is faster when computing the + 'crct10dif-pclmul' module, which is faster when computing the crct10dif checksum as compared with the generic table implementation. config CRYPTO_CRCT10DIF_VPMSUM diff --git a/crypto/aead.c b/crypto/aead.c @@ -61,8 +61,10 @@ int crypto_aead_setkey(struct crypto_aead *tfm, else err = crypto_aead_alg(tfm)->setkey(tfm, key, keylen); - if (err) + if (unlikely(err)) { + crypto_aead_set_flags(tfm, CRYPTO_TFM_NEED_KEY); return err; + } crypto_aead_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; diff --git a/crypto/aegis.h b/crypto/aegis.h @@ -1,14 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * AEGIS common definitions * * Copyright (c) 2018 Ondrej Mosnacek <omosnacek@gmail.com> * Copyright (c) 2018 Red Hat, Inc. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. */ #ifndef _CRYPTO_AEGIS_H diff --git a/crypto/aegis128.c b/crypto/aegis128.c @@ -1,13 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * The AEGIS-128 Authenticated-Encryption Algorithm * * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com> * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. */ #include <crypto/algapi.h> @@ -290,19 +286,19 @@ static void crypto_aegis128_process_crypt(struct aegis_state *state, const struct aegis128_ops *ops) { struct skcipher_walk walk; - u8 *src, *dst; - unsigned int chunksize; ops->skcipher_walk_init(&walk, req, false); while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; - chunksize = walk.nbytes; + unsigned int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); - ops->crypt_chunk(state, dst, src, chunksize); + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes); - skcipher_walk_done(&walk, 0); + skcipher_walk_done(&walk, walk.nbytes - nbytes); } } diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c @@ -1,13 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * The AEGIS-128L Authenticated-Encryption Algorithm * * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com> * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. */ #include <crypto/algapi.h> @@ -353,19 +349,19 @@ static void crypto_aegis128l_process_crypt(struct aegis_state *state, const struct aegis128l_ops *ops) { struct skcipher_walk walk; - u8 *src, *dst; - unsigned int chunksize; ops->skcipher_walk_init(&walk, req, false); while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; - chunksize = walk.nbytes; + unsigned int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); - ops->crypt_chunk(state, dst, src, chunksize); + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes); - skcipher_walk_done(&walk, 0); + skcipher_walk_done(&walk, walk.nbytes - nbytes); } } diff --git a/crypto/aegis256.c b/crypto/aegis256.c @@ -1,13 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * The AEGIS-256 Authenticated-Encryption Algorithm * * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com> * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. 
*/ #include <crypto/algapi.h> @@ -303,19 +299,19 @@ static void crypto_aegis256_process_crypt(struct aegis_state *state, const struct aegis256_ops *ops) { struct skcipher_walk walk; - u8 *src, *dst; - unsigned int chunksize; ops->skcipher_walk_init(&walk, req, false); while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; - chunksize = walk.nbytes; + unsigned int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); - ops->crypt_chunk(state, dst, src, chunksize); + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes); - skcipher_walk_done(&walk, 0); + skcipher_walk_done(&walk, walk.nbytes - nbytes); } } diff --git a/crypto/af_alg.c b/crypto/af_alg.c @@ -304,8 +304,6 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern) if (err) goto unlock; - sk2->sk_family = PF_ALG; - if (nokey || !ask->refcnt++) sock_hold(sk); ask->nokey_refcnt += nokey; @@ -382,7 +380,6 @@ static int alg_create(struct net *net, struct socket *sock, int protocol, sock->ops = &alg_proto_ops; sock_init_data(sock, sk); - sk->sk_family = PF_ALG; sk->sk_destruct = alg_sock_destruct; return 0; @@ -427,12 +424,12 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len) } EXPORT_SYMBOL_GPL(af_alg_make_sg); -void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new) +static void af_alg_link_sg(struct af_alg_sgl *sgl_prev, + struct af_alg_sgl *sgl_new) { sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1); sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg); } -EXPORT_SYMBOL_GPL(af_alg_link_sg); void af_alg_free_sg(struct af_alg_sgl *sgl) { @@ -443,7 +440,7 @@ void af_alg_free_sg(struct af_alg_sgl *sgl) } EXPORT_SYMBOL_GPL(af_alg_free_sg); -int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con) +static int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con) { struct cmsghdr *cmsg; @@ -482,7 +479,6 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con) return 0; } -EXPORT_SYMBOL_GPL(af_alg_cmsg_send); /** * af_alg_alloc_tsgl - allocate the TX SGL @@ -490,7 +486,7 @@ EXPORT_SYMBOL_GPL(af_alg_cmsg_send); * @sk socket of connection to user space * @return: 0 upon success, < 0 upon error */ -int af_alg_alloc_tsgl(struct sock *sk) +static int af_alg_alloc_tsgl(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct af_alg_ctx *ctx = ask->private; @@ -519,7 +515,6 @@ int af_alg_alloc_tsgl(struct sock *sk) return 0; } -EXPORT_SYMBOL_GPL(af_alg_alloc_tsgl); /** * aead_count_tsgl - Count number of TX SG entries @@ -534,17 +529,17 @@ EXPORT_SYMBOL_GPL(af_alg_alloc_tsgl); */ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset) { - struct alg_sock *ask = alg_sk(sk); - struct af_alg_ctx *ctx = ask->private; - struct af_alg_tsgl *sgl, *tmp; + const struct alg_sock *ask = alg_sk(sk); + const struct af_alg_ctx *ctx = ask->private; + const struct af_alg_tsgl *sgl; unsigned int i; unsigned int sgl_count = 0; if (!bytes) return 0; - list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) { - struct scatterlist *sg = sgl->sg; + list_for_each_entry(sgl, &ctx->tsgl_list, list) { + const struct scatterlist *sg = sgl->sg; for (i = 0; i < sgl->cur; i++) { size_t bytes_count; @@ -642,8 +637,7 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, } list_del(&sgl->list); - sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) * - (MAX_SGL_ENTS + 1)); + sock_kfree_s(sk, sgl, struct_size(sgl, sg, MAX_SGL_ENTS + 
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
@@ -304,8 +304,6 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
	if (err)
		goto unlock;

-	sk2->sk_family = PF_ALG;
-
	if (nokey || !ask->refcnt++)
		sock_hold(sk);
	ask->nokey_refcnt += nokey;
@@ -382,7 +380,6 @@ static int alg_create(struct net *net, struct socket *sock, int protocol,
	sock->ops = &alg_proto_ops;
	sock_init_data(sock, sk);

-	sk->sk_family = PF_ALG;
	sk->sk_destruct = alg_sock_destruct;

	return 0;
@@ -427,12 +424,12 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
 }
 EXPORT_SYMBOL_GPL(af_alg_make_sg);

-void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new)
+static void af_alg_link_sg(struct af_alg_sgl *sgl_prev,
+			   struct af_alg_sgl *sgl_new)
 {
	sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
	sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg);
 }
-EXPORT_SYMBOL_GPL(af_alg_link_sg);

 void af_alg_free_sg(struct af_alg_sgl *sgl)
 {
@@ -443,7 +440,7 @@ void af_alg_free_sg(struct af_alg_sgl *sgl)
 }
 EXPORT_SYMBOL_GPL(af_alg_free_sg);

-int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
+static int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
 {
	struct cmsghdr *cmsg;

@@ -482,7 +479,6 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)

	return 0;
 }
-EXPORT_SYMBOL_GPL(af_alg_cmsg_send);

 /**
  * af_alg_alloc_tsgl - allocate the TX SGL
@@ -490,7 +486,7 @@ EXPORT_SYMBOL_GPL(af_alg_cmsg_send);
  * @sk socket of connection to user space
  * @return: 0 upon success, < 0 upon error
  */
-int af_alg_alloc_tsgl(struct sock *sk)
+static int af_alg_alloc_tsgl(struct sock *sk)
 {
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
@@ -519,7 +515,6 @@ int af_alg_alloc_tsgl(struct sock *sk)

	return 0;
 }
-EXPORT_SYMBOL_GPL(af_alg_alloc_tsgl);

 /**
  * aead_count_tsgl - Count number of TX SG entries
@@ -534,17 +529,17 @@ EXPORT_SYMBOL_GPL(af_alg_alloc_tsgl);
  */
 unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
 {
-	struct alg_sock *ask = alg_sk(sk);
-	struct af_alg_ctx *ctx = ask->private;
-	struct af_alg_tsgl *sgl, *tmp;
+	const struct alg_sock *ask = alg_sk(sk);
+	const struct af_alg_ctx *ctx = ask->private;
+	const struct af_alg_tsgl *sgl;
	unsigned int i;
	unsigned int sgl_count = 0;

	if (!bytes)
		return 0;

-	list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) {
-		struct scatterlist *sg = sgl->sg;
+	list_for_each_entry(sgl, &ctx->tsgl_list, list) {
+		const struct scatterlist *sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			size_t bytes_count;
@@ -642,8 +637,7 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
		}

		list_del(&sgl->list);
-		sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) *
-					(MAX_SGL_ENTS + 1));
+		sock_kfree_s(sk, sgl, struct_size(sgl, sg, MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
@@ -656,7 +650,7 @@ EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);
  *
  * @areq Request holding the TX and RX SGL
  */
-void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
+static void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
 {
	struct sock *sk = areq->sk;
	struct alg_sock *ask = alg_sk(sk);
@@ -685,7 +679,6 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
		sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
	}
 }
-EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls);

 /**
  * af_alg_wait_for_wmem - wait for availability of writable memory
@@ -694,7 +687,7 @@ EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls);
  * @flags If MSG_DONTWAIT is set, then only report if function would sleep
  * @return 0 when writable memory is available, < 0 upon error
  */
-int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
+static int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
 {
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int err = -ERESTARTSYS;
@@ -719,7 +712,6 @@ int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)

	return err;
 }
-EXPORT_SYMBOL_GPL(af_alg_wait_for_wmem);

 /**
  * af_alg_wmem_wakeup - wakeup caller when writable memory is available
@@ -788,8 +780,7 @@ EXPORT_SYMBOL_GPL(af_alg_wait_for_data);
  *
  * @sk socket of connection to user space
  */
-
-void af_alg_data_wakeup(struct sock *sk)
+static void af_alg_data_wakeup(struct sock *sk)
 {
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
@@ -807,7 +798,6 @@ void af_alg_data_wakeup(struct sock *sk)
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
 }
-EXPORT_SYMBOL_GPL(af_alg_data_wakeup);

 /**
  * af_alg_sendmsg - implementation of sendmsg system call handler
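The sock_kfree_s() change above swaps open-coded size arithmetic for struct_size() from <linux/overflow.h>, which computes the size of a structure plus n trailing flexible-array elements and saturates instead of wrapping on overflow. A minimal sketch of the semantics (struct layout hypothetical, mirroring af_alg_tsgl):

	#include <linux/overflow.h>

	struct tsgl_like {
		struct list_head list;
		unsigned int cur;
		struct scatterlist sg[];	/* flexible array member */
	};

	struct tsgl_like *sgl;
	/* == sizeof(*sgl) + (MAX_SGL_ENTS + 1) * sizeof(sgl->sg[0]),
	 * with overflow checking */
	size_t sz = struct_size(sgl, sg, MAX_SGL_ENTS + 1);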
diff --git a/crypto/ahash.c b/crypto/ahash.c
@@ -86,17 +86,17 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)

 int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 {
	unsigned int alignmask = walk->alignmask;
-	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

-	if (nbytes && walk->offset & alignmask && !err) {
-		walk->offset = ALIGN(walk->offset, alignmask + 1);
-		nbytes = min(nbytes,
-			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
-		walk->entrylen -= nbytes;
+	if (walk->entrylen && (walk->offset & alignmask) && !err) {
+		unsigned int nbytes;
+
+		walk->offset = ALIGN(walk->offset, alignmask + 1);
+		nbytes = min(walk->entrylen,
+			     (unsigned int)(PAGE_SIZE - walk->offset));

		if (nbytes) {
+			walk->entrylen -= nbytes;
			walk->data += walk->offset;
			return nbytes;
		}
@@ -116,7 +116,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
	if (err)
		return err;

-	if (nbytes) {
+	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
@@ -190,6 +190,21 @@ static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
	return ret;
 }

+static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
+			  unsigned int keylen)
+{
+	return -ENOSYS;
+}
+
+static void ahash_set_needkey(struct crypto_ahash *tfm)
+{
+	const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
+
+	if (tfm->setkey != ahash_nosetkey &&
+	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
+		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
+}
+
 int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
 {
@@ -201,20 +216,16 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
	else
		err = tfm->setkey(tfm, key, keylen);

-	if (err)
+	if (unlikely(err)) {
+		ahash_set_needkey(tfm);
		return err;
+	}

	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

-static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
-			  unsigned int keylen)
-{
-	return -ENOSYS;
-}
-
 static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
 {
@@ -489,8 +500,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)

	if (alg->setkey) {
		hash->setkey = alg->setkey;
-		if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
-			crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
+		ahash_set_needkey(hash);
	}

	return 0;
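This is the "set CRYPTO_TFM_NEED_KEY when setkey fails" item from the pull request: a transform whose setkey() fails must not remain usable with stale (or absent) key material, so the flag is re-armed on the error path. From a caller's point of view the resulting contract looks roughly like this (sketch):

	err = crypto_ahash_setkey(tfm, key, keylen);
	if (err) {
		/*
		 * CRYPTO_TFM_NEED_KEY is set again (unless the algorithm is
		 * marked CRYPTO_ALG_OPTIONAL_KEY): hash requests on this tfm
		 * fail with -ENOKEY until a later setkey() succeeds.
		 */
	}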
diff --git a/crypto/algapi.c b/crypto/algapi.c
@@ -494,6 +494,24 @@ out:
 }
 EXPORT_SYMBOL_GPL(crypto_register_template);

+int crypto_register_templates(struct crypto_template *tmpls, int count)
+{
+	int i, err;
+
+	for (i = 0; i < count; i++) {
+		err = crypto_register_template(&tmpls[i]);
+		if (err)
+			goto out;
+	}
+	return 0;
+
+out:
+	for (--i; i >= 0; --i)
+		crypto_unregister_template(&tmpls[i]);
+	return err;
+}
+EXPORT_SYMBOL_GPL(crypto_register_templates);
+
 void crypto_unregister_template(struct crypto_template *tmpl)
 {
	struct crypto_instance *inst;
@@ -523,6 +541,15 @@ void crypto_unregister_template(struct crypto_template *tmpl)
 }
 EXPORT_SYMBOL_GPL(crypto_unregister_template);

+void crypto_unregister_templates(struct crypto_template *tmpls, int count)
+{
+	int i;
+
+	for (i = count - 1; i >= 0; --i)
+		crypto_unregister_template(&tmpls[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_templates);
+
 static struct crypto_template *__crypto_lookup_template(const char *name)
 {
	struct crypto_template *q, *tmpl = NULL;
@@ -608,6 +635,9 @@ int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
 {
	int err = -EAGAIN;

+	if (WARN_ON_ONCE(inst == NULL))
+		return -EINVAL;
+
	spawn->inst = inst;
	spawn->mask = mask;

@@ -845,8 +875,8 @@ int crypto_inst_setname(struct crypto_instance *inst, const char *name,
 }
 EXPORT_SYMBOL_GPL(crypto_inst_setname);

-void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
-			     unsigned int head)
+void *crypto_alloc_instance(const char *name, struct crypto_alg *alg,
+			    unsigned int head)
 {
	struct crypto_instance *inst;
	char *p;
@@ -869,35 +899,6 @@ err_free_inst:
	kfree(p);
	return ERR_PTR(err);
 }
-EXPORT_SYMBOL_GPL(crypto_alloc_instance2);
-
-struct crypto_instance *crypto_alloc_instance(const char *name,
-					      struct crypto_alg *alg)
-{
-	struct crypto_instance *inst;
-	struct crypto_spawn *spawn;
-	int err;
-
-	inst = crypto_alloc_instance2(name, alg, 0);
-	if (IS_ERR(inst))
-		goto out;
-
-	spawn = crypto_instance_ctx(inst);
-	err = crypto_init_spawn(spawn, alg, inst,
-				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
-
-	if (err)
-		goto err_free_inst;
-
-	return inst;
-
-err_free_inst:
-	kfree(inst);
-	inst = ERR_PTR(err);
-
-out:
-	return inst;
-}
 EXPORT_SYMBOL_GPL(crypto_alloc_instance);

 void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen)
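crypto_register_templates() is the "helper to register multiple templates" from the pull request summary; it replaces the register/unwind ladders deleted below in ccm.c, ctr.c, gcm.c and chacha20poly1305.c. A minimal sketch of the intended usage (template names hypothetical):

	static struct crypto_template my_tmpls[] = {
		{ .name = "foo", .create = foo_create, .module = THIS_MODULE },
		{ .name = "bar", .create = bar_create, .module = THIS_MODULE },
	};

	static int __init my_module_init(void)
	{
		/* Registers in array order; on failure, unregisters the
		 * entries that had already succeeded. */
		return crypto_register_templates(my_tmpls, ARRAY_SIZE(my_tmpls));
	}

	static void __exit my_module_exit(void)
	{
		crypto_unregister_templates(my_tmpls, ARRAY_SIZE(my_tmpls));
	}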
diff --git a/crypto/arc4.c b/crypto/arc4.c
@@ -12,14 +12,11 @@
  *
  */

-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/crypto.h>
 #include <crypto/algapi.h>
-
-#define ARC4_MIN_KEY_SIZE	1
-#define ARC4_MAX_KEY_SIZE	256
-#define ARC4_BLOCK_SIZE	1
+#include <crypto/arc4.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/init.h>
+#include <linux/module.h>

 struct arc4_ctx {
	u32 S[256];
@@ -50,6 +47,12 @@ static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
	return 0;
 }

+static int arc4_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key,
+				 unsigned int key_len)
+{
+	return arc4_set_key(&tfm->base, in_key, key_len);
+}
+
 static void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in,
		       unsigned int len)
 {
@@ -92,30 +95,25 @@ static void arc4_crypt_one(struct crypto_tfm *tfm, u8 *out, const u8 *in)
	arc4_crypt(crypto_tfm_ctx(tfm), out, in, 1);
 }

-static int ecb_arc4_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			  struct scatterlist *src, unsigned int nbytes)
+static int ecb_arc4_crypt(struct skcipher_request *req)
 {
-	struct arc4_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
	int err;

-	blkcipher_walk_init(&walk, dst, src, nbytes);
-
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes > 0) {
-		u8 *wsrc = walk.src.virt.addr;
-		u8 *wdst = walk.dst.virt.addr;
-
-		arc4_crypt(ctx, wdst, wsrc, walk.nbytes);
-
-		err = blkcipher_walk_done(desc, &walk, 0);
+		arc4_crypt(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+			   walk.nbytes);
+		err = skcipher_walk_done(&walk, 0);
	}

	return err;
 }

-static struct crypto_alg arc4_algs[2] = { {
+static struct crypto_alg arc4_cipher = {
	.cra_name		=	"arc4",
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	ARC4_BLOCK_SIZE,
@@ -130,34 +128,39 @@ static struct crypto_alg arc4_algs[2] = { {
			.cia_decrypt		=	arc4_crypt_one,
		},
	},
-}, {
-	.cra_name		=	"ecb(arc4)",
-	.cra_priority		=	100,
-	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize		=	ARC4_BLOCK_SIZE,
-	.cra_ctxsize		=	sizeof(struct arc4_ctx),
-	.cra_alignmask		=	0,
-	.cra_type		=	&crypto_blkcipher_type,
-	.cra_module		=	THIS_MODULE,
-	.cra_u			=	{
-		.blkcipher = {
-			.min_keysize	=	ARC4_MIN_KEY_SIZE,
-			.max_keysize	=	ARC4_MAX_KEY_SIZE,
-			.setkey		=	arc4_set_key,
-			.encrypt	=	ecb_arc4_crypt,
-			.decrypt	=	ecb_arc4_crypt,
-		},
-	},
-} };
+};
+
+static struct skcipher_alg arc4_skcipher = {
+	.base.cra_name		=	"ecb(arc4)",
+	.base.cra_priority	=	100,
+	.base.cra_blocksize	=	ARC4_BLOCK_SIZE,
+	.base.cra_ctxsize	=	sizeof(struct arc4_ctx),
+	.base.cra_module	=	THIS_MODULE,
+	.min_keysize		=	ARC4_MIN_KEY_SIZE,
+	.max_keysize		=	ARC4_MAX_KEY_SIZE,
+	.setkey			=	arc4_set_key_skcipher,
+	.encrypt		=	ecb_arc4_crypt,
+	.decrypt		=	ecb_arc4_crypt,
+};

 static int __init arc4_init(void)
 {
-	return crypto_register_algs(arc4_algs, ARRAY_SIZE(arc4_algs));
+	int err;
+
+	err = crypto_register_alg(&arc4_cipher);
+	if (err)
+		return err;
+
+	err = crypto_register_skcipher(&arc4_skcipher);
+	if (err)
+		crypto_unregister_alg(&arc4_cipher);
+	return err;
 }

 static void __exit arc4_exit(void)
 {
-	crypto_unregister_algs(arc4_algs, ARRAY_SIZE(arc4_algs));
+	crypto_unregister_alg(&arc4_cipher);
+	crypto_unregister_skcipher(&arc4_skcipher);
 }

 module_init(arc4_init);
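With the blkcipher implementation gone, "ecb(arc4)" is reached through the regular skcipher API like any other skcipher. A rough sketch of a synchronous in-kernel user (buffer setup and most error handling elided; the wait helpers are the standard ones from <linux/crypto.h>):

	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("ecb(arc4)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	err = crypto_skcipher_setkey(tfm, key, keylen);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, NULL);	/* arc4 has no IV */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);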
diff --git a/crypto/cbc.c b/crypto/cbc.c
@@ -18,34 +18,11 @@
 #include <linux/kernel.h>
 #include <linux/log2.h>
 #include <linux/module.h>
-#include <linux/slab.h>
-
-struct crypto_cbc_ctx {
-	struct crypto_cipher *child;
-};
-
-static int crypto_cbc_setkey(struct crypto_skcipher *parent, const u8 *key,
-			     unsigned int keylen)
-{
-	struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(parent);
-	struct crypto_cipher *child = ctx->child;
-	int err;
-
-	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
-				       CRYPTO_TFM_REQ_MASK);
-	err = crypto_cipher_setkey(child, key, keylen);
-	crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
-					  CRYPTO_TFM_RES_MASK);
-	return err;
-}

 static inline void crypto_cbc_encrypt_one(struct crypto_skcipher *tfm,
					  const u8 *src, u8 *dst)
 {
-	struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-	crypto_cipher_encrypt_one(ctx->child, dst, src);
+	crypto_cipher_encrypt_one(skcipher_cipher_simple(tfm), dst, src);
 }

 static int crypto_cbc_encrypt(struct skcipher_request *req)
@@ -56,9 +33,7 @@ static int crypto_cbc_encrypt(struct skcipher_request *req)
 static inline void crypto_cbc_decrypt_one(struct crypto_skcipher *tfm,
					  const u8 *src, u8 *dst)
 {
-	struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-	crypto_cipher_decrypt_one(ctx->child, dst, src);
+	crypto_cipher_decrypt_one(skcipher_cipher_simple(tfm), dst, src);
 }

 static int crypto_cbc_decrypt(struct skcipher_request *req)
@@ -78,113 +53,33 @@ static int crypto_cbc_decrypt(struct skcipher_request *req)
	return err;
 }

-static int crypto_cbc_init_tfm(struct crypto_skcipher *tfm)
-{
-	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
-	struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
-	struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct crypto_cipher *cipher;
-
-	cipher = crypto_spawn_cipher(spawn);
-	if (IS_ERR(cipher))
-		return PTR_ERR(cipher);
-
-	ctx->child = cipher;
-	return 0;
-}
-
-static void crypto_cbc_exit_tfm(struct crypto_skcipher *tfm)
-{
-	struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-	crypto_free_cipher(ctx->child);
-}
-
-static void crypto_cbc_free(struct skcipher_instance *inst)
-{
-	crypto_drop_skcipher(skcipher_instance_ctx(inst));
-	kfree(inst);
-}
-
 static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
	struct skcipher_instance *inst;
-	struct crypto_attr_type *algt;
-	struct crypto_spawn *spawn;
	struct crypto_alg *alg;
-	u32 mask;
	int err;

-	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
-	if (err)
-		return err;
-
-	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
-	if (!inst)
-		return -ENOMEM;
-
-	algt = crypto_get_attr_type(tb);
-	err = PTR_ERR(algt);
-	if (IS_ERR(algt))
-		goto err_free_inst;
-
-	mask = CRYPTO_ALG_TYPE_MASK |
-		crypto_requires_off(algt->type, algt->mask,
-				    CRYPTO_ALG_NEED_FALLBACK);
-
-	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
-	err = PTR_ERR(alg);
-	if (IS_ERR(alg))
-		goto err_free_inst;
-
-	spawn = skcipher_instance_ctx(inst);
-	err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
-				CRYPTO_ALG_TYPE_MASK);
-	if (err)
-		goto err_put_alg;
-
-	err = crypto_inst_setname(skcipher_crypto_instance(inst), "cbc", alg);
-	if (err)
-		goto err_drop_spawn;
+	inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
+	if (IS_ERR(inst))
+		return PTR_ERR(inst);

	err = -EINVAL;
	if (!is_power_of_2(alg->cra_blocksize))
-		goto err_drop_spawn;
-
-	inst->alg.base.cra_priority = alg->cra_priority;
-	inst->alg.base.cra_blocksize = alg->cra_blocksize;
-	inst->alg.base.cra_alignmask = alg->cra_alignmask;
-
-	inst->alg.ivsize = alg->cra_blocksize;
-	inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
-	inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
-
-	inst->alg.base.cra_ctxsize = sizeof(struct crypto_cbc_ctx);
-
-	inst->alg.init = crypto_cbc_init_tfm;
-	inst->alg.exit = crypto_cbc_exit_tfm;
+		goto out_free_inst;

-	inst->alg.setkey = crypto_cbc_setkey;
	inst->alg.encrypt = crypto_cbc_encrypt;
	inst->alg.decrypt = crypto_cbc_decrypt;

-	inst->free = crypto_cbc_free;
-
	err = skcipher_register_instance(tmpl, inst);
	if (err)
-		goto err_drop_spawn;
-	crypto_mod_put(alg);
+		goto out_free_inst;
+	goto out_put_alg;

-out:
-	return err;
-
-err_drop_spawn:
-	crypto_drop_spawn(spawn);
-err_put_alg:
+out_free_inst:
+	inst->free(inst);
+out_put_alg:
	crypto_mod_put(alg);
-err_free_inst:
-	kfree(inst);
-	goto out;
+	return err;
 }

 static struct crypto_template crypto_cbc_tmpl = {
@@ -207,5 +102,5 @@ module_init(crypto_cbc_module_init);
 module_exit(crypto_cbc_module_exit);

 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("CBC block cipher algorithm");
+MODULE_DESCRIPTION("CBC block cipher mode of operation");
 MODULE_ALIAS_CRYPTO("cbc");
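This is the "helper for simple skcipher modes" conversion: skcipher_alloc_instance_simple() now owns the spawn setup, setkey and init/exit plumbing, so a simple single-cipher template only supplies encrypt/decrypt and any mode-specific overrides. The reduced create function, as used by cbc (and cfb, ctr, ecb, kw, ofb below), has this shape (mode name hypothetical):

	static int crypto_xyz_create(struct crypto_template *tmpl, struct rtattr **tb)
	{
		struct skcipher_instance *inst;
		struct crypto_alg *alg;
		int err;

		inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
		if (IS_ERR(inst))
			return PTR_ERR(inst);

		inst->alg.encrypt = crypto_xyz_encrypt;	/* mode-specific part */
		inst->alg.decrypt = crypto_xyz_decrypt;

		err = skcipher_register_instance(tmpl, inst);
		if (err)
			inst->free(inst);
		crypto_mod_put(alg);
		return err;
	}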
diff --git a/crypto/ccm.c b/crypto/ccm.c
@@ -589,12 +589,6 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
				 mac_name);
 }

-static struct crypto_template crypto_ccm_tmpl = {
-	.name = "ccm",
-	.create = crypto_ccm_create,
-	.module = THIS_MODULE,
-};
-
 static int crypto_ccm_base_create(struct crypto_template *tmpl,
				  struct rtattr **tb)
 {
@@ -618,12 +612,6 @@ static int crypto_ccm_base_create(struct crypto_template *tmpl,
				 cipher_name);
 }

-static struct crypto_template crypto_ccm_base_tmpl = {
-	.name = "ccm_base",
-	.create = crypto_ccm_base_create,
-	.module = THIS_MODULE,
-};
-
 static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key,
				 unsigned int keylen)
 {
@@ -854,12 +842,6 @@ out_free_inst:
	goto out;
 }

-static struct crypto_template crypto_rfc4309_tmpl = {
-	.name = "rfc4309",
-	.create = crypto_rfc4309_create,
-	.module = THIS_MODULE,
-};
-
 static int crypto_cbcmac_digest_setkey(struct crypto_shash *parent,
				       const u8 *inkey, unsigned int keylen)
 {
@@ -999,51 +981,37 @@ out_put_alg:
	return err;
 }

-static struct crypto_template crypto_cbcmac_tmpl = {
-	.name = "cbcmac",
-	.create = cbcmac_create,
-	.free = shash_free_instance,
-	.module = THIS_MODULE,
+static struct crypto_template crypto_ccm_tmpls[] = {
+	{
+		.name = "cbcmac",
+		.create = cbcmac_create,
+		.free = shash_free_instance,
+		.module = THIS_MODULE,
+	}, {
+		.name = "ccm_base",
+		.create = crypto_ccm_base_create,
+		.module = THIS_MODULE,
+	}, {
+		.name = "ccm",
+		.create = crypto_ccm_create,
+		.module = THIS_MODULE,
+	}, {
+		.name = "rfc4309",
+		.create = crypto_rfc4309_create,
+		.module = THIS_MODULE,
+	},
 };

 static int __init crypto_ccm_module_init(void)
 {
-	int err;
-
-	err = crypto_register_template(&crypto_cbcmac_tmpl);
-	if (err)
-		goto out;
-
-	err = crypto_register_template(&crypto_ccm_base_tmpl);
-	if (err)
-		goto out_undo_cbcmac;
-
-	err = crypto_register_template(&crypto_ccm_tmpl);
-	if (err)
-		goto out_undo_base;
-
-	err = crypto_register_template(&crypto_rfc4309_tmpl);
-	if (err)
-		goto out_undo_ccm;
-
-out:
-	return err;
-
-out_undo_ccm:
-	crypto_unregister_template(&crypto_ccm_tmpl);
-out_undo_base:
-	crypto_unregister_template(&crypto_ccm_base_tmpl);
-out_undo_cbcmac:
-	crypto_register_template(&crypto_cbcmac_tmpl);
-	goto out;
+	return crypto_register_templates(crypto_ccm_tmpls,
+					 ARRAY_SIZE(crypto_ccm_tmpls));
 }

 static void __exit crypto_ccm_module_exit(void)
 {
-	crypto_unregister_template(&crypto_rfc4309_tmpl);
-	crypto_unregister_template(&crypto_ccm_tmpl);
-	crypto_unregister_template(&crypto_ccm_base_tmpl);
-	crypto_unregister_template(&crypto_cbcmac_tmpl);
+	crypto_unregister_templates(crypto_ccm_tmpls,
+				    ARRAY_SIZE(crypto_ccm_tmpls));
 }

 module_init(crypto_ccm_module_init);
diff --git a/crypto/cfb.c b/crypto/cfb.c
@@ -25,28 +25,17 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/slab.h>
 #include <linux/string.h>
-#include <linux/types.h>
-
-struct crypto_cfb_ctx {
-	struct crypto_cipher *child;
-};

 static unsigned int crypto_cfb_bsize(struct crypto_skcipher *tfm)
 {
-	struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct crypto_cipher *child = ctx->child;
-
-	return crypto_cipher_blocksize(child);
+	return crypto_cipher_blocksize(skcipher_cipher_simple(tfm));
 }

 static void crypto_cfb_encrypt_one(struct crypto_skcipher *tfm,
				   const u8 *src, u8 *dst)
 {
-	struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-	crypto_cipher_encrypt_one(ctx->child, dst, src);
+	crypto_cipher_encrypt_one(skcipher_cipher_simple(tfm), dst, src);
 }

 /* final encrypt and decrypt is the same */
@@ -77,12 +66,14 @@ static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk,
	do {
		crypto_cfb_encrypt_one(tfm, iv, dst);
		crypto_xor(dst, src, bsize);
-		memcpy(iv, dst, bsize);
+		iv = dst;

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

+	memcpy(walk->iv, iv, bsize);
+
	return nbytes;
 }

@@ -162,7 +153,7 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
	const unsigned int bsize = crypto_cfb_bsize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
-	u8 *iv = walk->iv;
+	u8 * const iv = walk->iv;
	u8 tmp[MAX_CIPHER_BLOCKSIZE];

	do {
@@ -172,8 +163,6 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

-	memcpy(walk->iv, iv, bsize);
-
	return nbytes;
 }

@@ -186,22 +175,6 @@ static int crypto_cfb_decrypt_blocks(struct skcipher_walk *walk,
	return crypto_cfb_decrypt_segment(walk, tfm);
 }

-static int crypto_cfb_setkey(struct crypto_skcipher *parent, const u8 *key,
-			     unsigned int keylen)
-{
-	struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(parent);
-	struct crypto_cipher *child = ctx->child;
-	int err;
-
-	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
-				       CRYPTO_TFM_REQ_MASK);
-	err = crypto_cipher_setkey(child, key, keylen);
-	crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
-					  CRYPTO_TFM_RES_MASK);
-	return err;
-}
-
 static int crypto_cfb_decrypt(struct skcipher_request *req)
 {
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -224,110 +197,34 @@ static int crypto_cfb_decrypt(struct skcipher_request *req)
	return err;
 }

-static int crypto_cfb_init_tfm(struct crypto_skcipher *tfm)
-{
-	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
-	struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
-	struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct crypto_cipher *cipher;
-
-	cipher = crypto_spawn_cipher(spawn);
-	if (IS_ERR(cipher))
-		return PTR_ERR(cipher);
-
-	ctx->child = cipher;
-	return 0;
-}
-
-static void crypto_cfb_exit_tfm(struct crypto_skcipher *tfm)
-{
-	struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-	crypto_free_cipher(ctx->child);
-}
-
-static void crypto_cfb_free(struct skcipher_instance *inst)
-{
-	crypto_drop_skcipher(skcipher_instance_ctx(inst));
-	kfree(inst);
-}
-
 static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
	struct skcipher_instance *inst;
-	struct crypto_attr_type *algt;
-	struct crypto_spawn *spawn;
	struct crypto_alg *alg;
-	u32 mask;
	int err;

-	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
-	if (err)
-		return err;
+	inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
+	if (IS_ERR(inst))
+		return PTR_ERR(inst);

-	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
-	if (!inst)
-		return -ENOMEM;
-
-	algt = crypto_get_attr_type(tb);
-	err = PTR_ERR(algt);
-	if (IS_ERR(algt))
-		goto err_free_inst;
-
-	mask = CRYPTO_ALG_TYPE_MASK |
-		crypto_requires_off(algt->type, algt->mask,
-				    CRYPTO_ALG_NEED_FALLBACK);
-
-	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
-	err = PTR_ERR(alg);
-	if (IS_ERR(alg))
-		goto err_free_inst;
-
-	spawn = skcipher_instance_ctx(inst);
-	err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
-				CRYPTO_ALG_TYPE_MASK);
-	if (err)
-		goto err_put_alg;
-
-	err = crypto_inst_setname(skcipher_crypto_instance(inst), "cfb", alg);
-	if (err)
-		goto err_drop_spawn;
-
-	inst->alg.base.cra_priority = alg->cra_priority;
-	/* we're a stream cipher independend of the crypto cra_blocksize */
+	/* CFB mode is a stream cipher. */
	inst->alg.base.cra_blocksize = 1;
-	inst->alg.base.cra_alignmask = alg->cra_alignmask;
-
-	inst->alg.ivsize = alg->cra_blocksize;
-	inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
-	inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
-
-	inst->alg.base.cra_ctxsize = sizeof(struct crypto_cfb_ctx);

-	inst->alg.init = crypto_cfb_init_tfm;
-	inst->alg.exit = crypto_cfb_exit_tfm;
+	/*
+	 * To simplify the implementation, configure the skcipher walk to only
	 * give a partial block at the very end, never earlier.
+	 */
+	inst->alg.chunksize = alg->cra_blocksize;

-	inst->alg.setkey = crypto_cfb_setkey;
	inst->alg.encrypt = crypto_cfb_encrypt;
	inst->alg.decrypt = crypto_cfb_decrypt;

-	inst->free = crypto_cfb_free;
-
	err = skcipher_register_instance(tmpl, inst);
	if (err)
-		goto err_drop_spawn;
-	crypto_mod_put(alg);
+		inst->free(inst);

-out:
-	return err;
-
-err_drop_spawn:
-	crypto_drop_spawn(spawn);
-err_put_alg:
	crypto_mod_put(alg);
-err_free_inst:
-	kfree(inst);
-	goto out;
+	return err;
 }

 static struct crypto_template crypto_cfb_tmpl = {
@@ -350,5 +247,5 @@ module_init(crypto_cfb_module_init);
 module_exit(crypto_cfb_module_exit);

 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("CFB block cipher algorithm");
+MODULE_DESCRIPTION("CFB block cipher mode of operation");
 MODULE_ALIAS_CRYPTO("cfb");
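The `iv = dst` change in crypto_cfb_encrypt_segment() exploits the structure of CFB: each ciphertext block is the chaining value for the next block, so the out-of-place loop can just point iv at the block it has written instead of copying it every round; only the last block needs to be saved back for the caller. A sketch of the chaining (bsize is the underlying cipher's block size):

	u8 *iv = walk->iv;

	do {
		crypto_cipher_encrypt_one(cipher, dst, iv);	/* dst = E(iv) */
		crypto_xor(dst, src, bsize);			/* dst ^= src  */
		iv = dst;			/* ciphertext is the next IV  */
		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);	/* preserve chaining value once */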
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
@@ -701,37 +701,28 @@ static int rfc7539esp_create(struct crypto_template *tmpl, struct rtattr **tb)
	return chachapoly_create(tmpl, tb, "rfc7539esp", 8);
 }

-static struct crypto_template rfc7539_tmpl = {
-	.name = "rfc7539",
-	.create = rfc7539_create,
-	.module = THIS_MODULE,
-};
-
-static struct crypto_template rfc7539esp_tmpl = {
-	.name = "rfc7539esp",
-	.create = rfc7539esp_create,
-	.module = THIS_MODULE,
+static struct crypto_template rfc7539_tmpls[] = {
+	{
+		.name = "rfc7539",
+		.create = rfc7539_create,
+		.module = THIS_MODULE,
+	}, {
+		.name = "rfc7539esp",
+		.create = rfc7539esp_create,
+		.module = THIS_MODULE,
+	},
 };

 static int __init chacha20poly1305_module_init(void)
 {
-	int err;
-
-	err = crypto_register_template(&rfc7539_tmpl);
-	if (err)
-		return err;
-
-	err = crypto_register_template(&rfc7539esp_tmpl);
-	if (err)
-		crypto_unregister_template(&rfc7539_tmpl);
-
-	return err;
+	return crypto_register_templates(rfc7539_tmpls,
+					 ARRAY_SIZE(rfc7539_tmpls));
 }

 static void __exit chacha20poly1305_module_exit(void)
 {
-	crypto_unregister_template(&rfc7539esp_tmpl);
-	crypto_unregister_template(&rfc7539_tmpl);
+	crypto_unregister_templates(rfc7539_tmpls,
+				    ARRAY_SIZE(rfc7539_tmpls));
 }

 module_init(chacha20poly1305_module_init);
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
@@ -65,6 +65,10 @@ static int null_hash_setkey(struct crypto_shash *tfm, const u8 *key,
			    unsigned int keylen)
 { return 0; }

+static int null_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+				unsigned int keylen)
+{ return 0; }
+
 static int null_setkey(struct crypto_tfm *tfm, const u8 *key,
		       unsigned int keylen)
 { return 0; }
@@ -74,21 +78,18 @@ static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
	memcpy(dst, src, NULL_BLOCK_SIZE);
 }

-static int skcipher_null_crypt(struct blkcipher_desc *desc,
-			       struct scatterlist *dst,
-			       struct scatterlist *src, unsigned int nbytes)
+static int null_skcipher_crypt(struct skcipher_request *req)
 {
-	struct blkcipher_walk walk;
+	struct skcipher_walk walk;
	int err;

-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		if (walk.src.virt.addr != walk.dst.virt.addr)
			memcpy(walk.dst.virt.addr, walk.src.virt.addr,
			       walk.nbytes);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = skcipher_walk_done(&walk, 0);
	}

	return err;
@@ -109,7 +110,22 @@ static struct shash_alg digest_null = {
	}
 };

-static struct crypto_alg null_algs[3] = { {
+static struct skcipher_alg skcipher_null = {
+	.base.cra_name		=	"ecb(cipher_null)",
+	.base.cra_driver_name	=	"ecb-cipher_null",
+	.base.cra_priority	=	100,
+	.base.cra_blocksize	=	NULL_BLOCK_SIZE,
+	.base.cra_ctxsize	=	0,
+	.base.cra_module	=	THIS_MODULE,
+	.min_keysize		=	NULL_KEY_SIZE,
+	.max_keysize		=	NULL_KEY_SIZE,
+	.ivsize			=	NULL_IV_SIZE,
+	.setkey			=	null_skcipher_setkey,
+	.encrypt		=	null_skcipher_crypt,
+	.decrypt		=	null_skcipher_crypt,
+};
+
+static struct crypto_alg null_algs[] = { {
	.cra_name		=	"cipher_null",
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	NULL_BLOCK_SIZE,
@@ -122,22 +138,6 @@ static struct crypto_alg null_algs[3] = { {
	.cia_encrypt		=	null_crypt,
	.cia_decrypt		=	null_crypt } }
 }, {
-	.cra_name		=	"ecb(cipher_null)",
-	.cra_driver_name	=	"ecb-cipher_null",
-	.cra_priority		=	100,
-	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize		=	NULL_BLOCK_SIZE,
-	.cra_type		=	&crypto_blkcipher_type,
-	.cra_ctxsize		=	0,
-	.cra_module		=	THIS_MODULE,
-	.cra_u			=	{ .blkcipher = {
-	.min_keysize		=	NULL_KEY_SIZE,
-	.max_keysize		=	NULL_KEY_SIZE,
-	.ivsize			=	NULL_IV_SIZE,
-	.setkey			= 	null_setkey,
-	.encrypt		=	skcipher_null_crypt,
-	.decrypt		=	skcipher_null_crypt } }
-}, {
	.cra_name		=	"compress_null",
	.cra_flags		=	CRYPTO_ALG_TYPE_COMPRESS,
	.cra_blocksize		=	NULL_BLOCK_SIZE,
@@ -199,8 +199,14 @@ static int __init crypto_null_mod_init(void)
	if (ret < 0)
		goto out_unregister_algs;

+	ret = crypto_register_skcipher(&skcipher_null);
+	if (ret < 0)
+		goto out_unregister_shash;
+
	return 0;

+out_unregister_shash:
+	crypto_unregister_shash(&digest_null);
 out_unregister_algs:
	crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs));
 out:
@@ -209,8 +215,9 @@ out:

 static void __exit crypto_null_mod_fini(void)
 {
-	crypto_unregister_shash(&digest_null);
	crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs));
+	crypto_unregister_shash(&digest_null);
+	crypto_unregister_skcipher(&skcipher_null);
 }

 module_init(crypto_null_mod_init);
diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c
@@ -20,10 +20,6 @@

 #define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))

-static DEFINE_MUTEX(crypto_cfg_mutex);
-
-extern struct sock *crypto_nlsk;
-
 struct crypto_dump_info {
	struct sk_buff *in_skb;
	struct sk_buff *out_skb;
diff --git a/crypto/ctr.c b/crypto/ctr.c
@@ -17,14 +17,8 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/random.h>
-#include <linux/scatterlist.h>
 #include <linux/slab.h>

-struct crypto_ctr_ctx {
-	struct crypto_cipher *child;
-};
-
 struct crypto_rfc3686_ctx {
	struct crypto_skcipher *child;
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
@@ -35,24 +29,7 @@ struct crypto_rfc3686_req_ctx {
	struct skcipher_request subreq CRYPTO_MINALIGN_ATTR;
 };

-static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key,
-			     unsigned int keylen)
-{
-	struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(parent);
-	struct crypto_cipher *child = ctx->child;
-	int err;
-
-	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
-				CRYPTO_TFM_REQ_MASK);
-	err = crypto_cipher_setkey(child, key, keylen);
-	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
-			     CRYPTO_TFM_RES_MASK);
-
-	return err;
-}
-
-static void crypto_ctr_crypt_final(struct blkcipher_walk *walk,
+static void crypto_ctr_crypt_final(struct skcipher_walk *walk,
				   struct crypto_cipher *tfm)
 {
	unsigned int bsize = crypto_cipher_blocksize(tfm);
@@ -70,7 +47,7 @@ static void crypto_ctr_crypt_final(struct blkcipher_walk *walk,
	crypto_inc(ctrblk, bsize);
 }

-static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk,
+static int crypto_ctr_crypt_segment(struct skcipher_walk *walk,
				    struct crypto_cipher *tfm)
 {
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
@@ -96,7 +73,7 @@ static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk,
	return nbytes;
 }

-static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk,
+static int crypto_ctr_crypt_inplace(struct skcipher_walk *walk,
				    struct crypto_cipher *tfm)
 {
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
@@ -123,138 +100,77 @@ static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk,
	return nbytes;
 }

-static int crypto_ctr_crypt(struct blkcipher_desc *desc,
-			    struct scatterlist *dst, struct scatterlist *src,
-			    unsigned int nbytes)
+static int crypto_ctr_crypt(struct skcipher_request *req)
 {
-	struct blkcipher_walk walk;
-	struct crypto_blkcipher *tfm = desc->tfm;
-	struct crypto_ctr_ctx *ctx = crypto_blkcipher_ctx(tfm);
-	struct crypto_cipher *child = ctx->child;
-	unsigned int bsize = crypto_cipher_blocksize(child);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
+	const unsigned int bsize = crypto_cipher_blocksize(cipher);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
	int err;

-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt_block(desc, &walk, bsize);
+	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes >= bsize) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
-			nbytes = crypto_ctr_crypt_inplace(&walk, child);
+			nbytes = crypto_ctr_crypt_inplace(&walk, cipher);
		else
-			nbytes = crypto_ctr_crypt_segment(&walk, child);
+			nbytes = crypto_ctr_crypt_segment(&walk, cipher);

-		err = blkcipher_walk_done(desc, &walk, nbytes);
+		err = skcipher_walk_done(&walk, nbytes);
	}

	if (walk.nbytes) {
-		crypto_ctr_crypt_final(&walk, child);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		crypto_ctr_crypt_final(&walk, cipher);
+		err = skcipher_walk_done(&walk, 0);
	}

	return err;
 }

-static int crypto_ctr_init_tfm(struct crypto_tfm *tfm)
-{
-	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
-	struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct crypto_cipher *cipher;
-
-	cipher = crypto_spawn_cipher(spawn);
-	if (IS_ERR(cipher))
-		return PTR_ERR(cipher);
-
-	ctx->child = cipher;
-
-	return 0;
-}
-
-static void crypto_ctr_exit_tfm(struct crypto_tfm *tfm)
-{
-	struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	crypto_free_cipher(ctx->child);
-}
-
-static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
+static int crypto_ctr_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
-	struct crypto_instance *inst;
-	struct crypto_attr_type *algt;
+	struct skcipher_instance *inst;
	struct crypto_alg *alg;
-	u32 mask;
	int err;

-	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
-	if (err)
-		return ERR_PTR(err);
-
-	algt = crypto_get_attr_type(tb);
-	if (IS_ERR(algt))
-		return ERR_CAST(algt);
-
-	mask = CRYPTO_ALG_TYPE_MASK |
-		crypto_requires_off(algt->type, algt->mask,
-				    CRYPTO_ALG_NEED_FALLBACK);
-
-	alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER, mask);
-	if (IS_ERR(alg))
-		return ERR_CAST(alg);
+	inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
+	if (IS_ERR(inst))
+		return PTR_ERR(inst);

	/* Block size must be >= 4 bytes. */
	err = -EINVAL;
	if (alg->cra_blocksize < 4)
-		goto out_put_alg;
+		goto out_free_inst;

	/* If this is false we'd fail the alignment of crypto_inc. */
	if (alg->cra_blocksize % 4)
-		goto out_put_alg;
-
-	inst = crypto_alloc_instance("ctr", alg);
-	if (IS_ERR(inst))
-		goto out;
-
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
-	inst->alg.cra_priority = alg->cra_priority;
-	inst->alg.cra_blocksize = 1;
-	inst->alg.cra_alignmask = alg->cra_alignmask;
-	inst->alg.cra_type = &crypto_blkcipher_type;
-
-	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
-	inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
-	inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
+		goto out_free_inst;

-	inst->alg.cra_ctxsize = sizeof(struct crypto_ctr_ctx);
+	/* CTR mode is a stream cipher. */
+	inst->alg.base.cra_blocksize = 1;

-	inst->alg.cra_init = crypto_ctr_init_tfm;
-	inst->alg.cra_exit = crypto_ctr_exit_tfm;
+	/*
	 * To simplify the implementation, configure the skcipher walk to only
	 * give a partial block at the very end, never earlier.
+	 */
+	inst->alg.chunksize = alg->cra_blocksize;

-	inst->alg.cra_blkcipher.setkey = crypto_ctr_setkey;
-	inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt;
-	inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt;
+	inst->alg.encrypt = crypto_ctr_crypt;
+	inst->alg.decrypt = crypto_ctr_crypt;

-out:
-	crypto_mod_put(alg);
-	return inst;
+	err = skcipher_register_instance(tmpl, inst);
+	if (err)
+		goto out_free_inst;
+	goto out_put_alg;

+out_free_inst:
+	inst->free(inst);
 out_put_alg:
-	inst = ERR_PTR(err);
-	goto out;
-}
-
-static void crypto_ctr_free(struct crypto_instance *inst)
-{
-	crypto_drop_spawn(crypto_instance_ctx(inst));
-	kfree(inst);
+	crypto_mod_put(alg);
+	return err;
 }

-static struct crypto_template crypto_ctr_tmpl = {
-	.name = "ctr",
-	.alloc = crypto_ctr_alloc,
-	.free = crypto_ctr_free,
-	.module = THIS_MODULE,
-};
-
 static int crypto_rfc3686_setkey(struct crypto_skcipher *parent,
				 const u8 *key, unsigned int keylen)
 {
@@ -444,42 +360,34 @@ err_free_inst:
	goto out;
 }

-static struct crypto_template crypto_rfc3686_tmpl = {
-	.name = "rfc3686",
-	.create = crypto_rfc3686_create,
-	.module = THIS_MODULE,
+static struct crypto_template crypto_ctr_tmpls[] = {
+	{
+		.name = "ctr",
+		.create = crypto_ctr_create,
+		.module = THIS_MODULE,
+	}, {
+		.name = "rfc3686",
+		.create = crypto_rfc3686_create,
+		.module = THIS_MODULE,
+	},
 };

 static int __init crypto_ctr_module_init(void)
 {
-	int err;
-
-	err = crypto_register_template(&crypto_ctr_tmpl);
-	if (err)
-		goto out;
-
-	err = crypto_register_template(&crypto_rfc3686_tmpl);
-	if (err)
-		goto out_drop_ctr;
-
-out:
-	return err;
-
-out_drop_ctr:
-	crypto_unregister_template(&crypto_ctr_tmpl);
-	goto out;
+	return crypto_register_templates(crypto_ctr_tmpls,
+					 ARRAY_SIZE(crypto_ctr_tmpls));
 }

 static void __exit crypto_ctr_module_exit(void)
 {
-	crypto_unregister_template(&crypto_rfc3686_tmpl);
-	crypto_unregister_template(&crypto_ctr_tmpl);
+	crypto_unregister_templates(crypto_ctr_tmpls,
+				    ARRAY_SIZE(crypto_ctr_tmpls));
 }

 module_init(crypto_ctr_module_init);
 module_exit(crypto_ctr_module_exit);

 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("CTR Counter block mode");
+MODULE_DESCRIPTION("CTR block cipher mode of operation");
 MODULE_ALIAS_CRYPTO("rfc3686");
 MODULE_ALIAS_CRYPTO("ctr");
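CTR needs no padding: the loop above handles whole blocks, and crypto_ctr_crypt_final() (whose body is not shown in these hunks) finishes a trailing partial block by generating one keystream block and XORing only the remaining bytes. The idea, sketched with the real helpers (alignment handling omitted):

	/* walk.nbytes < bsize: last, partial block */
	u8 keystream[MAX_CIPHER_BLOCKSIZE];

	crypto_cipher_encrypt_one(cipher, keystream, walk.iv);	/* E(counter) */
	crypto_xor_cpy(walk.dst.virt.addr, keystream,
		       walk.src.virt.addr, walk.nbytes);	/* dst = ks ^ src */
	crypto_inc(walk.iv, bsize);				/* bump counter   */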
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
@@ -789,7 +789,7 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key,

	/* Expand to tmp */
	ret = des_ekey(tmp, key);
-	if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+	if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}
@@ -866,7 +866,7 @@ int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key,

	if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
		     !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
-	    (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+	    (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}
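The rename to CRYPTO_TFM_REQ_FORBID_WEAK_KEYS spells out the existing opt-in semantics: weak keys are only rejected when the user asked for that. A sketch of a caller opting in, assuming the cipher interface (details of flag propagation simplified):

	/* Ask the implementation to reject known weak DES keys. */
	crypto_cipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);

	err = crypto_cipher_setkey(tfm, key, DES_KEY_SIZE);
	if (err == -EINVAL) {
		/* possibly a weak key; CRYPTO_TFM_RES_WEAK_KEY is then set */
	}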
diff --git a/crypto/ecb.c b/crypto/ecb.c
@@ -11,162 +11,83 @@
  */

 #include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>

-struct crypto_ecb_ctx {
-	struct crypto_cipher *child;
-};
-
-static int crypto_ecb_setkey(struct crypto_tfm *parent, const u8 *key,
-			     unsigned int keylen)
-{
-	struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(parent);
-	struct crypto_cipher *child = ctx->child;
-	int err;
-
-	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
-				CRYPTO_TFM_REQ_MASK);
-	err = crypto_cipher_setkey(child, key, keylen);
-	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
-			     CRYPTO_TFM_RES_MASK);
-	return err;
-}
-
-static int crypto_ecb_crypt(struct blkcipher_desc *desc,
-			    struct blkcipher_walk *walk,
-			    struct crypto_cipher *tfm,
+static int crypto_ecb_crypt(struct skcipher_request *req,
+			    struct crypto_cipher *cipher,
			    void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
 {
-	int bsize = crypto_cipher_blocksize(tfm);
+	const unsigned int bsize = crypto_cipher_blocksize(cipher);
+	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

-	err = blkcipher_walk_virt(desc, walk);
+	err = skcipher_walk_virt(&walk, req, false);

-	while ((nbytes = walk->nbytes)) {
-		u8 *wsrc = walk->src.virt.addr;
-		u8 *wdst = walk->dst.virt.addr;
+	while ((nbytes = walk.nbytes) != 0) {
+		const u8 *src = walk.src.virt.addr;
+		u8 *dst = walk.dst.virt.addr;

		do {
-			fn(crypto_cipher_tfm(tfm), wdst, wsrc);
+			fn(crypto_cipher_tfm(cipher), dst, src);

-			wsrc += bsize;
-			wdst += bsize;
+			src += bsize;
+			dst += bsize;
		} while ((nbytes -= bsize) >= bsize);

-		err = blkcipher_walk_done(desc, walk, nbytes);
+		err = skcipher_walk_done(&walk, nbytes);
	}

	return err;
 }

-static int crypto_ecb_encrypt(struct blkcipher_desc *desc,
-			      struct scatterlist *dst, struct scatterlist *src,
-			      unsigned int nbytes)
+static int crypto_ecb_encrypt(struct skcipher_request *req)
 {
-	struct blkcipher_walk walk;
-	struct crypto_blkcipher *tfm = desc->tfm;
-	struct crypto_ecb_ctx *ctx = crypto_blkcipher_ctx(tfm);
-	struct crypto_cipher *child = ctx->child;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return crypto_ecb_crypt(desc, &walk, child,
-				crypto_cipher_alg(child)->cia_encrypt);
-}
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

-static int crypto_ecb_decrypt(struct blkcipher_desc *desc,
-			      struct scatterlist *dst, struct scatterlist *src,
-			      unsigned int nbytes)
-{
-	struct blkcipher_walk walk;
-	struct crypto_blkcipher *tfm = desc->tfm;
-	struct crypto_ecb_ctx *ctx = crypto_blkcipher_ctx(tfm);
-	struct crypto_cipher *child = ctx->child;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return crypto_ecb_crypt(desc, &walk, child,
-				crypto_cipher_alg(child)->cia_decrypt);
+	return crypto_ecb_crypt(req, cipher,
				crypto_cipher_alg(cipher)->cia_encrypt);
 }

-static int crypto_ecb_init_tfm(struct crypto_tfm *tfm)
+static int crypto_ecb_decrypt(struct skcipher_request *req)
 {
-	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
-	struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct crypto_cipher *cipher;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

-	cipher = crypto_spawn_cipher(spawn);
-	if (IS_ERR(cipher))
-		return PTR_ERR(cipher);
-
-	ctx->child = cipher;
-	return 0;
+	return crypto_ecb_crypt(req, cipher,
				crypto_cipher_alg(cipher)->cia_decrypt);
 }

-static void crypto_ecb_exit_tfm(struct crypto_tfm *tfm)
+static int crypto_ecb_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
-	struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm);
-	crypto_free_cipher(ctx->child);
-}
-
-static struct crypto_instance *crypto_ecb_alloc(struct rtattr **tb)
-{
-	struct crypto_instance *inst;
+	struct skcipher_instance *inst;
	struct crypto_alg *alg;
	int err;

-	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
-	if (err)
-		return ERR_PTR(err);
-
-	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
-				  CRYPTO_ALG_TYPE_MASK);
-	if (IS_ERR(alg))
-		return ERR_CAST(alg);
-
-	inst = crypto_alloc_instance("ecb", alg);
+	inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
	if (IS_ERR(inst))
-		goto out_put_alg;
-
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
-	inst->alg.cra_priority = alg->cra_priority;
-	inst->alg.cra_blocksize = alg->cra_blocksize;
-	inst->alg.cra_alignmask = alg->cra_alignmask;
-	inst->alg.cra_type = &crypto_blkcipher_type;
-
-	inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
-	inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
+		return PTR_ERR(inst);

-	inst->alg.cra_ctxsize = sizeof(struct crypto_ecb_ctx);
+	inst->alg.ivsize = 0; /* ECB mode doesn't take an IV */

-	inst->alg.cra_init = crypto_ecb_init_tfm;
-	inst->alg.cra_exit = crypto_ecb_exit_tfm;
+	inst->alg.encrypt = crypto_ecb_encrypt;
+	inst->alg.decrypt = crypto_ecb_decrypt;

-	inst->alg.cra_blkcipher.setkey = crypto_ecb_setkey;
-	inst->alg.cra_blkcipher.encrypt = crypto_ecb_encrypt;
-	inst->alg.cra_blkcipher.decrypt = crypto_ecb_decrypt;
-
-out_put_alg:
+	err = skcipher_register_instance(tmpl, inst);
+	if (err)
+		inst->free(inst);
	crypto_mod_put(alg);
-	return inst;
-}
-
-static void crypto_ecb_free(struct crypto_instance *inst)
-{
-	crypto_drop_spawn(crypto_instance_ctx(inst));
-	kfree(inst);
+	return err;
 }

 static struct crypto_template crypto_ecb_tmpl = {
	.name = "ecb",
-	.alloc = crypto_ecb_alloc,
-	.free = crypto_ecb_free,
+	.create = crypto_ecb_create,
	.module = THIS_MODULE,
 };

@@ -184,5 +105,5 @@ module_init(crypto_ecb_module_init);
 module_exit(crypto_ecb_module_exit);

 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("ECB block cipher algorithm");
+MODULE_DESCRIPTION("ECB block cipher mode of operation");
 MODULE_ALIAS_CRYPTO("ecb");
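The cia_encrypt/cia_decrypt pointers passed into crypto_ecb_crypt() are the single-block entry points of the underlying cipher, so one walker serves both directions. For reference, the relevant fields of struct cipher_alg from <linux/crypto.h> (other members omitted):

	struct cipher_alg {
		unsigned int cia_min_keysize;
		unsigned int cia_max_keysize;
		int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
				  unsigned int keylen);
		void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
		void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	};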
diff --git a/crypto/gcm.c b/crypto/gcm.c
@@ -247,7 +247,7 @@ static int gcm_hash_len(struct aead_request *req, u32 flags)
	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
	struct ahash_request *ahreq = &pctx->u.ahreq;
	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
-	u128 lengths;
+	be128 lengths;

	lengths.a = cpu_to_be64(req->assoclen * 8);
	lengths.b = cpu_to_be64(gctx->cryptlen * 8);
@@ -727,12 +727,6 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
				 ctr_name, "ghash");
 }

-static struct crypto_template crypto_gcm_tmpl = {
-	.name = "gcm",
-	.create = crypto_gcm_create,
-	.module = THIS_MODULE,
-};
-
 static int crypto_gcm_base_create(struct crypto_template *tmpl,
				  struct rtattr **tb)
 {
@@ -756,12 +750,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
				 ctr_name, ghash_name);
 }

-static struct crypto_template crypto_gcm_base_tmpl = {
-	.name = "gcm_base",
-	.create = crypto_gcm_base_create,
-	.module = THIS_MODULE,
-};
-
 static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key,
				 unsigned int keylen)
 {
@@ -989,12 +977,6 @@ out_free_inst:
	goto out;
 }

-static struct crypto_template crypto_rfc4106_tmpl = {
-	.name = "rfc4106",
-	.create = crypto_rfc4106_create,
-	.module = THIS_MODULE,
-};
-
 static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key,
				 unsigned int keylen)
 {
@@ -1231,10 +1213,24 @@ out_free_inst:
	goto out;
 }

-static struct crypto_template crypto_rfc4543_tmpl = {
-	.name = "rfc4543",
-	.create = crypto_rfc4543_create,
-	.module = THIS_MODULE,
+static struct crypto_template crypto_gcm_tmpls[] = {
+	{
+		.name = "gcm_base",
+		.create = crypto_gcm_base_create,
+		.module = THIS_MODULE,
+	}, {
+		.name = "gcm",
+		.create = crypto_gcm_create,
+		.module = THIS_MODULE,
+	}, {
+		.name = "rfc4106",
+		.create = crypto_rfc4106_create,
+		.module = THIS_MODULE,
+	}, {
+		.name = "rfc4543",
+		.create = crypto_rfc4543_create,
+		.module = THIS_MODULE,
+	},
 };

 static int __init crypto_gcm_module_init(void)
@@ -1247,42 +1243,19 @@ static int __init crypto_gcm_module_init(void)
	sg_init_one(&gcm_zeroes->sg, gcm_zeroes->buf, sizeof(gcm_zeroes->buf));

-	err = crypto_register_template(&crypto_gcm_base_tmpl);
-	if (err)
-		goto out;
-
-	err = crypto_register_template(&crypto_gcm_tmpl);
+	err = crypto_register_templates(crypto_gcm_tmpls,
+					ARRAY_SIZE(crypto_gcm_tmpls));
	if (err)
-		goto out_undo_base;
+		kfree(gcm_zeroes);

-	err = crypto_register_template(&crypto_rfc4106_tmpl);
-	if (err)
-		goto out_undo_gcm;
-
-	err = crypto_register_template(&crypto_rfc4543_tmpl);
-	if (err)
-		goto out_undo_rfc4106;
-
-	return 0;
-
-out_undo_rfc4106:
-	crypto_unregister_template(&crypto_rfc4106_tmpl);
-out_undo_gcm:
-	crypto_unregister_template(&crypto_gcm_tmpl);
-out_undo_base:
-	crypto_unregister_template(&crypto_gcm_base_tmpl);
-out:
-	kfree(gcm_zeroes);
	return err;
 }

 static void __exit crypto_gcm_module_exit(void)
 {
	kfree(gcm_zeroes);
-	crypto_unregister_template(&crypto_rfc4543_tmpl);
-	crypto_unregister_template(&crypto_rfc4106_tmpl);
-	crypto_unregister_template(&crypto_gcm_tmpl);
-	crypto_unregister_template(&crypto_gcm_base_tmpl);
+	crypto_unregister_templates(crypto_gcm_tmpls,
+				    ARRAY_SIZE(crypto_gcm_tmpls));
 }

 module_init(crypto_gcm_module_init);
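The u128 -> be128 change in gcm_hash_len() is a type-annotation fix rather than a behavioral one: both 64-bit lengths of the GHASH length block are stored big-endian, so the endian-annotated type lets sparse flag misuse while the bytes on the wire stay the same. For reference, the be128 type from <crypto/b128ops.h> and its use here:

	typedef struct {
		__be64 a, b;
	} be128;

	be128 lengths;
	lengths.a = cpu_to_be64(assoclen * 8);	/* AAD length in bits        */
	lengths.b = cpu_to_be64(cryptlen * 8);	/* ciphertext length in bits */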
diff --git a/crypto/keywrap.c b/crypto/keywrap.c
@@ -56,7 +56,7 @@
  *	u8 *iv = data;
  *	u8 *pt = data + crypto_skcipher_ivsize(tfm);
  *	<ensure that pt contains the plaintext of size ptlen>
- *	sg_init_one(&sg, ptdata, ptlen);
+ *	sg_init_one(&sg, pt, ptlen);
  *	skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv);
  *
  * ==> After encryption, data now contains full KW result as per SP800-38F.
@@ -70,8 +70,8 @@
  *	u8 *iv = data;
  *	u8 *ct = data + crypto_skcipher_ivsize(tfm);
  *	unsigned int ctlen = datalen - crypto_skcipher_ivsize(tfm);
- *	sg_init_one(&sg, ctdata, ctlen);
- *	skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv);
+ *	sg_init_one(&sg, ct, ctlen);
+ *	skcipher_request_set_crypt(req, &sg, &sg, ctlen, iv);
  *
  * ==> After decryption (which hopefully does not return EBADMSG), the ct
  * pointer now points to the plaintext of size ctlen.
@@ -87,10 +87,6 @@
 #include <crypto/scatterwalk.h>
 #include <crypto/internal/skcipher.h>

-struct crypto_kw_ctx {
-	struct crypto_cipher *child;
-};
-
 struct crypto_kw_block {
 #define SEMIBSIZE 8
	__be64 A;
@@ -124,16 +120,13 @@ static void crypto_kw_scatterlist_ff(struct scatter_walk *walk,
	}
 }

-static int crypto_kw_decrypt(struct blkcipher_desc *desc,
-			     struct scatterlist *dst, struct scatterlist *src,
-			     unsigned int nbytes)
+static int crypto_kw_decrypt(struct skcipher_request *req)
 {
-	struct crypto_blkcipher *tfm = desc->tfm;
-	struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm);
-	struct crypto_cipher *child = ctx->child;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
	struct crypto_kw_block block;
-	struct scatterlist *lsrc, *ldst;
-	u64 t = 6 * ((nbytes) >> 3);
+	struct scatterlist *src, *dst;
+	u64 t = 6 * ((req->cryptlen) >> 3);
	unsigned int i;
	int ret = 0;

@@ -141,27 +134,27 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
	 * Require at least 2 semiblocks (note, the 3rd semiblock that is
	 * required by SP800-38F is the IV.
	 */
-	if (nbytes < (2 * SEMIBSIZE) || nbytes % SEMIBSIZE)
+	if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
		return -EINVAL;

	/* Place the IV into block A */
-	memcpy(&block.A, desc->info, SEMIBSIZE);
+	memcpy(&block.A, req->iv, SEMIBSIZE);

	/*
	 * src scatterlist is read-only. dst scatterlist is r/w. During the
-	 * first loop, lsrc points to src and ldst to dst. For any
-	 * subsequent round, the code operates on dst only.
+	 * first loop, src points to req->src and dst to req->dst. For any
+	 * subsequent round, the code operates on req->dst only.
	 */
-	lsrc = src;
-	ldst = dst;
+	src = req->src;
+	dst = req->dst;

	for (i = 0; i < 6; i++) {
		struct scatter_walk src_walk, dst_walk;
-		unsigned int tmp_nbytes = nbytes;
+		unsigned int nbytes = req->cryptlen;

-		while (tmp_nbytes) {
-			/* move pointer by tmp_nbytes in the SGL */
-			crypto_kw_scatterlist_ff(&src_walk, lsrc, tmp_nbytes);
+		while (nbytes) {
+			/* move pointer by nbytes in the SGL */
+			crypto_kw_scatterlist_ff(&src_walk, src, nbytes);
			/* get the source block */
			scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
					       false);
@@ -170,21 +163,21 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
			block.A ^= cpu_to_be64(t);
			t--;
			/* perform KW operation: decrypt block */
-			crypto_cipher_decrypt_one(child, (u8*)&block,
-						  (u8*)&block);
+			crypto_cipher_decrypt_one(cipher, (u8 *)&block,
						  (u8 *)&block);

-			/* move pointer by tmp_nbytes in the SGL */
-			crypto_kw_scatterlist_ff(&dst_walk, ldst, tmp_nbytes);
+			/* move pointer by nbytes in the SGL */
+			crypto_kw_scatterlist_ff(&dst_walk, dst, nbytes);
			/* Copy block->R into place */
			scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
					       true);

-			tmp_nbytes -= SEMIBSIZE;
+			nbytes -= SEMIBSIZE;
		}

		/* we now start to operate on the dst SGL only */
-		lsrc = dst;
-		ldst = dst;
+		src = req->dst;
+		dst = req->dst;
	}

	/* Perform authentication check */
@@ -196,15 +189,12 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
	return ret;
 }

-static int crypto_kw_encrypt(struct blkcipher_desc *desc,
-			     struct scatterlist *dst, struct scatterlist *src,
-			     unsigned int nbytes)
+static int crypto_kw_encrypt(struct skcipher_request *req)
 {
-	struct crypto_blkcipher *tfm = desc->tfm;
-	struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm);
-	struct crypto_cipher *child = ctx->child;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
	struct crypto_kw_block block;
-	struct scatterlist *lsrc, *ldst;
+	struct scatterlist *src, *dst;
	u64 t = 1;
	unsigned int i;

@@ -214,7 +204,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
	 * This means that the dst memory must be one semiblock larger than src.
	 * Also ensure that the given data is aligned to semiblock.
	 */
-	if (nbytes < (2 * SEMIBSIZE) || nbytes % SEMIBSIZE)
+	if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
		return -EINVAL;

	/*
@@ -225,26 +215,26 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,

	/*
	 * src scatterlist is read-only. dst scatterlist is r/w. During the
-	 * first loop, lsrc points to src and ldst to dst. For any
-	 * subsequent round, the code operates on dst only.
+	 * first loop, src points to req->src and dst to req->dst. For any
+	 * subsequent round, the code operates on req->dst only.
	 */
-	lsrc = src;
-	ldst = dst;
+	src = req->src;
+	dst = req->dst;

	for (i = 0; i < 6; i++) {
		struct scatter_walk src_walk, dst_walk;
-		unsigned int tmp_nbytes = nbytes;
+		unsigned int nbytes = req->cryptlen;

-		scatterwalk_start(&src_walk, lsrc);
-		scatterwalk_start(&dst_walk, ldst);
+		scatterwalk_start(&src_walk, src);
+		scatterwalk_start(&dst_walk, dst);

-		while (tmp_nbytes) {
+		while (nbytes) {
			/* get the source block */
			scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
					       false);

			/* perform KW operation: encrypt block */
-			crypto_cipher_encrypt_one(child, (u8 *)&block,
+			crypto_cipher_encrypt_one(cipher, (u8 *)&block,
						  (u8 *)&block);
			/* perform KW operation: modify IV with counter */
			block.A ^= cpu_to_be64(t);
@@ -254,117 +244,59 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
			scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
					       true);

-			tmp_nbytes -= SEMIBSIZE;
+			nbytes -= SEMIBSIZE;
		}

		/* we now start to operate on the dst SGL only */
-		lsrc = dst;
-		ldst = dst;
+		src = req->dst;
+		dst = req->dst;
	}

	/* establish the IV for the caller to pick up */
-	memcpy(desc->info, &block.A, SEMIBSIZE);
+	memcpy(req->iv, &block.A, SEMIBSIZE);

	memzero_explicit(&block, sizeof(struct crypto_kw_block));

	return 0;
 }

-static int crypto_kw_setkey(struct crypto_tfm *parent, const u8 *key,
-			    unsigned int keylen)
+static int crypto_kw_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
-	struct crypto_kw_ctx *ctx = crypto_tfm_ctx(parent);
-	struct crypto_cipher *child = ctx->child;
+	struct skcipher_instance *inst;
+	struct crypto_alg *alg;
	int err;

-	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
-				       CRYPTO_TFM_REQ_MASK);
-	err = crypto_cipher_setkey(child, key, keylen);
-	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
-				     CRYPTO_TFM_RES_MASK);
-	return err;
-}
-
-static int crypto_kw_init_tfm(struct crypto_tfm *tfm)
-{
-	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
-	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
-	struct crypto_kw_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct crypto_cipher *cipher;
-
-	cipher = crypto_spawn_cipher(spawn);
-	if (IS_ERR(cipher))
-		return PTR_ERR(cipher);
-
-	ctx->child = cipher;
-	return 0;
-}
-
-static void crypto_kw_exit_tfm(struct crypto_tfm *tfm)
-{
-	struct crypto_kw_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	crypto_free_cipher(ctx->child);
-}
-
-static struct crypto_instance *crypto_kw_alloc(struct rtattr **tb)
-{
-	struct crypto_instance *inst = NULL;
-	struct crypto_alg *alg = NULL;
-	int err;
-
-	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
-	if (err)
-		return ERR_PTR(err);
-
-	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
-				  CRYPTO_ALG_TYPE_MASK);
-	if (IS_ERR(alg))
-		return ERR_CAST(alg);
+	inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
+	if (IS_ERR(inst))
+		return PTR_ERR(inst);

-	inst = ERR_PTR(-EINVAL);
+	err = -EINVAL;
	/* Section 5.1 requirement for KW */
	if (alg->cra_blocksize != sizeof(struct crypto_kw_block))
-		goto err;
+		goto out_free_inst;

-	inst = crypto_alloc_instance("kw", alg);
-	if (IS_ERR(inst))
-		goto err;
+	inst->alg.base.cra_blocksize = SEMIBSIZE;
+	inst->alg.base.cra_alignmask = 0;
+	inst->alg.ivsize = SEMIBSIZE;

-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
-	inst->alg.cra_priority = alg->cra_priority;
-	inst->alg.cra_blocksize = SEMIBSIZE;
-	inst->alg.cra_alignmask = 0;
-	inst->alg.cra_type = &crypto_blkcipher_type;
-	inst->alg.cra_blkcipher.ivsize = SEMIBSIZE;
-	inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
-	inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
+	inst->alg.encrypt = crypto_kw_encrypt;
+	inst->alg.decrypt = crypto_kw_decrypt;

-	inst->alg.cra_ctxsize = sizeof(struct crypto_kw_ctx);
-
-	inst->alg.cra_init = crypto_kw_init_tfm;
-	inst->alg.cra_exit = crypto_kw_exit_tfm;
-
-	inst->alg.cra_blkcipher.setkey = crypto_kw_setkey;
-	inst->alg.cra_blkcipher.encrypt = crypto_kw_encrypt;
-	inst->alg.cra_blkcipher.decrypt = crypto_kw_decrypt;
+	err = skcipher_register_instance(tmpl, inst);
+	if (err)
+		goto out_free_inst;
+	goto out_put_alg;

-err:
+out_free_inst:
+	inst->free(inst);
+out_put_alg:
	crypto_mod_put(alg);
-	return inst;
-}
-
-static void crypto_kw_free(struct crypto_instance *inst)
-{
-	crypto_drop_spawn(crypto_instance_ctx(inst));
-	kfree(inst);
+	return err;
 }

 static struct crypto_template crypto_kw_tmpl = {
	.name = "kw",
-	.alloc = crypto_kw_alloc,
-	.free = crypto_kw_free,
+	.create = crypto_kw_create,
	.module = THIS_MODULE,
 };
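For orientation, keywrap implements NIST SP800-38F KW over 8-byte semiblocks: six passes over the data, each semiblock passed through the block cipher together with the accumulator A, which absorbs a running counter t. A scalar sketch of the wrap direction, ignoring the scatterlist plumbing the real code needs (encrypt_block() is a hypothetical single AES-block call; n is the semiblock count):

	u64 t = 1;
	for (i = 0; i < 6; i++) {		/* six wrapping rounds    */
		for (j = 0; j < n; j++) {	/* per 8-byte semiblock   */
			blk.A = A;
			blk.R = R[j];
			encrypt_block((u8 *)&blk, (u8 *)&blk);
			A = blk.A ^ cpu_to_be64(t++);	/* counter into A */
			R[j] = blk.R;
		}
	}
	/* A becomes the IV the caller picks up; R[] is the wrapped key. */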
diff --git a/crypto/morus1280.c b/crypto/morus1280.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * The MORUS-1280 Authenticated-Encryption Algorithm
  *
  * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
  * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
  */

 #include <asm/unaligned.h>
@@ -366,18 +362,19 @@ static void crypto_morus1280_process_crypt(struct morus1280_state *state,
					   const struct morus1280_ops *ops)
 {
	struct skcipher_walk walk;
-	u8 *dst;
-	const u8 *src;

	ops->skcipher_walk_init(&walk, req, false);

	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
+		unsigned int nbytes = walk.nbytes;
+
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);

-		ops->crypt_chunk(state, dst, src, walk.nbytes);
+		ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+				 nbytes);

-		skcipher_walk_done(&walk, 0);
+		skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}
 }
diff --git a/crypto/morus640.c b/crypto/morus640.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * The MORUS-640 Authenticated-Encryption Algorithm
  *
  * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
  * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
  */

 #include <asm/unaligned.h>
@@ -365,18 +361,19 @@ static void crypto_morus640_process_crypt(struct morus640_state *state,
					  const struct morus640_ops *ops)
 {
	struct skcipher_walk walk;
-	u8 *dst;
-	const u8 *src;

	ops->skcipher_walk_init(&walk, req, false);

	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
+		unsigned int nbytes = walk.nbytes;
+
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);

-		ops->crypt_chunk(state, dst, src, walk.nbytes);
+		ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+				 nbytes);

-		skcipher_walk_done(&walk, 0);
+		skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}
 }
diff --git a/crypto/ofb.c b/crypto/ofb.c
@@ -5,9 +5,6 @@
  *
  * Copyright (C) 2018 ARM Limited or its affiliates.
  * All rights reserved.
- *
- * Based loosely on public domain code gleaned from libtomcrypt
- * (https://github.com/libtom/libtomcrypt).
  */

 #include <crypto/algapi.h>
@@ -16,189 +13,70 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-
-struct crypto_ofb_ctx {
-	struct crypto_cipher *child;
-	int cnt;
-};

-static int crypto_ofb_setkey(struct crypto_skcipher *parent, const u8 *key,
-			     unsigned int keylen)
+static int crypto_ofb_crypt(struct skcipher_request *req)
 {
-	struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(parent);
-	struct crypto_cipher *child = ctx->child;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
+	const unsigned int bsize = crypto_cipher_blocksize(cipher);
+	struct skcipher_walk walk;
	int err;

-	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
-				CRYPTO_TFM_REQ_MASK);
-	err = crypto_cipher_setkey(child, key, keylen);
-	crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
-				  CRYPTO_TFM_RES_MASK);
-	return err;
-}
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. */ #include <asm/unaligned.h> @@ -365,18 +361,19 @@ static void crypto_morus640_process_crypt(struct morus640_state *state, const struct morus640_ops *ops) { struct skcipher_walk walk; - u8 *dst; - const u8 *src; ops->skcipher_walk_init(&walk, req, false); while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; + unsigned int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); - ops->crypt_chunk(state, dst, src, walk.nbytes); + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes); - skcipher_walk_done(&walk, 0); + skcipher_walk_done(&walk, walk.nbytes - nbytes); } } diff --git a/crypto/ofb.c b/crypto/ofb.c @@ -5,9 +5,6 @@ * * Copyright (C) 2018 ARM Limited or its affiliates. * All rights reserved. - * - * Based loosely on public domain code gleaned from libtomcrypt - * (https://github.com/libtom/libtomcrypt). */ #include <crypto/algapi.h> @@ -16,189 +13,70 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/scatterlist.h> -#include <linux/slab.h> - -struct crypto_ofb_ctx { - struct crypto_cipher *child; - int cnt; -}; - -static int crypto_ofb_setkey(struct crypto_skcipher *parent, const u8 *key, - unsigned int keylen) +static int crypto_ofb_crypt(struct skcipher_request *req) { - struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(parent); - struct crypto_cipher *child = ctx->child; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); + const unsigned int bsize = crypto_cipher_blocksize(cipher); + struct skcipher_walk walk; int err; - crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); - err = crypto_cipher_setkey(child, key, keylen); - crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); - return err; -} - -static int crypto_ofb_encrypt_segment(struct crypto_ofb_ctx *ctx, - struct skcipher_walk *walk, - struct crypto_cipher *tfm) -{ - int bsize = crypto_cipher_blocksize(tfm); - int nbytes = walk->nbytes; - - u8 *src = walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - u8 *iv = walk->iv; - - do { - if (ctx->cnt == bsize) { - if (nbytes < bsize) - break; - crypto_cipher_encrypt_one(tfm, iv, iv); - ctx->cnt = 0; - } - *dst = *src ^ iv[ctx->cnt]; - src++; - dst++; - ctx->cnt++; - } while (--nbytes); - return nbytes; -} - -static int crypto_ofb_encrypt(struct skcipher_request *req) -{ - struct skcipher_walk walk; - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - unsigned int bsize; - struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_cipher *child = ctx->child; - int ret = 0; + err = skcipher_walk_virt(&walk, req, false); - bsize = crypto_cipher_blocksize(child); - ctx->cnt = bsize; + while (walk.nbytes >= bsize) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + u8 * const iv = walk.iv; + unsigned int nbytes = walk.nbytes; - ret = skcipher_walk_virt(&walk, req, false); + do { + crypto_cipher_encrypt_one(cipher, iv, iv); + crypto_xor_cpy(dst, src, iv, bsize); + dst += bsize; + src += bsize; + } while ((nbytes -= bsize) >= bsize); - while (walk.nbytes) { 
- ret = crypto_ofb_encrypt_segment(ctx, &walk, child); - ret = skcipher_walk_done(&walk, ret); + err = skcipher_walk_done(&walk, nbytes); } - return ret; -} - -/* OFB encrypt and decrypt are identical */ -static int crypto_ofb_decrypt(struct skcipher_request *req) -{ - return crypto_ofb_encrypt(req); -} - -static int crypto_ofb_init_tfm(struct crypto_skcipher *tfm) -{ - struct skcipher_instance *inst = skcipher_alg_instance(tfm); - struct crypto_spawn *spawn = skcipher_instance_ctx(inst); - struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_cipher *cipher; - - cipher = crypto_spawn_cipher(spawn); - if (IS_ERR(cipher)) - return PTR_ERR(cipher); - - ctx->child = cipher; - return 0; -} - -static void crypto_ofb_exit_tfm(struct crypto_skcipher *tfm) -{ - struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm); - - crypto_free_cipher(ctx->child); -} - -static void crypto_ofb_free(struct skcipher_instance *inst) -{ - crypto_drop_skcipher(skcipher_instance_ctx(inst)); - kfree(inst); + if (walk.nbytes) { + crypto_cipher_encrypt_one(cipher, walk.iv, walk.iv); + crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, walk.iv, + walk.nbytes); + err = skcipher_walk_done(&walk, 0); + } + return err; } static int crypto_ofb_create(struct crypto_template *tmpl, struct rtattr **tb) { struct skcipher_instance *inst; - struct crypto_attr_type *algt; - struct crypto_spawn *spawn; struct crypto_alg *alg; - u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER); - if (err) - return err; - - inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); - if (!inst) - return -ENOMEM; - - algt = crypto_get_attr_type(tb); - err = PTR_ERR(algt); - if (IS_ERR(algt)) - goto err_free_inst; - - mask = CRYPTO_ALG_TYPE_MASK | - crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK); + inst = skcipher_alloc_instance_simple(tmpl, tb, &alg); + if (IS_ERR(inst)) + return PTR_ERR(inst); - alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask); - err = PTR_ERR(alg); - if (IS_ERR(alg)) - goto err_free_inst; + /* OFB mode is a stream cipher. */ + inst->alg.base.cra_blocksize = 1; - spawn = skcipher_instance_ctx(inst); - err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), - CRYPTO_ALG_TYPE_MASK); - crypto_mod_put(alg); - if (err) - goto err_free_inst; - - err = crypto_inst_setname(skcipher_crypto_instance(inst), "ofb", alg); - if (err) - goto err_drop_spawn; - - inst->alg.base.cra_priority = alg->cra_priority; - inst->alg.base.cra_blocksize = alg->cra_blocksize; - inst->alg.base.cra_alignmask = alg->cra_alignmask; - - /* We access the data as u32s when xoring. */ - inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; + /* + * To simplify the implementation, configure the skcipher walk to only + * give a partial block at the very end, never earlier. 
+ */ + inst->alg.chunksize = alg->cra_blocksize; - inst->alg.ivsize = alg->cra_blocksize; - inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize; - inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize; - - inst->alg.base.cra_ctxsize = sizeof(struct crypto_ofb_ctx); - - inst->alg.init = crypto_ofb_init_tfm; - inst->alg.exit = crypto_ofb_exit_tfm; - - inst->alg.setkey = crypto_ofb_setkey; - inst->alg.encrypt = crypto_ofb_encrypt; - inst->alg.decrypt = crypto_ofb_decrypt; - - inst->free = crypto_ofb_free; + inst->alg.encrypt = crypto_ofb_crypt; + inst->alg.decrypt = crypto_ofb_crypt; err = skcipher_register_instance(tmpl, inst); if (err) - goto err_drop_spawn; + inst->free(inst); -out: + crypto_mod_put(alg); return err; - -err_drop_spawn: - crypto_drop_spawn(spawn); -err_free_inst: - kfree(inst); - goto out; } static struct crypto_template crypto_ofb_tmpl = { @@ -221,5 +99,5 @@ module_init(crypto_ofb_module_init); module_exit(crypto_ofb_module_exit); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("OFB block cipher algorithm"); +MODULE_DESCRIPTION("OFB block cipher mode of operation"); MODULE_ALIAS_CRYPTO("ofb"); diff --git a/crypto/pcbc.c b/crypto/pcbc.c @@ -20,28 +20,6 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/slab.h> -#include <linux/compiler.h> - -struct crypto_pcbc_ctx { - struct crypto_cipher *child; -}; - -static int crypto_pcbc_setkey(struct crypto_skcipher *parent, const u8 *key, - unsigned int keylen) -{ - struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(parent); - struct crypto_cipher *child = ctx->child; - int err; - - crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); - err = crypto_cipher_setkey(child, key, keylen); - crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); - return err; -} static int crypto_pcbc_encrypt_segment(struct skcipher_request *req, struct skcipher_walk *walk, @@ -51,7 +29,7 @@ static int crypto_pcbc_encrypt_segment(struct skcipher_request *req, unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; - u8 *iv = walk->iv; + u8 * const iv = walk->iv; do { crypto_xor(iv, src, bsize); @@ -72,7 +50,7 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req, int bsize = crypto_cipher_blocksize(tfm); unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; - u8 *iv = walk->iv; + u8 * const iv = walk->iv; u8 tmpbuf[MAX_CIPHER_BLOCKSIZE]; do { @@ -84,16 +62,13 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req, src += bsize; } while ((nbytes -= bsize) >= bsize); - memcpy(walk->iv, iv, bsize); - return nbytes; } static int crypto_pcbc_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_cipher *child = ctx->child; + struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); struct skcipher_walk walk; unsigned int nbytes; int err; @@ -103,10 +78,10 @@ static int crypto_pcbc_encrypt(struct skcipher_request *req) while ((nbytes = walk.nbytes)) { if (walk.src.virt.addr == walk.dst.virt.addr) nbytes = crypto_pcbc_encrypt_inplace(req, &walk, - child); + cipher); else nbytes = crypto_pcbc_encrypt_segment(req, &walk, - child); + cipher); err = skcipher_walk_done(&walk, nbytes); } @@ -121,7 +96,7 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req, unsigned int 
nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; - u8 *iv = walk->iv; + u8 * const iv = walk->iv; do { crypto_cipher_decrypt_one(tfm, dst, src); @@ -132,8 +107,6 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req, dst += bsize; } while ((nbytes -= bsize) >= bsize); - memcpy(walk->iv, iv, bsize); - return nbytes; } @@ -144,7 +117,7 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req, int bsize = crypto_cipher_blocksize(tfm); unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; - u8 *iv = walk->iv; + u8 * const iv = walk->iv; u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32)); do { @@ -156,16 +129,13 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req, src += bsize; } while ((nbytes -= bsize) >= bsize); - memcpy(walk->iv, iv, bsize); - return nbytes; } static int crypto_pcbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_cipher *child = ctx->child; + struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); struct skcipher_walk walk; unsigned int nbytes; int err; @@ -175,117 +145,34 @@ static int crypto_pcbc_decrypt(struct skcipher_request *req) while ((nbytes = walk.nbytes)) { if (walk.src.virt.addr == walk.dst.virt.addr) nbytes = crypto_pcbc_decrypt_inplace(req, &walk, - child); + cipher); else nbytes = crypto_pcbc_decrypt_segment(req, &walk, - child); + cipher); err = skcipher_walk_done(&walk, nbytes); } return err; } -static int crypto_pcbc_init_tfm(struct crypto_skcipher *tfm) -{ - struct skcipher_instance *inst = skcipher_alg_instance(tfm); - struct crypto_spawn *spawn = skcipher_instance_ctx(inst); - struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_cipher *cipher; - - cipher = crypto_spawn_cipher(spawn); - if (IS_ERR(cipher)) - return PTR_ERR(cipher); - - ctx->child = cipher; - return 0; -} - -static void crypto_pcbc_exit_tfm(struct crypto_skcipher *tfm) -{ - struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm); - - crypto_free_cipher(ctx->child); -} - -static void crypto_pcbc_free(struct skcipher_instance *inst) -{ - crypto_drop_skcipher(skcipher_instance_ctx(inst)); - kfree(inst); -} - static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb) { struct skcipher_instance *inst; - struct crypto_attr_type *algt; - struct crypto_spawn *spawn; struct crypto_alg *alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if (((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) & - ~CRYPTO_ALG_INTERNAL) - return -EINVAL; - - inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); - if (!inst) - return -ENOMEM; - - alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER | - (algt->type & CRYPTO_ALG_INTERNAL), - CRYPTO_ALG_TYPE_MASK | - (algt->mask & CRYPTO_ALG_INTERNAL)); - err = PTR_ERR(alg); - if (IS_ERR(alg)) - goto err_free_inst; - - spawn = skcipher_instance_ctx(inst); - err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), - CRYPTO_ALG_TYPE_MASK); - if (err) - goto err_put_alg; - - err = crypto_inst_setname(skcipher_crypto_instance(inst), "pcbc", alg); - if (err) - goto err_drop_spawn; + inst = skcipher_alloc_instance_simple(tmpl, tb, &alg); + if (IS_ERR(inst)) + return PTR_ERR(inst); - inst->alg.base.cra_flags = alg->cra_flags & CRYPTO_ALG_INTERNAL; - inst->alg.base.cra_priority = alg->cra_priority; - inst->alg.base.cra_blocksize = 
alg->cra_blocksize; - inst->alg.base.cra_alignmask = alg->cra_alignmask; - - inst->alg.ivsize = alg->cra_blocksize; - inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize; - inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize; - - inst->alg.base.cra_ctxsize = sizeof(struct crypto_pcbc_ctx); - - inst->alg.init = crypto_pcbc_init_tfm; - inst->alg.exit = crypto_pcbc_exit_tfm; - - inst->alg.setkey = crypto_pcbc_setkey; inst->alg.encrypt = crypto_pcbc_encrypt; inst->alg.decrypt = crypto_pcbc_decrypt; - inst->free = crypto_pcbc_free; - err = skcipher_register_instance(tmpl, inst); if (err) - goto err_drop_spawn; + inst->free(inst); crypto_mod_put(alg); - -out: return err; - -err_drop_spawn: - crypto_drop_spawn(spawn); -err_put_alg: - crypto_mod_put(alg); -err_free_inst: - kfree(inst); - goto out; } static struct crypto_template crypto_pcbc_tmpl = { @@ -308,5 +195,5 @@ module_init(crypto_pcbc_module_init); module_exit(crypto_pcbc_module_exit); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("PCBC block cipher algorithm"); +MODULE_DESCRIPTION("PCBC block cipher mode of operation"); MODULE_ALIAS_CRYPTO("pcbc"); diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c @@ -12,6 +12,7 @@ #include <crypto/algapi.h> #include <crypto/akcipher.h> #include <crypto/internal/akcipher.h> +#include <crypto/internal/rsa.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> diff --git a/crypto/seqiv.c b/crypto/seqiv.c @@ -89,13 +89,12 @@ static int seqiv_aead_encrypt(struct aead_request *req) if (unlikely(!IS_ALIGNED((unsigned long)info, crypto_aead_alignmask(geniv) + 1))) { - info = kmalloc(ivsize, req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL: - GFP_ATOMIC); + info = kmemdup(req->iv, ivsize, req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP ? 
GFP_KERNEL : + GFP_ATOMIC); if (!info) return -ENOMEM; - memcpy(info, req->iv, ivsize); compl = seqiv_aead_encrypt_complete; data = req; } diff --git a/crypto/shash.c b/crypto/shash.c @@ -53,6 +53,13 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, return err; } +static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg) +{ + if (crypto_shash_alg_has_setkey(alg) && + !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) + crypto_shash_set_flags(tfm, CRYPTO_TFM_NEED_KEY); +} + int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { @@ -65,8 +72,10 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, else err = shash->setkey(tfm, key, keylen); - if (err) + if (unlikely(err)) { + shash_set_needkey(tfm, shash); return err; + } crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; @@ -373,15 +382,14 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) crt->final = shash_async_final; crt->finup = shash_async_finup; crt->digest = shash_async_digest; - crt->setkey = shash_async_setkey; + if (crypto_shash_alg_has_setkey(alg)) + crt->setkey = shash_async_setkey; crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) & CRYPTO_TFM_NEED_KEY); - if (alg->export) - crt->export = shash_async_export; - if (alg->import) - crt->import = shash_async_import; + crt->export = shash_async_export; + crt->import = shash_async_import; crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); @@ -395,9 +403,7 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm) hash->descsize = alg->descsize; - if (crypto_shash_alg_has_setkey(alg) && - !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) - crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY); + shash_set_needkey(hash, alg); return 0; } @@ -464,6 +470,9 @@ static int shash_prepare_alg(struct shash_alg *alg) alg->statesize > HASH_MAX_STATESIZE) return -EINVAL; + if ((alg->export && !alg->import) || (alg->import && !alg->export)) + return -EINVAL; + base->cra_type = &crypto_shash_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_SHASH; diff --git a/crypto/skcipher.c b/crypto/skcipher.c @@ -585,6 +585,12 @@ static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) return crypto_alg_extsize(alg); } +static void skcipher_set_needkey(struct crypto_skcipher *tfm) +{ + if (tfm->keysize) + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY); +} + static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { @@ -598,8 +604,10 @@ static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm, err = crypto_blkcipher_setkey(blkcipher, key, keylen); crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) & CRYPTO_TFM_RES_MASK); - if (err) + if (unlikely(err)) { + skcipher_set_needkey(tfm); return err; + } crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; @@ -677,8 +685,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm) skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher); skcipher->keysize = calg->cra_blkcipher.max_keysize; - if (skcipher->keysize) - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY); + skcipher_set_needkey(skcipher); return 0; } @@ -698,8 +705,10 @@ static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm, crypto_skcipher_set_flags(tfm, crypto_ablkcipher_get_flags(ablkcipher) & CRYPTO_TFM_RES_MASK); - if (err) + if (unlikely(err)) { + skcipher_set_needkey(tfm); return err; + } 
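The cluster of setkey changes above and below all implement one rule from this merge: when ->setkey() fails, CRYPTO_TFM_NEED_KEY must be set again so a partially programmed key can never be used. A sketch of the resulting shape, where do_setkey() is a hypothetical backend and skcipher_set_needkey() is the helper introduced above:

	static int example_setkey(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
	{
		int err;

		err = do_setkey(tfm, key, keylen);	/* hypothetical backend */
		if (unlikely(err)) {
			/* Key state is now indeterminate: demand a fresh key. */
			skcipher_set_needkey(tfm);
			return err;
		}

		crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
		return 0;
	}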
crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; @@ -776,8 +785,7 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm) sizeof(struct ablkcipher_request); skcipher->keysize = calg->cra_ablkcipher.max_keysize; - if (skcipher->keysize) - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY); + skcipher_set_needkey(skcipher); return 0; } @@ -820,8 +828,10 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, else err = cipher->setkey(tfm, key, keylen); - if (err) + if (unlikely(err)) { + skcipher_set_needkey(tfm); return err; + } crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; @@ -852,8 +862,7 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) skcipher->ivsize = alg->ivsize; skcipher->keysize = alg->max_keysize; - if (skcipher->keysize) - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY); + skcipher_set_needkey(skcipher); if (alg->exit) skcipher->base.exit = crypto_skcipher_exit_tfm; @@ -1058,5 +1067,136 @@ int skcipher_register_instance(struct crypto_template *tmpl, } EXPORT_SYMBOL_GPL(skcipher_register_instance); +static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); + int err; + + crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK); + crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + err = crypto_cipher_setkey(cipher, key, keylen); + crypto_skcipher_set_flags(tfm, crypto_cipher_get_flags(cipher) & + CRYPTO_TFM_RES_MASK); + return err; +} + +static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm) +{ + struct skcipher_instance *inst = skcipher_alg_instance(tfm); + struct crypto_spawn *spawn = skcipher_instance_ctx(inst); + struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm); + struct crypto_cipher *cipher; + + cipher = crypto_spawn_cipher(spawn); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + ctx->cipher = cipher; + return 0; +} + +static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm) +{ + struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm); + + crypto_free_cipher(ctx->cipher); +} + +static void skcipher_free_instance_simple(struct skcipher_instance *inst) +{ + crypto_drop_spawn(skcipher_instance_ctx(inst)); + kfree(inst); +} + +/** + * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode + * + * Allocate an skcipher_instance for a simple block cipher mode of operation, + * e.g. cbc or ecb. The instance context will have just a single crypto_spawn, + * that for the underlying cipher. The {min,max}_keysize, ivsize, blocksize, + * alignmask, and priority are set from the underlying cipher but can be + * overridden if needed. The tfm context defaults to skcipher_ctx_simple, and + * default ->setkey(), ->init(), and ->exit() methods are installed. + * + * @tmpl: the template being instantiated + * @tb: the template parameters + * @cipher_alg_ret: on success, a pointer to the underlying cipher algorithm is + * returned here. It must be dropped with crypto_mod_put(). + * + * Return: a pointer to the new instance, or an ERR_PTR(). The caller still + * needs to register the instance. 
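In practice a template ->create() built on this helper collapses to a few lines. The following hypothetical example (the crypto_example_*() names are placeholders) mirrors the kw, ofb and pcbc conversions elsewhere in this commit:

	static int crypto_example_create(struct crypto_template *tmpl,
					 struct rtattr **tb)
	{
		struct skcipher_instance *inst;
		struct crypto_alg *alg;
		int err;

		inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
		if (IS_ERR(inst))
			return PTR_ERR(inst);

		/* Override the defaults as needed, then hook up the mode. */
		inst->alg.encrypt = crypto_example_crypt;	/* placeholder */
		inst->alg.decrypt = crypto_example_crypt;	/* placeholder */

		err = skcipher_register_instance(tmpl, inst);
		if (err)
			inst->free(inst);

		crypto_mod_put(alg);
		return err;
	}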
+ */ +struct skcipher_instance * +skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb, + struct crypto_alg **cipher_alg_ret) +{ + struct crypto_attr_type *algt; + struct crypto_alg *cipher_alg; + struct skcipher_instance *inst; + struct crypto_spawn *spawn; + u32 mask; + int err; + + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return ERR_CAST(algt); + + if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) + return ERR_PTR(-EINVAL); + + mask = CRYPTO_ALG_TYPE_MASK | + crypto_requires_off(algt->type, algt->mask, + CRYPTO_ALG_NEED_FALLBACK); + + cipher_alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask); + if (IS_ERR(cipher_alg)) + return ERR_CAST(cipher_alg); + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) { + err = -ENOMEM; + goto err_put_cipher_alg; + } + spawn = skcipher_instance_ctx(inst); + + err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name, + cipher_alg); + if (err) + goto err_free_inst; + + err = crypto_init_spawn(spawn, cipher_alg, + skcipher_crypto_instance(inst), + CRYPTO_ALG_TYPE_MASK); + if (err) + goto err_free_inst; + inst->free = skcipher_free_instance_simple; + + /* Default algorithm properties, can be overridden */ + inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize; + inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask; + inst->alg.base.cra_priority = cipher_alg->cra_priority; + inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize; + inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize; + inst->alg.ivsize = cipher_alg->cra_blocksize; + + /* Use skcipher_ctx_simple by default, can be overridden */ + inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple); + inst->alg.setkey = skcipher_setkey_simple; + inst->alg.init = skcipher_init_tfm_simple; + inst->alg.exit = skcipher_exit_tfm_simple; + + *cipher_alg_ret = cipher_alg; + return inst; + +err_free_inst: + kfree(inst); +err_put_cipher_alg: + crypto_mod_put(cipher_alg); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Symmetric key cipher type"); diff --git a/crypto/streebog_generic.c b/crypto/streebog_generic.c @@ -960,7 +960,7 @@ static int streebog_init(struct shash_desc *desc) memset(ctx, 0, sizeof(struct streebog_state)); for (i = 0; i < 8; i++) { if (digest_size == STREEBOG256_DIGEST_SIZE) - ctx->h.qword[i] = 0x0101010101010101ULL; + ctx->h.qword[i] = cpu_to_le64(0x0101010101010101ULL); } return 0; } diff --git a/crypto/testmgr.c b/crypto/testmgr.c @@ -5,6 +5,7 @@ * Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org> * Copyright (c) 2007 Nokia Siemens Networks * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au> + * Copyright (c) 2019 Google LLC * * Updated RFC4106 AES-GCM testing. 
* Authors: Aidan O'Mahony (aidan.o.mahony@intel.com) @@ -26,6 +27,8 @@ #include <linux/err.h> #include <linux/fips.h> #include <linux/module.h> +#include <linux/once.h> +#include <linux/random.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <linux/string.h> @@ -41,6 +44,16 @@ static bool notests; module_param(notests, bool, 0644); MODULE_PARM_DESC(notests, "disable crypto self-tests"); +#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS +static bool noextratests; +module_param(noextratests, bool, 0644); +MODULE_PARM_DESC(noextratests, "disable expensive crypto self-tests"); + +static unsigned int fuzz_iterations = 100; +module_param(fuzz_iterations, uint, 0644); +MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations"); +#endif + #ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS /* a perfect nop */ @@ -59,28 +72,14 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) #define XBUFSIZE 8 /* - * Indexes into the xbuf to simulate cross-page access. - */ -#define IDX1 32 -#define IDX2 32400 -#define IDX3 1511 -#define IDX4 8193 -#define IDX5 22222 -#define IDX6 17101 -#define IDX7 27333 -#define IDX8 3000 - -/* * Used by test_cipher() */ #define ENCRYPT 1 #define DECRYPT 0 struct aead_test_suite { - struct { - const struct aead_testvec *vecs; - unsigned int count; - } enc, dec; + const struct aead_testvec *vecs; + unsigned int count; }; struct cipher_test_suite { @@ -138,9 +137,6 @@ struct alg_test_desc { } suite; }; -static const unsigned int IDX[8] = { - IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 }; - static void hexdump(unsigned char *buf, unsigned int len) { print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, @@ -148,12 +144,12 @@ static void hexdump(unsigned char *buf, unsigned int len) buf, len, false); } -static int testmgr_alloc_buf(char *buf[XBUFSIZE]) +static int __testmgr_alloc_buf(char *buf[XBUFSIZE], int order) { int i; for (i = 0; i < XBUFSIZE; i++) { - buf[i] = (void *)__get_free_page(GFP_KERNEL); + buf[i] = (char *)__get_free_pages(GFP_KERNEL, order); if (!buf[i]) goto err_free_buf; } @@ -162,858 +158,1266 @@ static int testmgr_alloc_buf(char *buf[XBUFSIZE]) err_free_buf: while (i-- > 0) - free_page((unsigned long)buf[i]); + free_pages((unsigned long)buf[i], order); return -ENOMEM; } -static void testmgr_free_buf(char *buf[XBUFSIZE]) +static int testmgr_alloc_buf(char *buf[XBUFSIZE]) +{ + return __testmgr_alloc_buf(buf, 0); +} + +static void __testmgr_free_buf(char *buf[XBUFSIZE], int order) { int i; for (i = 0; i < XBUFSIZE; i++) - free_page((unsigned long)buf[i]); + free_pages((unsigned long)buf[i], order); } -static int ahash_guard_result(char *result, char c, int size) +static void testmgr_free_buf(char *buf[XBUFSIZE]) { - int i; + __testmgr_free_buf(buf, 0); +} - for (i = 0; i < size; i++) { - if (result[i] != c) - return -EINVAL; - } +#define TESTMGR_POISON_BYTE 0xfe +#define TESTMGR_POISON_LEN 16 - return 0; +static inline void testmgr_poison(void *addr, size_t len) +{ + memset(addr, TESTMGR_POISON_BYTE, len); } -static int ahash_partial_update(struct ahash_request **preq, - struct crypto_ahash *tfm, const struct hash_testvec *template, - void *hash_buff, int k, int temp, struct scatterlist *sg, - const char *algo, char *result, struct crypto_wait *wait) +/* Is the memory region still fully poisoned? 
*/ +static inline bool testmgr_is_poison(const void *addr, size_t len) { - char *state; - struct ahash_request *req; - int statesize, ret = -EINVAL; - static const unsigned char guard[] = { 0x00, 0xba, 0xad, 0x00 }; - int digestsize = crypto_ahash_digestsize(tfm); - - req = *preq; - statesize = crypto_ahash_statesize( - crypto_ahash_reqtfm(req)); - state = kmalloc(statesize + sizeof(guard), GFP_KERNEL); - if (!state) { - pr_err("alg: hash: Failed to alloc state for %s\n", algo); - goto out_nostate; - } - memcpy(state + statesize, guard, sizeof(guard)); - memset(result, 1, digestsize); - ret = crypto_ahash_export(req, state); - WARN_ON(memcmp(state + statesize, guard, sizeof(guard))); - if (ret) { - pr_err("alg: hash: Failed to export() for %s\n", algo); - goto out; - } - ret = ahash_guard_result(result, 1, digestsize); - if (ret) { - pr_err("alg: hash: Failed, export used req->result for %s\n", - algo); - goto out; - } - ahash_request_free(req); - req = ahash_request_alloc(tfm, GFP_KERNEL); - if (!req) { - pr_err("alg: hash: Failed to alloc request for %s\n", algo); - goto out_noreq; - } - ahash_request_set_callback(req, - CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_req_done, wait); - - memcpy(hash_buff, template->plaintext + temp, - template->tap[k]); - sg_init_one(&sg[0], hash_buff, template->tap[k]); - ahash_request_set_crypt(req, sg, result, template->tap[k]); - ret = crypto_ahash_import(req, state); - if (ret) { - pr_err("alg: hash: Failed to import() for %s\n", algo); - goto out; - } - ret = ahash_guard_result(result, 1, digestsize); - if (ret) { - pr_err("alg: hash: Failed, import used req->result for %s\n", - algo); - goto out; - } - ret = crypto_wait_req(crypto_ahash_update(req), wait); - if (ret) - goto out; - *preq = req; - ret = 0; - goto out_noreq; -out: - ahash_request_free(req); -out_noreq: - kfree(state); -out_nostate: - return ret; + return memchr_inv(addr, TESTMGR_POISON_BYTE, len) == NULL; } -enum hash_test { - HASH_TEST_DIGEST, - HASH_TEST_FINAL, - HASH_TEST_FINUP +/* flush type for hash algorithms */ +enum flush_type { + /* merge with update of previous buffer(s) */ + FLUSH_TYPE_NONE = 0, + + /* update with previous buffer(s) before doing this one */ + FLUSH_TYPE_FLUSH, + + /* likewise, but also export and re-import the intermediate state */ + FLUSH_TYPE_REIMPORT, }; -static int __test_hash(struct crypto_ahash *tfm, - const struct hash_testvec *template, unsigned int tcount, - enum hash_test test_type, const int align_offset) -{ - const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); - size_t digest_size = crypto_ahash_digestsize(tfm); - unsigned int i, j, k, temp; - struct scatterlist sg[8]; - char *result; - char *key; - struct ahash_request *req; - struct crypto_wait wait; - void *hash_buff; - char *xbuf[XBUFSIZE]; - int ret = -ENOMEM; +/* finalization function for hash algorithms */ +enum finalization_type { + FINALIZATION_TYPE_FINAL, /* use final() */ + FINALIZATION_TYPE_FINUP, /* use finup() */ + FINALIZATION_TYPE_DIGEST, /* use digest() */ +}; - result = kmalloc(digest_size, GFP_KERNEL); - if (!result) - return ret; - key = kmalloc(MAX_KEYLEN, GFP_KERNEL); - if (!key) - goto out_nobuf; - if (testmgr_alloc_buf(xbuf)) - goto out_nobuf; +#define TEST_SG_TOTAL 10000 - crypto_init_wait(&wait); +/** + * struct test_sg_division - description of a scatterlist entry + * + * This struct describes one entry of a scatterlist being constructed to check a + * crypto test vector. 
+ * + * @proportion_of_total: length of this chunk relative to the total length, + * given as a proportion out of TEST_SG_TOTAL so that it + * scales to fit any test vector + * @offset: byte offset into a 2-page buffer at which this chunk will start + * @offset_relative_to_alignmask: if true, add the algorithm's alignmask to the + * @offset + * @flush_type: for hashes, whether an update() should be done now vs. + * continuing to accumulate data + */ +struct test_sg_division { + unsigned int proportion_of_total; + unsigned int offset; + bool offset_relative_to_alignmask; + enum flush_type flush_type; +}; - req = ahash_request_alloc(tfm, GFP_KERNEL); - if (!req) { - printk(KERN_ERR "alg: hash: Failed to allocate request for " - "%s\n", algo); - goto out_noreq; +/** + * struct testvec_config - configuration for testing a crypto test vector + * + * This struct describes the data layout and other parameters with which each + * crypto test vector can be tested. + * + * @name: name of this config, logged for debugging purposes if a test fails + * @inplace: operate on the data in-place, if applicable for the algorithm type? + * @req_flags: extra request_flags, e.g. CRYPTO_TFM_REQ_MAY_SLEEP + * @src_divs: description of how to arrange the source scatterlist + * @dst_divs: description of how to arrange the dst scatterlist, if applicable + * for the algorithm type. Defaults to @src_divs if unset. + * @iv_offset: misalignment of the IV in the range [0..MAX_ALGAPI_ALIGNMASK+1], + * where 0 is aligned to a 2*(MAX_ALGAPI_ALIGNMASK+1) byte boundary + * @iv_offset_relative_to_alignmask: if true, add the algorithm's alignmask to + * the @iv_offset + * @finalization_type: what finalization function to use for hashes + */ +struct testvec_config { + const char *name; + bool inplace; + u32 req_flags; + struct test_sg_division src_divs[XBUFSIZE]; + struct test_sg_division dst_divs[XBUFSIZE]; + unsigned int iv_offset; + bool iv_offset_relative_to_alignmask; + enum finalization_type finalization_type; +}; + +#define TESTVEC_CONFIG_NAMELEN 192 + +/* + * The following are the lists of testvec_configs to test for each algorithm + * type when the basic crypto self-tests are enabled, i.e. when + * CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is unset. They aim to provide good test + * coverage, while keeping the test time much shorter than the full fuzz tests + * so that the basic tests can be enabled in a wider range of circumstances. 
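As a worked example of how these divisions scale: TEST_SG_TOTAL is 10000, so the "two even aligned splits" config below maps a 100-byte test vector onto two 50-byte scatterlist entries, while an .offset = 1 shifts a chunk one byte into its two-page buffer to exercise misaligned access.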
+ */ + +/* Configs for skciphers and aeads */ +static const struct testvec_config default_cipher_testvec_configs[] = { + { + .name = "in-place", + .inplace = true, + .src_divs = { { .proportion_of_total = 10000 } }, + }, { + .name = "out-of-place", + .src_divs = { { .proportion_of_total = 10000 } }, + }, { + .name = "unaligned buffer, offset=1", + .src_divs = { { .proportion_of_total = 10000, .offset = 1 } }, + .iv_offset = 1, + }, { + .name = "buffer aligned only to alignmask", + .src_divs = { + { + .proportion_of_total = 10000, + .offset = 1, + .offset_relative_to_alignmask = true, + }, + }, + .iv_offset = 1, + .iv_offset_relative_to_alignmask = true, + }, { + .name = "two even aligned splits", + .src_divs = { + { .proportion_of_total = 5000 }, + { .proportion_of_total = 5000 }, + }, + }, { + .name = "uneven misaligned splits, may sleep", + .req_flags = CRYPTO_TFM_REQ_MAY_SLEEP, + .src_divs = { + { .proportion_of_total = 1900, .offset = 33 }, + { .proportion_of_total = 3300, .offset = 7 }, + { .proportion_of_total = 4800, .offset = 18 }, + }, + .iv_offset = 3, + }, { + .name = "misaligned splits crossing pages, inplace", + .inplace = true, + .src_divs = { + { + .proportion_of_total = 7500, + .offset = PAGE_SIZE - 32 + }, { + .proportion_of_total = 2500, + .offset = PAGE_SIZE - 7 + }, + }, } - ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_req_done, &wait); +}; - j = 0; - for (i = 0; i < tcount; i++) { - if (template[i].np) - continue; +static const struct testvec_config default_hash_testvec_configs[] = { + { + .name = "init+update+final aligned buffer", + .src_divs = { { .proportion_of_total = 10000 } }, + .finalization_type = FINALIZATION_TYPE_FINAL, + }, { + .name = "init+finup aligned buffer", + .src_divs = { { .proportion_of_total = 10000 } }, + .finalization_type = FINALIZATION_TYPE_FINUP, + }, { + .name = "digest aligned buffer", + .src_divs = { { .proportion_of_total = 10000 } }, + .finalization_type = FINALIZATION_TYPE_DIGEST, + }, { + .name = "init+update+final misaligned buffer", + .src_divs = { { .proportion_of_total = 10000, .offset = 1 } }, + .finalization_type = FINALIZATION_TYPE_FINAL, + }, { + .name = "digest buffer aligned only to alignmask", + .src_divs = { + { + .proportion_of_total = 10000, + .offset = 1, + .offset_relative_to_alignmask = true, + }, + }, + .finalization_type = FINALIZATION_TYPE_DIGEST, + }, { + .name = "init+update+update+final two even splits", + .src_divs = { + { .proportion_of_total = 5000 }, + { + .proportion_of_total = 5000, + .flush_type = FLUSH_TYPE_FLUSH, + }, + }, + .finalization_type = FINALIZATION_TYPE_FINAL, + }, { + .name = "digest uneven misaligned splits, may sleep", + .req_flags = CRYPTO_TFM_REQ_MAY_SLEEP, + .src_divs = { + { .proportion_of_total = 1900, .offset = 33 }, + { .proportion_of_total = 3300, .offset = 7 }, + { .proportion_of_total = 4800, .offset = 18 }, + }, + .finalization_type = FINALIZATION_TYPE_DIGEST, + }, { + .name = "digest misaligned splits crossing pages", + .src_divs = { + { + .proportion_of_total = 7500, + .offset = PAGE_SIZE - 32, + }, { + .proportion_of_total = 2500, + .offset = PAGE_SIZE - 7, + }, + }, + .finalization_type = FINALIZATION_TYPE_DIGEST, + }, { + .name = "import/export", + .src_divs = { + { + .proportion_of_total = 6500, + .flush_type = FLUSH_TYPE_REIMPORT, + }, { + .proportion_of_total = 3500, + .flush_type = FLUSH_TYPE_REIMPORT, + }, + }, + .finalization_type = FINALIZATION_TYPE_FINAL, + } +}; - ret = -EINVAL; - if (WARN_ON(align_offset + template[i].psize > PAGE_SIZE)) - 
goto out; +static unsigned int count_test_sg_divisions(const struct test_sg_division *divs) +{ + unsigned int remaining = TEST_SG_TOTAL; + unsigned int ndivs = 0; - j++; - memset(result, 0, digest_size); + do { + remaining -= divs[ndivs++].proportion_of_total; + } while (remaining); - hash_buff = xbuf[0]; - hash_buff += align_offset; + return ndivs; +} - memcpy(hash_buff, template[i].plaintext, template[i].psize); - sg_init_one(&sg[0], hash_buff, template[i].psize); +static bool valid_sg_divisions(const struct test_sg_division *divs, + unsigned int count, bool *any_flushes_ret) +{ + unsigned int total = 0; + unsigned int i; - if (template[i].ksize) { - crypto_ahash_clear_flags(tfm, ~0); - if (template[i].ksize > MAX_KEYLEN) { - pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n", - j, algo, template[i].ksize, MAX_KEYLEN); - ret = -EINVAL; - goto out; - } - memcpy(key, template[i].key, template[i].ksize); - ret = crypto_ahash_setkey(tfm, key, template[i].ksize); - if (ret) { - printk(KERN_ERR "alg: hash: setkey failed on " - "test %d for %s: ret=%d\n", j, algo, - -ret); - goto out; - } - } + for (i = 0; i < count && total != TEST_SG_TOTAL; i++) { + if (divs[i].proportion_of_total <= 0 || + divs[i].proportion_of_total > TEST_SG_TOTAL - total) + return false; + total += divs[i].proportion_of_total; + if (divs[i].flush_type != FLUSH_TYPE_NONE) + *any_flushes_ret = true; + } + return total == TEST_SG_TOTAL && + memchr_inv(&divs[i], 0, (count - i) * sizeof(divs[0])) == NULL; +} - ahash_request_set_crypt(req, sg, result, template[i].psize); - switch (test_type) { - case HASH_TEST_DIGEST: - ret = crypto_wait_req(crypto_ahash_digest(req), &wait); - if (ret) { - pr_err("alg: hash: digest failed on test %d " - "for %s: ret=%d\n", j, algo, -ret); - goto out; - } - break; +/* + * Check whether the given testvec_config is valid. This isn't strictly needed + * since every testvec_config should be valid, but check anyway so that people + * don't unknowingly add broken configs that don't do what they wanted. 
+ */ +static bool valid_testvec_config(const struct testvec_config *cfg) +{ + bool any_flushes = false; - case HASH_TEST_FINAL: - memset(result, 1, digest_size); - ret = crypto_wait_req(crypto_ahash_init(req), &wait); - if (ret) { - pr_err("alg: hash: init failed on test %d " - "for %s: ret=%d\n", j, algo, -ret); - goto out; - } - ret = ahash_guard_result(result, 1, digest_size); - if (ret) { - pr_err("alg: hash: init failed on test %d " - "for %s: used req->result\n", j, algo); - goto out; - } - ret = crypto_wait_req(crypto_ahash_update(req), &wait); - if (ret) { - pr_err("alg: hash: update failed on test %d " - "for %s: ret=%d\n", j, algo, -ret); - goto out; - } - ret = ahash_guard_result(result, 1, digest_size); - if (ret) { - pr_err("alg: hash: update failed on test %d " - "for %s: used req->result\n", j, algo); - goto out; - } - ret = crypto_wait_req(crypto_ahash_final(req), &wait); - if (ret) { - pr_err("alg: hash: final failed on test %d " - "for %s: ret=%d\n", j, algo, -ret); - goto out; - } - break; + if (cfg->name == NULL) + return false; - case HASH_TEST_FINUP: - memset(result, 1, digest_size); - ret = crypto_wait_req(crypto_ahash_init(req), &wait); - if (ret) { - pr_err("alg: hash: init failed on test %d " - "for %s: ret=%d\n", j, algo, -ret); - goto out; - } - ret = ahash_guard_result(result, 1, digest_size); - if (ret) { - pr_err("alg: hash: init failed on test %d " - "for %s: used req->result\n", j, algo); - goto out; - } - ret = crypto_wait_req(crypto_ahash_finup(req), &wait); - if (ret) { - pr_err("alg: hash: final failed on test %d " - "for %s: ret=%d\n", j, algo, -ret); - goto out; - } - break; - } + if (!valid_sg_divisions(cfg->src_divs, ARRAY_SIZE(cfg->src_divs), + &any_flushes)) + return false; - if (memcmp(result, template[i].digest, - crypto_ahash_digestsize(tfm))) { - printk(KERN_ERR "alg: hash: Test %d failed for %s\n", - j, algo); - hexdump(result, crypto_ahash_digestsize(tfm)); - ret = -EINVAL; - goto out; - } + if (cfg->dst_divs[0].proportion_of_total) { + if (!valid_sg_divisions(cfg->dst_divs, + ARRAY_SIZE(cfg->dst_divs), + &any_flushes)) + return false; + } else { + if (memchr_inv(cfg->dst_divs, 0, sizeof(cfg->dst_divs))) + return false; + /* defaults to dst_divs=src_divs */ } - if (test_type) - goto out; + if (cfg->iv_offset + + (cfg->iv_offset_relative_to_alignmask ? 
MAX_ALGAPI_ALIGNMASK : 0) > + MAX_ALGAPI_ALIGNMASK + 1) + return false; - j = 0; - for (i = 0; i < tcount; i++) { - /* alignment tests are only done with continuous buffers */ - if (align_offset != 0) - break; + if (any_flushes && cfg->finalization_type == FINALIZATION_TYPE_DIGEST) + return false; - if (!template[i].np) - continue; + return true; +} - j++; - memset(result, 0, digest_size); +struct test_sglist { + char *bufs[XBUFSIZE]; + struct scatterlist sgl[XBUFSIZE]; + struct scatterlist sgl_saved[XBUFSIZE]; + struct scatterlist *sgl_ptr; + unsigned int nents; +}; - temp = 0; - sg_init_table(sg, template[i].np); - ret = -EINVAL; - for (k = 0; k < template[i].np; k++) { - if (WARN_ON(offset_in_page(IDX[k]) + - template[i].tap[k] > PAGE_SIZE)) - goto out; - sg_set_buf(&sg[k], - memcpy(xbuf[IDX[k] >> PAGE_SHIFT] + - offset_in_page(IDX[k]), - template[i].plaintext + temp, - template[i].tap[k]), - template[i].tap[k]); - temp += template[i].tap[k]; - } - - if (template[i].ksize) { - if (template[i].ksize > MAX_KEYLEN) { - pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n", - j, algo, template[i].ksize, MAX_KEYLEN); - ret = -EINVAL; - goto out; - } - crypto_ahash_clear_flags(tfm, ~0); - memcpy(key, template[i].key, template[i].ksize); - ret = crypto_ahash_setkey(tfm, key, template[i].ksize); - - if (ret) { - printk(KERN_ERR "alg: hash: setkey " - "failed on chunking test %d " - "for %s: ret=%d\n", j, algo, -ret); - goto out; - } - } +static int init_test_sglist(struct test_sglist *tsgl) +{ + return __testmgr_alloc_buf(tsgl->bufs, 1 /* two pages per buffer */); +} - ahash_request_set_crypt(req, sg, result, template[i].psize); - ret = crypto_wait_req(crypto_ahash_digest(req), &wait); - if (ret) { - pr_err("alg: hash: digest failed on chunking test %d for %s: ret=%d\n", - j, algo, -ret); - goto out; - } +static void destroy_test_sglist(struct test_sglist *tsgl) +{ + return __testmgr_free_buf(tsgl->bufs, 1 /* two pages per buffer */); +} - if (memcmp(result, template[i].digest, - crypto_ahash_digestsize(tfm))) { - printk(KERN_ERR "alg: hash: Chunking test %d " - "failed for %s\n", j, algo); - hexdump(result, crypto_ahash_digestsize(tfm)); - ret = -EINVAL; - goto out; +/** + * build_test_sglist() - build a scatterlist for a crypto test + * + * @tsgl: the scatterlist to build. @tsgl->bufs[] contains an array of 2-page + * buffers which the scatterlist @tsgl->sgl[] will be made to point into. + * @divs: the layout specification on which the scatterlist will be based + * @alignmask: the algorithm's alignmask + * @total_len: the total length of the scatterlist to build in bytes + * @data: if non-NULL, the buffers will be filled with this data until it ends. + * Otherwise the buffers will be poisoned. In both cases, some bytes + * past the end of each buffer will be poisoned to help detect overruns. + * @out_divs: if non-NULL, the test_sg_division to which each scatterlist entry + * corresponds will be returned here. This will match @divs except + * that divisions resolving to a length of 0 are omitted as they are + * not included in the scatterlist. 
+ * + * Return: 0 or a -errno value + */ +static int build_test_sglist(struct test_sglist *tsgl, + const struct test_sg_division *divs, + const unsigned int alignmask, + const unsigned int total_len, + struct iov_iter *data, + const struct test_sg_division *out_divs[XBUFSIZE]) +{ + struct { + const struct test_sg_division *div; + size_t length; + } partitions[XBUFSIZE]; + const unsigned int ndivs = count_test_sg_divisions(divs); + unsigned int len_remaining = total_len; + unsigned int i; + + BUILD_BUG_ON(ARRAY_SIZE(partitions) != ARRAY_SIZE(tsgl->sgl)); + if (WARN_ON(ndivs > ARRAY_SIZE(partitions))) + return -EINVAL; + + /* Calculate the (div, length) pairs */ + tsgl->nents = 0; + for (i = 0; i < ndivs; i++) { + unsigned int len_this_sg = + min(len_remaining, + (total_len * divs[i].proportion_of_total + + TEST_SG_TOTAL / 2) / TEST_SG_TOTAL); + + if (len_this_sg != 0) { + partitions[tsgl->nents].div = &divs[i]; + partitions[tsgl->nents].length = len_this_sg; + tsgl->nents++; + len_remaining -= len_this_sg; } } + if (tsgl->nents == 0) { + partitions[tsgl->nents].div = &divs[0]; + partitions[tsgl->nents].length = 0; + tsgl->nents++; + } + partitions[tsgl->nents - 1].length += len_remaining; - /* partial update exercise */ - j = 0; - for (i = 0; i < tcount; i++) { - /* alignment tests are only done with continuous buffers */ - if (align_offset != 0) - break; - - if (template[i].np < 2) - continue; + /* Set up the sgl entries and fill the data or poison */ + sg_init_table(tsgl->sgl, tsgl->nents); + for (i = 0; i < tsgl->nents; i++) { + unsigned int offset = partitions[i].div->offset; + void *addr; - j++; - memset(result, 0, digest_size); + if (partitions[i].div->offset_relative_to_alignmask) + offset += alignmask; - ret = -EINVAL; - hash_buff = xbuf[0]; - memcpy(hash_buff, template[i].plaintext, - template[i].tap[0]); - sg_init_one(&sg[0], hash_buff, template[i].tap[0]); - - if (template[i].ksize) { - crypto_ahash_clear_flags(tfm, ~0); - if (template[i].ksize > MAX_KEYLEN) { - pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n", - j, algo, template[i].ksize, MAX_KEYLEN); - ret = -EINVAL; - goto out; - } - memcpy(key, template[i].key, template[i].ksize); - ret = crypto_ahash_setkey(tfm, key, template[i].ksize); - if (ret) { - pr_err("alg: hash: setkey failed on test %d for %s: ret=%d\n", - j, algo, -ret); - goto out; - } + while (offset + partitions[i].length + TESTMGR_POISON_LEN > + 2 * PAGE_SIZE) { + if (WARN_ON(offset <= 0)) + return -EINVAL; + offset /= 2; } - ahash_request_set_crypt(req, sg, result, template[i].tap[0]); - ret = crypto_wait_req(crypto_ahash_init(req), &wait); - if (ret) { - pr_err("alg: hash: init failed on test %d for %s: ret=%d\n", - j, algo, -ret); - goto out; - } - ret = crypto_wait_req(crypto_ahash_update(req), &wait); - if (ret) { - pr_err("alg: hash: update failed on test %d for %s: ret=%d\n", - j, algo, -ret); - goto out; - } + addr = &tsgl->bufs[i][offset]; + sg_set_buf(&tsgl->sgl[i], addr, partitions[i].length); - temp = template[i].tap[0]; - for (k = 1; k < template[i].np; k++) { - ret = ahash_partial_update(&req, tfm, &template[i], - hash_buff, k, temp, &sg[0], algo, result, - &wait); - if (ret) { - pr_err("alg: hash: partial update failed on test %d for %s: ret=%d\n", - j, algo, -ret); - goto out_noreq; - } - temp += template[i].tap[k]; - } - ret = crypto_wait_req(crypto_ahash_final(req), &wait); - if (ret) { - pr_err("alg: hash: final failed on test %d for %s: ret=%d\n", - j, algo, -ret); - goto out; - } - if (memcmp(result, 
template[i].digest, - crypto_ahash_digestsize(tfm))) { - pr_err("alg: hash: Partial Test %d failed for %s\n", - j, algo); - hexdump(result, crypto_ahash_digestsize(tfm)); - ret = -EINVAL; - goto out; + if (out_divs) + out_divs[i] = partitions[i].div; + + if (data) { + size_t copy_len, copied; + + copy_len = min(partitions[i].length, data->count); + copied = copy_from_iter(addr, copy_len, data); + if (WARN_ON(copied != copy_len)) + return -EINVAL; + testmgr_poison(addr + copy_len, partitions[i].length + + TESTMGR_POISON_LEN - copy_len); + } else { + testmgr_poison(addr, partitions[i].length + + TESTMGR_POISON_LEN); } } - ret = 0; - -out: - ahash_request_free(req); -out_noreq: - testmgr_free_buf(xbuf); -out_nobuf: - kfree(key); - kfree(result); - return ret; + sg_mark_end(&tsgl->sgl[tsgl->nents - 1]); + tsgl->sgl_ptr = tsgl->sgl; + memcpy(tsgl->sgl_saved, tsgl->sgl, tsgl->nents * sizeof(tsgl->sgl[0])); + return 0; } -static int test_hash(struct crypto_ahash *tfm, - const struct hash_testvec *template, - unsigned int tcount, enum hash_test test_type) +/* + * Verify that a scatterlist crypto operation produced the correct output. + * + * @tsgl: scatterlist containing the actual output + * @expected_output: buffer containing the expected output + * @len_to_check: length of @expected_output in bytes + * @unchecked_prefix_len: number of ignored bytes in @tsgl prior to real result + * @check_poison: verify that the poison bytes after each chunk are intact? + * + * Return: 0 if correct, -EINVAL if incorrect, -EOVERFLOW if buffer overrun. + */ +static int verify_correct_output(const struct test_sglist *tsgl, + const char *expected_output, + unsigned int len_to_check, + unsigned int unchecked_prefix_len, + bool check_poison) { - unsigned int alignmask; - int ret; + unsigned int i; - ret = __test_hash(tfm, template, tcount, test_type, 0); - if (ret) - return ret; + for (i = 0; i < tsgl->nents; i++) { + struct scatterlist *sg = &tsgl->sgl_ptr[i]; + unsigned int len = sg->length; + unsigned int offset = sg->offset; + const char *actual_output; - /* test unaligned buffers, check with one byte offset */ - ret = __test_hash(tfm, template, tcount, test_type, 1); - if (ret) - return ret; + if (unchecked_prefix_len) { + if (unchecked_prefix_len >= len) { + unchecked_prefix_len -= len; + continue; + } + offset += unchecked_prefix_len; + len -= unchecked_prefix_len; + unchecked_prefix_len = 0; + } + len = min(len, len_to_check); + actual_output = page_address(sg_page(sg)) + offset; + if (memcmp(expected_output, actual_output, len) != 0) + return -EINVAL; + if (check_poison && + !testmgr_is_poison(actual_output + len, TESTMGR_POISON_LEN)) + return -EOVERFLOW; + len_to_check -= len; + expected_output += len; + } + if (WARN_ON(len_to_check != 0)) + return -EINVAL; + return 0; +} - alignmask = crypto_tfm_alg_alignmask(&tfm->base); - if (alignmask) { - /* Check if alignment mask for tfm is correctly set. 
*/ - ret = __test_hash(tfm, template, tcount, test_type, - alignmask + 1); - if (ret) - return ret; - } +static bool is_test_sglist_corrupted(const struct test_sglist *tsgl) +{ + unsigned int i; - return 0; + for (i = 0; i < tsgl->nents; i++) { + if (tsgl->sgl[i].page_link != tsgl->sgl_saved[i].page_link) + return true; + if (tsgl->sgl[i].offset != tsgl->sgl_saved[i].offset) + return true; + if (tsgl->sgl[i].length != tsgl->sgl_saved[i].length) + return true; + } + return false; } -static int __test_aead(struct crypto_aead *tfm, int enc, - const struct aead_testvec *template, unsigned int tcount, - const bool diff_dst, const int align_offset) +struct cipher_test_sglists { + struct test_sglist src; + struct test_sglist dst; +}; + +static struct cipher_test_sglists *alloc_cipher_test_sglists(void) { - const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)); - unsigned int i, j, k, n, temp; - int ret = -ENOMEM; - char *q; - char *key; - struct aead_request *req; - struct scatterlist *sg; - struct scatterlist *sgout; - const char *e, *d; - struct crypto_wait wait; - unsigned int authsize, iv_len; - void *input; - void *output; - void *assoc; - char *iv; - char *xbuf[XBUFSIZE]; - char *xoutbuf[XBUFSIZE]; - char *axbuf[XBUFSIZE]; + struct cipher_test_sglists *tsgls; - iv = kzalloc(MAX_IVLEN, GFP_KERNEL); - if (!iv) - return ret; - key = kmalloc(MAX_KEYLEN, GFP_KERNEL); - if (!key) - goto out_noxbuf; - if (testmgr_alloc_buf(xbuf)) - goto out_noxbuf; - if (testmgr_alloc_buf(axbuf)) - goto out_noaxbuf; - if (diff_dst && testmgr_alloc_buf(xoutbuf)) - goto out_nooutbuf; - - /* avoid "the frame size is larger than 1024 bytes" compiler warning */ - sg = kmalloc(array3_size(sizeof(*sg), 8, (diff_dst ? 4 : 2)), - GFP_KERNEL); - if (!sg) - goto out_nosg; - sgout = &sg[16]; - - if (diff_dst) - d = "-ddst"; - else - d = ""; + tsgls = kmalloc(sizeof(*tsgls), GFP_KERNEL); + if (!tsgls) + return NULL; - if (enc == ENCRYPT) - e = "encryption"; - else - e = "decryption"; + if (init_test_sglist(&tsgls->src) != 0) + goto fail_kfree; + if (init_test_sglist(&tsgls->dst) != 0) + goto fail_destroy_src; - crypto_init_wait(&wait); + return tsgls; - req = aead_request_alloc(tfm, GFP_KERNEL); - if (!req) { - pr_err("alg: aead%s: Failed to allocate request for %s\n", - d, algo); - goto out; - } +fail_destroy_src: + destroy_test_sglist(&tsgls->src); +fail_kfree: + kfree(tsgls); + return NULL; +} - aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_req_done, &wait); +static void free_cipher_test_sglists(struct cipher_test_sglists *tsgls) +{ + if (tsgls) { + destroy_test_sglist(&tsgls->src); + destroy_test_sglist(&tsgls->dst); + kfree(tsgls); + } +} - iv_len = crypto_aead_ivsize(tfm); +/* Build the src and dst scatterlists for an skcipher or AEAD test */ +static int build_cipher_test_sglists(struct cipher_test_sglists *tsgls, + const struct testvec_config *cfg, + unsigned int alignmask, + unsigned int src_total_len, + unsigned int dst_total_len, + const struct kvec *inputs, + unsigned int nr_inputs) +{ + struct iov_iter input; + int err; - for (i = 0, j = 0; i < tcount; i++) { - if (template[i].np) - continue; + iov_iter_kvec(&input, WRITE, inputs, nr_inputs, src_total_len); + err = build_test_sglist(&tsgls->src, cfg->src_divs, alignmask, + cfg->inplace ? 
+ max(dst_total_len, src_total_len) : + src_total_len, + &input, NULL); + if (err) + return err; - j++; + if (cfg->inplace) { + tsgls->dst.sgl_ptr = tsgls->src.sgl; + tsgls->dst.nents = tsgls->src.nents; + return 0; + } + return build_test_sglist(&tsgls->dst, + cfg->dst_divs[0].proportion_of_total ? + cfg->dst_divs : cfg->src_divs, + alignmask, dst_total_len, NULL, NULL); +} - /* some templates have no input data but they will - * touch input - */ - input = xbuf[0]; - input += align_offset; - assoc = axbuf[0]; +#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS +static char *generate_random_sgl_divisions(struct test_sg_division *divs, + size_t max_divs, char *p, char *end, + bool gen_flushes) +{ + struct test_sg_division *div = divs; + unsigned int remaining = TEST_SG_TOTAL; - ret = -EINVAL; - if (WARN_ON(align_offset + template[i].ilen > - PAGE_SIZE || template[i].alen > PAGE_SIZE)) - goto out; + do { + unsigned int this_len; - memcpy(input, template[i].input, template[i].ilen); - memcpy(assoc, template[i].assoc, template[i].alen); - if (template[i].iv) - memcpy(iv, template[i].iv, iv_len); + if (div == &divs[max_divs - 1] || prandom_u32() % 2 == 0) + this_len = remaining; else - memset(iv, 0, iv_len); + this_len = 1 + (prandom_u32() % remaining); + div->proportion_of_total = this_len; - crypto_aead_clear_flags(tfm, ~0); - if (template[i].wk) - crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); - - if (template[i].klen > MAX_KEYLEN) { - pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n", - d, j, algo, template[i].klen, - MAX_KEYLEN); - ret = -EINVAL; - goto out; + if (prandom_u32() % 4 == 0) + div->offset = (PAGE_SIZE - 128) + (prandom_u32() % 128); + else if (prandom_u32() % 2 == 0) + div->offset = prandom_u32() % 32; + else + div->offset = prandom_u32() % PAGE_SIZE; + if (prandom_u32() % 8 == 0) + div->offset_relative_to_alignmask = true; + + div->flush_type = FLUSH_TYPE_NONE; + if (gen_flushes) { + switch (prandom_u32() % 4) { + case 0: + div->flush_type = FLUSH_TYPE_REIMPORT; + break; + case 1: + div->flush_type = FLUSH_TYPE_FLUSH; + break; + } } - memcpy(key, template[i].key, template[i].klen); - ret = crypto_aead_setkey(tfm, key, template[i].klen); - if (template[i].fail == !ret) { - pr_err("alg: aead%s: setkey failed on test %d for %s: flags=%x\n", - d, j, algo, crypto_aead_get_flags(tfm)); - goto out; - } else if (ret) - continue; + BUILD_BUG_ON(TEST_SG_TOTAL != 10000); /* for "%u.%u%%" */ + p += scnprintf(p, end - p, "%s%u.%u%%@%s+%u%s", + div->flush_type == FLUSH_TYPE_NONE ? "" : + div->flush_type == FLUSH_TYPE_FLUSH ? + "<flush> " : "<reimport> ", + this_len / 100, this_len % 100, + div->offset_relative_to_alignmask ? + "alignmask" : "", + div->offset, this_len == remaining ? "" : ", "); + remaining -= this_len; + div++; + } while (remaining); + + return p; +} - authsize = abs(template[i].rlen - template[i].ilen); - ret = crypto_aead_setauthsize(tfm, authsize); - if (ret) { - pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n", - d, authsize, j, algo); - goto out; - } +/* Generate a random testvec_config for fuzz testing */ +static void generate_random_testvec_config(struct testvec_config *cfg, + char *name, size_t max_namelen) +{ + char *p = name; + char * const end = name + max_namelen; - k = !!template[i].alen; - sg_init_table(sg, k + 1); - sg_set_buf(&sg[0], assoc, template[i].alen); - sg_set_buf(&sg[k], input, - template[i].ilen + (enc ? 
authsize : 0)); - output = input; + memset(cfg, 0, sizeof(*cfg)); - if (diff_dst) { - sg_init_table(sgout, k + 1); - sg_set_buf(&sgout[0], assoc, template[i].alen); + cfg->name = name; - output = xoutbuf[0]; - output += align_offset; - sg_set_buf(&sgout[k], output, - template[i].rlen + (enc ? 0 : authsize)); - } + p += scnprintf(p, end - p, "random:"); - aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, - template[i].ilen, iv); + if (prandom_u32() % 2 == 0) { + cfg->inplace = true; + p += scnprintf(p, end - p, " inplace"); + } - aead_request_set_ad(req, template[i].alen); + if (prandom_u32() % 2 == 0) { + cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP; + p += scnprintf(p, end - p, " may_sleep"); + } - ret = crypto_wait_req(enc ? crypto_aead_encrypt(req) - : crypto_aead_decrypt(req), &wait); + switch (prandom_u32() % 4) { + case 0: + cfg->finalization_type = FINALIZATION_TYPE_FINAL; + p += scnprintf(p, end - p, " use_final"); + break; + case 1: + cfg->finalization_type = FINALIZATION_TYPE_FINUP; + p += scnprintf(p, end - p, " use_finup"); + break; + default: + cfg->finalization_type = FINALIZATION_TYPE_DIGEST; + p += scnprintf(p, end - p, " use_digest"); + break; + } - switch (ret) { - case 0: - if (template[i].novrfy) { - /* verification was supposed to fail */ - pr_err("alg: aead%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n", - d, e, j, algo); - /* so really, we got a bad message */ - ret = -EBADMSG; - goto out; - } - break; - case -EBADMSG: - if (template[i].novrfy) - /* verification failure was expected */ - continue; - /* fall through */ - default: - pr_err("alg: aead%s: %s failed on test %d for %s: ret=%d\n", - d, e, j, algo, -ret); - goto out; - } + p += scnprintf(p, end - p, " src_divs=["); + p = generate_random_sgl_divisions(cfg->src_divs, + ARRAY_SIZE(cfg->src_divs), p, end, + (cfg->finalization_type != + FINALIZATION_TYPE_DIGEST)); + p += scnprintf(p, end - p, "]"); - q = output; - if (memcmp(q, template[i].result, template[i].rlen)) { - pr_err("alg: aead%s: Test %d failed on %s for %s\n", - d, j, e, algo); - hexdump(q, template[i].rlen); - ret = -EINVAL; - goto out; - } + if (!cfg->inplace && prandom_u32() % 2 == 0) { + p += scnprintf(p, end - p, " dst_divs=["); + p = generate_random_sgl_divisions(cfg->dst_divs, + ARRAY_SIZE(cfg->dst_divs), + p, end, false); + p += scnprintf(p, end - p, "]"); } - for (i = 0, j = 0; i < tcount; i++) { - /* alignment tests are only done with continuous buffers */ - if (align_offset != 0) - break; + if (prandom_u32() % 2 == 0) { + cfg->iv_offset = 1 + (prandom_u32() % MAX_ALGAPI_ALIGNMASK); + p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset); + } - if (!template[i].np) - continue; + WARN_ON_ONCE(!valid_testvec_config(cfg)); +} +#endif /* CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ - j++; +static int check_nonfinal_hash_op(const char *op, int err, + u8 *result, unsigned int digestsize, + const char *driver, unsigned int vec_num, + const struct testvec_config *cfg) +{ + if (err) { + pr_err("alg: hash: %s %s() failed with err %d on test vector %u, cfg=\"%s\"\n", + driver, op, err, vec_num, cfg->name); + return err; + } + if (!testmgr_is_poison(result, digestsize)) { + pr_err("alg: hash: %s %s() used result buffer on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return -EINVAL; + } + return 0; +} - if (template[i].iv) - memcpy(iv, template[i].iv, iv_len); - else - memset(iv, 0, MAX_IVLEN); +static int test_hash_vec_cfg(const char *driver, + const struct hash_testvec *vec, + unsigned int vec_num, + const 
struct testvec_config *cfg, + struct ahash_request *req, + struct test_sglist *tsgl, + u8 *hashstate) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + const unsigned int alignmask = crypto_ahash_alignmask(tfm); + const unsigned int digestsize = crypto_ahash_digestsize(tfm); + const unsigned int statesize = crypto_ahash_statesize(tfm); + const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags; + const struct test_sg_division *divs[XBUFSIZE]; + DECLARE_CRYPTO_WAIT(wait); + struct kvec _input; + struct iov_iter input; + unsigned int i; + struct scatterlist *pending_sgl; + unsigned int pending_len; + u8 result[HASH_MAX_DIGESTSIZE + TESTMGR_POISON_LEN]; + int err; - crypto_aead_clear_flags(tfm, ~0); - if (template[i].wk) - crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); - if (template[i].klen > MAX_KEYLEN) { - pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n", - d, j, algo, template[i].klen, MAX_KEYLEN); - ret = -EINVAL; - goto out; + /* Set the key, if specified */ + if (vec->ksize) { + err = crypto_ahash_setkey(tfm, vec->key, vec->ksize); + if (err) { + pr_err("alg: hash: %s setkey failed with err %d on test vector %u; flags=%#x\n", + driver, err, vec_num, + crypto_ahash_get_flags(tfm)); + return err; } - memcpy(key, template[i].key, template[i].klen); - - ret = crypto_aead_setkey(tfm, key, template[i].klen); - if (template[i].fail == !ret) { - pr_err("alg: aead%s: setkey failed on chunk test %d for %s: flags=%x\n", - d, j, algo, crypto_aead_get_flags(tfm)); - goto out; - } else if (ret) - continue; + } - authsize = abs(template[i].rlen - template[i].ilen); + /* Build the scatterlist for the source data */ + _input.iov_base = (void *)vec->plaintext; + _input.iov_len = vec->psize; + iov_iter_kvec(&input, WRITE, &_input, 1, vec->psize); + err = build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize, + &input, divs); + if (err) { + pr_err("alg: hash: %s: error preparing scatterlist for test vector %u, cfg=\"%s\"\n", + driver, vec_num, cfg->name); + return err; + } - ret = -EINVAL; - sg_init_table(sg, template[i].anp + template[i].np); - if (diff_dst) - sg_init_table(sgout, template[i].anp + template[i].np); + /* Do the actual hashing */ - ret = -EINVAL; - for (k = 0, temp = 0; k < template[i].anp; k++) { - if (WARN_ON(offset_in_page(IDX[k]) + - template[i].atap[k] > PAGE_SIZE)) - goto out; - sg_set_buf(&sg[k], - memcpy(axbuf[IDX[k] >> PAGE_SHIFT] + - offset_in_page(IDX[k]), - template[i].assoc + temp, - template[i].atap[k]), - template[i].atap[k]); - if (diff_dst) - sg_set_buf(&sgout[k], - axbuf[IDX[k] >> PAGE_SHIFT] + - offset_in_page(IDX[k]), - template[i].atap[k]); - temp += template[i].atap[k]; - } - - for (k = 0, temp = 0; k < template[i].np; k++) { - if (WARN_ON(offset_in_page(IDX[k]) + - template[i].tap[k] > PAGE_SIZE)) - goto out; + testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm)); + testmgr_poison(result, digestsize + TESTMGR_POISON_LEN); - q = xbuf[IDX[k] >> PAGE_SHIFT] + offset_in_page(IDX[k]); - memcpy(q, template[i].input + temp, template[i].tap[k]); - sg_set_buf(&sg[template[i].anp + k], - q, template[i].tap[k]); + if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST) { + /* Just using digest() */ + ahash_request_set_callback(req, req_flags, crypto_req_done, + &wait); + ahash_request_set_crypt(req, tsgl->sgl, result, vec->psize); + err = crypto_wait_req(crypto_ahash_digest(req), &wait); + if (err) { + pr_err("alg: hash: %s digest() failed with err %d on test vector %u, cfg=\"%s\"\n", + driver, err, vec_num, cfg->name); 
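/*
 * Both the digest() path above and the incremental path below lean on
 * the poison idiom: "result" was poisoned for digestsize +
 * TESTMGR_POISON_LEN bytes, so the tail past digestsize must still be
 * poison afterwards (else the driver overran its buffer), and
 * check_nonfinal_hash_op() insists the digest-sized region is still
 * poison after init()/update()/export()/import(), which must not
 * produce output.  A minimal sketch of the overrun check (overran()
 * is illustrative; memchr_inv() is the real primitive):
 *
 *	static bool overran(const u8 *buf, unsigned int outlen,
 *			    unsigned int canary_len, u8 poison)
 *	{
 *		// non-NULL means a canary byte was overwritten
 *		return memchr_inv(buf + outlen, poison, canary_len);
 *	}
 */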
+ return err; + } + goto result_ready; + } - if (diff_dst) { - q = xoutbuf[IDX[k] >> PAGE_SHIFT] + - offset_in_page(IDX[k]); + /* Using init(), zero or more update(), then final() or finup() */ - memset(q, 0, template[i].tap[k]); + ahash_request_set_callback(req, req_flags, crypto_req_done, &wait); + ahash_request_set_crypt(req, NULL, result, 0); + err = crypto_wait_req(crypto_ahash_init(req), &wait); + err = check_nonfinal_hash_op("init", err, result, digestsize, + driver, vec_num, cfg); + if (err) + return err; - sg_set_buf(&sgout[template[i].anp + k], - q, template[i].tap[k]); + pending_sgl = NULL; + pending_len = 0; + for (i = 0; i < tsgl->nents; i++) { + if (divs[i]->flush_type != FLUSH_TYPE_NONE && + pending_sgl != NULL) { + /* update() with the pending data */ + ahash_request_set_callback(req, req_flags, + crypto_req_done, &wait); + ahash_request_set_crypt(req, pending_sgl, result, + pending_len); + err = crypto_wait_req(crypto_ahash_update(req), &wait); + err = check_nonfinal_hash_op("update", err, + result, digestsize, + driver, vec_num, cfg); + if (err) + return err; + pending_sgl = NULL; + pending_len = 0; + } + if (divs[i]->flush_type == FLUSH_TYPE_REIMPORT) { + /* Test ->export() and ->import() */ + testmgr_poison(hashstate + statesize, + TESTMGR_POISON_LEN); + err = crypto_ahash_export(req, hashstate); + err = check_nonfinal_hash_op("export", err, + result, digestsize, + driver, vec_num, cfg); + if (err) + return err; + if (!testmgr_is_poison(hashstate + statesize, + TESTMGR_POISON_LEN)) { + pr_err("alg: hash: %s export() overran state buffer on test vector %u, cfg=\"%s\"\n", + driver, vec_num, cfg->name); + return -EOVERFLOW; } - n = template[i].tap[k]; - if (k == template[i].np - 1 && enc) - n += authsize; - if (offset_in_page(q) + n < PAGE_SIZE) - q[n] = 0; - - temp += template[i].tap[k]; + testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm)); + err = crypto_ahash_import(req, hashstate); + err = check_nonfinal_hash_op("import", err, + result, digestsize, + driver, vec_num, cfg); + if (err) + return err; + } + if (pending_sgl == NULL) + pending_sgl = &tsgl->sgl[i]; + pending_len += tsgl->sgl[i].length; + } + + ahash_request_set_callback(req, req_flags, crypto_req_done, &wait); + ahash_request_set_crypt(req, pending_sgl, result, pending_len); + if (cfg->finalization_type == FINALIZATION_TYPE_FINAL) { + /* finish with update() and final() */ + err = crypto_wait_req(crypto_ahash_update(req), &wait); + err = check_nonfinal_hash_op("update", err, result, digestsize, + driver, vec_num, cfg); + if (err) + return err; + err = crypto_wait_req(crypto_ahash_final(req), &wait); + if (err) { + pr_err("alg: hash: %s final() failed with err %d on test vector %u, cfg=\"%s\"\n", + driver, err, vec_num, cfg->name); + return err; } - - ret = crypto_aead_setauthsize(tfm, authsize); - if (ret) { - pr_err("alg: aead%s: Failed to set authsize to %u on chunk test %d for %s\n", - d, authsize, j, algo); - goto out; + } else { + /* finish with finup() */ + err = crypto_wait_req(crypto_ahash_finup(req), &wait); + if (err) { + pr_err("alg: hash: %s finup() failed with err %d on test vector %u, cfg=\"%s\"\n", + driver, err, vec_num, cfg->name); + return err; } + } - if (enc) { - if (WARN_ON(sg[template[i].anp + k - 1].offset + - sg[template[i].anp + k - 1].length + - authsize > PAGE_SIZE)) { - ret = -EINVAL; - goto out; - } +result_ready: + /* Check that the algorithm produced the correct digest */ + if (memcmp(result, vec->digest, digestsize) != 0) { + pr_err("alg: hash: %s test failed (wrong result) 
on test vector %u, cfg=\"%s\"\n", + driver, vec_num, cfg->name); + return -EINVAL; + } + if (!testmgr_is_poison(&result[digestsize], TESTMGR_POISON_LEN)) { + pr_err("alg: hash: %s overran result buffer on test vector %u, cfg=\"%s\"\n", + driver, vec_num, cfg->name); + return -EOVERFLOW; + } + + return 0; +} + +static int test_hash_vec(const char *driver, const struct hash_testvec *vec, + unsigned int vec_num, struct ahash_request *req, + struct test_sglist *tsgl, u8 *hashstate) +{ + unsigned int i; + int err; + + for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) { + err = test_hash_vec_cfg(driver, vec, vec_num, + &default_hash_testvec_configs[i], + req, tsgl, hashstate); + if (err) + return err; + } + +#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS + if (!noextratests) { + struct testvec_config cfg; + char cfgname[TESTVEC_CONFIG_NAMELEN]; - if (diff_dst) - sgout[template[i].anp + k - 1].length += - authsize; - sg[template[i].anp + k - 1].length += authsize; + for (i = 0; i < fuzz_iterations; i++) { + generate_random_testvec_config(&cfg, cfgname, + sizeof(cfgname)); + err = test_hash_vec_cfg(driver, vec, vec_num, &cfg, + req, tsgl, hashstate); + if (err) + return err; } + } +#endif + return 0; +} - aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, - template[i].ilen, - iv); +static int __alg_test_hash(const struct hash_testvec *vecs, + unsigned int num_vecs, const char *driver, + u32 type, u32 mask) +{ + struct crypto_ahash *tfm; + struct ahash_request *req = NULL; + struct test_sglist *tsgl = NULL; + u8 *hashstate = NULL; + unsigned int i; + int err; - aead_request_set_ad(req, template[i].alen); + tfm = crypto_alloc_ahash(driver, type, mask); + if (IS_ERR(tfm)) { + pr_err("alg: hash: failed to allocate transform for %s: %ld\n", + driver, PTR_ERR(tfm)); + return PTR_ERR(tfm); + } - ret = crypto_wait_req(enc ? 
crypto_aead_encrypt(req) - : crypto_aead_decrypt(req), &wait); + req = ahash_request_alloc(tfm, GFP_KERNEL); + if (!req) { + pr_err("alg: hash: failed to allocate request for %s\n", + driver); + err = -ENOMEM; + goto out; + } - switch (ret) { - case 0: - if (template[i].novrfy) { - /* verification was supposed to fail */ - pr_err("alg: aead%s: %s failed on chunk test %d for %s: ret was 0, expected -EBADMSG\n", - d, e, j, algo); - /* so really, we got a bad message */ - ret = -EBADMSG; - goto out; - } - break; - case -EBADMSG: - if (template[i].novrfy) - /* verification failure was expected */ - continue; - /* fall through */ - default: - pr_err("alg: aead%s: %s failed on chunk test %d for %s: ret=%d\n", - d, e, j, algo, -ret); + tsgl = kmalloc(sizeof(*tsgl), GFP_KERNEL); + if (!tsgl || init_test_sglist(tsgl) != 0) { + pr_err("alg: hash: failed to allocate test buffers for %s\n", + driver); + kfree(tsgl); + tsgl = NULL; + err = -ENOMEM; + goto out; + } + + hashstate = kmalloc(crypto_ahash_statesize(tfm) + TESTMGR_POISON_LEN, + GFP_KERNEL); + if (!hashstate) { + pr_err("alg: hash: failed to allocate hash state buffer for %s\n", + driver); + err = -ENOMEM; + goto out; + } + + for (i = 0; i < num_vecs; i++) { + err = test_hash_vec(driver, &vecs[i], i, req, tsgl, hashstate); + if (err) goto out; + } + err = 0; +out: + kfree(hashstate); + if (tsgl) { + destroy_test_sglist(tsgl); + kfree(tsgl); + } + ahash_request_free(req); + crypto_free_ahash(tfm); + return err; +} + +static int alg_test_hash(const struct alg_test_desc *desc, const char *driver, + u32 type, u32 mask) +{ + const struct hash_testvec *template = desc->suite.hash.vecs; + unsigned int tcount = desc->suite.hash.count; + unsigned int nr_unkeyed, nr_keyed; + int err; + + /* + * For OPTIONAL_KEY algorithms, we have to do all the unkeyed tests + * first, before setting a key on the tfm. To make this easier, we + * require that the unkeyed test vectors (if any) are listed first. + */ + + for (nr_unkeyed = 0; nr_unkeyed < tcount; nr_unkeyed++) { + if (template[nr_unkeyed].ksize) + break; + } + for (nr_keyed = 0; nr_unkeyed + nr_keyed < tcount; nr_keyed++) { + if (!template[nr_unkeyed + nr_keyed].ksize) { + pr_err("alg: hash: test vectors for %s out of order, " + "unkeyed ones must come first\n", desc->alg); + return -EINVAL; } + } - ret = -EINVAL; - for (k = 0, temp = 0; k < template[i].np; k++) { - if (diff_dst) - q = xoutbuf[IDX[k] >> PAGE_SHIFT] + - offset_in_page(IDX[k]); - else - q = xbuf[IDX[k] >> PAGE_SHIFT] + - offset_in_page(IDX[k]); + err = 0; + if (nr_unkeyed) { + err = __alg_test_hash(template, nr_unkeyed, driver, type, mask); + template += nr_unkeyed; + } - n = template[i].tap[k]; - if (k == template[i].np - 1) - n += enc ? 
authsize : -authsize; + if (!err && nr_keyed) + err = __alg_test_hash(template, nr_keyed, driver, type, mask); - if (memcmp(q, template[i].result + temp, n)) { - pr_err("alg: aead%s: Chunk test %d failed on %s at page %u for %s\n", - d, j, e, k, algo); - hexdump(q, n); - goto out; - } + return err; +} - q += n; - if (k == template[i].np - 1 && !enc) { - if (!diff_dst && - memcmp(q, template[i].input + - temp + n, authsize)) - n = authsize; - else - n = 0; - } else { - for (n = 0; offset_in_page(q + n) && q[n]; n++) - ; - } - if (n) { - pr_err("alg: aead%s: Result buffer corruption in chunk test %d on %s at page %u for %s: %u bytes:\n", - d, j, e, k, algo, n); - hexdump(q, n); - goto out; - } +static int test_aead_vec_cfg(const char *driver, int enc, + const struct aead_testvec *vec, + unsigned int vec_num, + const struct testvec_config *cfg, + struct aead_request *req, + struct cipher_test_sglists *tsgls) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + const unsigned int alignmask = crypto_aead_alignmask(tfm); + const unsigned int ivsize = crypto_aead_ivsize(tfm); + const unsigned int authsize = vec->clen - vec->plen; + const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags; + const char *op = enc ? "encryption" : "decryption"; + DECLARE_CRYPTO_WAIT(wait); + u8 _iv[3 * (MAX_ALGAPI_ALIGNMASK + 1) + MAX_IVLEN]; + u8 *iv = PTR_ALIGN(&_iv[0], 2 * (MAX_ALGAPI_ALIGNMASK + 1)) + + cfg->iv_offset + + (cfg->iv_offset_relative_to_alignmask ? alignmask : 0); + struct kvec input[2]; + int err; - temp += template[i].tap[k]; - } + /* Set the key */ + if (vec->wk) + crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); + else + crypto_aead_clear_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); + err = crypto_aead_setkey(tfm, vec->key, vec->klen); + if (err) { + if (vec->fail) /* expectedly failed to set key? */ + return 0; + pr_err("alg: aead: %s setkey failed with err %d on test vector %u; flags=%#x\n", + driver, err, vec_num, crypto_aead_get_flags(tfm)); + return err; + } + if (vec->fail) { + pr_err("alg: aead: %s setkey unexpectedly succeeded on test vector %u\n", + driver, vec_num); + return -EINVAL; } - ret = 0; + /* Set the authentication tag size */ + err = crypto_aead_setauthsize(tfm, authsize); + if (err) { + pr_err("alg: aead: %s setauthsize failed with err %d on test vector %u\n", + driver, err, vec_num); + return err; + } -out: - aead_request_free(req); - kfree(sg); -out_nosg: - if (diff_dst) - testmgr_free_buf(xoutbuf); -out_nooutbuf: - testmgr_free_buf(axbuf); -out_noaxbuf: - testmgr_free_buf(xbuf); -out_noxbuf: - kfree(key); - kfree(iv); - return ret; + /* The IV must be copied to a buffer, as the algorithm may modify it */ + if (WARN_ON(ivsize > MAX_IVLEN)) + return -EINVAL; + if (vec->iv) + memcpy(iv, vec->iv, ivsize); + else + memset(iv, 0, ivsize); + + /* Build the src/dst scatterlists */ + input[0].iov_base = (void *)vec->assoc; + input[0].iov_len = vec->alen; + input[1].iov_base = enc ? (void *)vec->ptext : (void *)vec->ctext; + input[1].iov_len = enc ? vec->plen : vec->clen; + err = build_cipher_test_sglists(tsgls, cfg, alignmask, + vec->alen + (enc ? vec->plen : + vec->clen), + vec->alen + (enc ? 
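/*
 * Length convention used throughout the AEAD tests: an aead_testvec
 * stores the full authenticated ciphertext, ctext = ciphertext || tag,
 * so authsize = vec->clen - vec->plen.  For a GCM-style vector with
 * plen = 16 and a 16-byte tag, clen = 32: encryption reads alen + plen
 * source bytes and writes alen + clen destination bytes, and
 * decryption is the mirror image, which is exactly what the
 * enc-dependent totals in this build_cipher_test_sglists() call
 * express.
 */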
vec->clen : + vec->plen), + input, 2); + if (err) { + pr_err("alg: aead: %s %s: error preparing scatterlists for test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return err; + } + + /* Do the actual encryption or decryption */ + testmgr_poison(req->__ctx, crypto_aead_reqsize(tfm)); + aead_request_set_callback(req, req_flags, crypto_req_done, &wait); + aead_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr, + enc ? vec->plen : vec->clen, iv); + aead_request_set_ad(req, vec->alen); + err = crypto_wait_req(enc ? crypto_aead_encrypt(req) : + crypto_aead_decrypt(req), &wait); + + aead_request_set_tfm(req, tfm); /* TODO: get rid of this */ + + if (err) { + if (err == -EBADMSG && vec->novrfy) + return 0; + pr_err("alg: aead: %s %s failed with err %d on test vector %u, cfg=\"%s\"\n", + driver, op, err, vec_num, cfg->name); + return err; + } + if (vec->novrfy) { + pr_err("alg: aead: %s %s unexpectedly succeeded on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return -EINVAL; + } + + /* Check that the algorithm didn't overwrite things it shouldn't have */ + if (req->cryptlen != (enc ? vec->plen : vec->clen) || + req->assoclen != vec->alen || + req->iv != iv || + req->src != tsgls->src.sgl_ptr || + req->dst != tsgls->dst.sgl_ptr || + crypto_aead_reqtfm(req) != tfm || + req->base.complete != crypto_req_done || + req->base.flags != req_flags || + req->base.data != &wait) { + pr_err("alg: aead: %s %s corrupted request struct on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + if (req->cryptlen != (enc ? vec->plen : vec->clen)) + pr_err("alg: aead: changed 'req->cryptlen'\n"); + if (req->assoclen != vec->alen) + pr_err("alg: aead: changed 'req->assoclen'\n"); + if (req->iv != iv) + pr_err("alg: aead: changed 'req->iv'\n"); + if (req->src != tsgls->src.sgl_ptr) + pr_err("alg: aead: changed 'req->src'\n"); + if (req->dst != tsgls->dst.sgl_ptr) + pr_err("alg: aead: changed 'req->dst'\n"); + if (crypto_aead_reqtfm(req) != tfm) + pr_err("alg: aead: changed 'req->base.tfm'\n"); + if (req->base.complete != crypto_req_done) + pr_err("alg: aead: changed 'req->base.complete'\n"); + if (req->base.flags != req_flags) + pr_err("alg: aead: changed 'req->base.flags'\n"); + if (req->base.data != &wait) + pr_err("alg: aead: changed 'req->base.data'\n"); + return -EINVAL; + } + if (is_test_sglist_corrupted(&tsgls->src)) { + pr_err("alg: aead: %s %s corrupted src sgl on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return -EINVAL; + } + if (tsgls->dst.sgl_ptr != tsgls->src.sgl && + is_test_sglist_corrupted(&tsgls->dst)) { + pr_err("alg: aead: %s %s corrupted dst sgl on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return -EINVAL; + } + + /* Check for the correct output (ciphertext or plaintext) */ + err = verify_correct_output(&tsgls->dst, enc ? vec->ctext : vec->ptext, + enc ? 
vec->clen : vec->plen, + vec->alen, enc || !cfg->inplace); + if (err == -EOVERFLOW) { + pr_err("alg: aead: %s %s overran dst buffer on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return err; + } + if (err) { + pr_err("alg: aead: %s %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return err; + } + + return 0; } -static int test_aead(struct crypto_aead *tfm, int enc, - const struct aead_testvec *template, unsigned int tcount) +static int test_aead_vec(const char *driver, int enc, + const struct aead_testvec *vec, unsigned int vec_num, + struct aead_request *req, + struct cipher_test_sglists *tsgls) { - unsigned int alignmask; - int ret; + unsigned int i; + int err; - /* test 'dst == src' case */ - ret = __test_aead(tfm, enc, template, tcount, false, 0); - if (ret) - return ret; + if (enc && vec->novrfy) + return 0; - /* test 'dst != src' case */ - ret = __test_aead(tfm, enc, template, tcount, true, 0); - if (ret) - return ret; + for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) { + err = test_aead_vec_cfg(driver, enc, vec, vec_num, + &default_cipher_testvec_configs[i], + req, tsgls); + if (err) + return err; + } - /* test unaligned buffers, check with one byte offset */ - ret = __test_aead(tfm, enc, template, tcount, true, 1); - if (ret) - return ret; +#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS + if (!noextratests) { + struct testvec_config cfg; + char cfgname[TESTVEC_CONFIG_NAMELEN]; - alignmask = crypto_tfm_alg_alignmask(&tfm->base); - if (alignmask) { - /* Check if alignment mask for tfm is correctly set. */ - ret = __test_aead(tfm, enc, template, tcount, true, - alignmask + 1); - if (ret) - return ret; + for (i = 0; i < fuzz_iterations; i++) { + generate_random_testvec_config(&cfg, cfgname, + sizeof(cfgname)); + err = test_aead_vec_cfg(driver, enc, vec, vec_num, + &cfg, req, tsgls); + if (err) + return err; + } } +#endif + return 0; +} +static int test_aead(const char *driver, int enc, + const struct aead_test_suite *suite, + struct aead_request *req, + struct cipher_test_sglists *tsgls) +{ + unsigned int i; + int err; + + for (i = 0; i < suite->count; i++) { + err = test_aead_vec(driver, enc, &suite->vecs[i], i, req, + tsgls); + if (err) + return err; + } return 0; } +static int alg_test_aead(const struct alg_test_desc *desc, const char *driver, + u32 type, u32 mask) +{ + const struct aead_test_suite *suite = &desc->suite.aead; + struct crypto_aead *tfm; + struct aead_request *req = NULL; + struct cipher_test_sglists *tsgls = NULL; + int err; + + if (suite->count <= 0) { + pr_err("alg: aead: empty test suite for %s\n", driver); + return -EINVAL; + } + + tfm = crypto_alloc_aead(driver, type, mask); + if (IS_ERR(tfm)) { + pr_err("alg: aead: failed to allocate transform for %s: %ld\n", + driver, PTR_ERR(tfm)); + return PTR_ERR(tfm); + } + + req = aead_request_alloc(tfm, GFP_KERNEL); + if (!req) { + pr_err("alg: aead: failed to allocate request for %s\n", + driver); + err = -ENOMEM; + goto out; + } + + tsgls = alloc_cipher_test_sglists(); + if (!tsgls) { + pr_err("alg: aead: failed to allocate test buffers for %s\n", + driver); + err = -ENOMEM; + goto out; + } + + err = test_aead(driver, ENCRYPT, suite, req, tsgls); + if (err) + goto out; + + err = test_aead(driver, DECRYPT, suite, req, tsgls); +out: + free_cipher_test_sglists(tsgls); + aead_request_free(req); + crypto_free_aead(tfm); + return err; +} + static int test_cipher(struct crypto_cipher *tfm, int enc, const struct cipher_testvec *template, 
unsigned int tcount) @@ -1037,8 +1441,6 @@ static int test_cipher(struct crypto_cipher *tfm, int enc, j = 0; for (i = 0; i < tcount; i++) { - if (template[i].np) - continue; if (fips_enabled && template[i].fips_skip) continue; @@ -1056,7 +1458,7 @@ static int test_cipher(struct crypto_cipher *tfm, int enc, crypto_cipher_clear_flags(tfm, ~0); if (template[i].wk) - crypto_cipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); + crypto_cipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); ret = crypto_cipher_setkey(tfm, template[i].key, template[i].klen); @@ -1092,288 +1494,261 @@ static int test_cipher(struct crypto_cipher *tfm, int enc, out: testmgr_free_buf(xbuf); -out_nobuf: - return ret; -} - -static int __test_skcipher(struct crypto_skcipher *tfm, int enc, - const struct cipher_testvec *template, - unsigned int tcount, - const bool diff_dst, const int align_offset) -{ - const char *algo = - crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)); - unsigned int i, j, k, n, temp; - char *q; - struct skcipher_request *req; - struct scatterlist sg[8]; - struct scatterlist sgout[8]; - const char *e, *d; - struct crypto_wait wait; - const char *input, *result; - void *data; - char iv[MAX_IVLEN]; - char *xbuf[XBUFSIZE]; - char *xoutbuf[XBUFSIZE]; - int ret = -ENOMEM; - unsigned int ivsize = crypto_skcipher_ivsize(tfm); - - if (testmgr_alloc_buf(xbuf)) - goto out_nobuf; - - if (diff_dst && testmgr_alloc_buf(xoutbuf)) - goto out_nooutbuf; - - if (diff_dst) - d = "-ddst"; - else - d = ""; - - if (enc == ENCRYPT) - e = "encryption"; - else - e = "decryption"; - - crypto_init_wait(&wait); - - req = skcipher_request_alloc(tfm, GFP_KERNEL); - if (!req) { - pr_err("alg: skcipher%s: Failed to allocate request for %s\n", - d, algo); - goto out; - } - - skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_req_done, &wait); - - j = 0; - for (i = 0; i < tcount; i++) { - if (template[i].np && !template[i].also_non_np) - continue; - - if (fips_enabled && template[i].fips_skip) - continue; - - if (template[i].iv && !(template[i].generates_iv && enc)) - memcpy(iv, template[i].iv, ivsize); - else - memset(iv, 0, MAX_IVLEN); - - input = enc ? template[i].ptext : template[i].ctext; - result = enc ? template[i].ctext : template[i].ptext; - j++; - ret = -EINVAL; - if (WARN_ON(align_offset + template[i].len > PAGE_SIZE)) - goto out; - - data = xbuf[0]; - data += align_offset; - memcpy(data, input, template[i].len); - - crypto_skcipher_clear_flags(tfm, ~0); - if (template[i].wk) - crypto_skcipher_set_flags(tfm, - CRYPTO_TFM_REQ_WEAK_KEY); - - ret = crypto_skcipher_setkey(tfm, template[i].key, - template[i].klen); - if (template[i].fail == !ret) { - pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n", - d, j, algo, crypto_skcipher_get_flags(tfm)); - goto out; - } else if (ret) - continue; - - sg_init_one(&sg[0], data, template[i].len); - if (diff_dst) { - data = xoutbuf[0]; - data += align_offset; - sg_init_one(&sgout[0], data, template[i].len); - } - - skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, - template[i].len, iv); - ret = crypto_wait_req(enc ? 
crypto_skcipher_encrypt(req) : - crypto_skcipher_decrypt(req), &wait); - - if (ret) { - pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n", - d, e, j, algo, -ret); - goto out; - } - - q = data; - if (memcmp(q, result, template[i].len)) { - pr_err("alg: skcipher%s: Test %d failed (invalid result) on %s for %s\n", - d, j, e, algo); - hexdump(q, template[i].len); - ret = -EINVAL; - goto out; - } - - if (template[i].generates_iv && enc && - memcmp(iv, template[i].iv, crypto_skcipher_ivsize(tfm))) { - pr_err("alg: skcipher%s: Test %d failed (invalid output IV) on %s for %s\n", - d, j, e, algo); - hexdump(iv, crypto_skcipher_ivsize(tfm)); - ret = -EINVAL; - goto out; - } - } - - j = 0; - for (i = 0; i < tcount; i++) { - /* alignment tests are only done with continuous buffers */ - if (align_offset != 0) - break; - - if (!template[i].np) - continue; - - if (fips_enabled && template[i].fips_skip) - continue; - - if (template[i].iv && !(template[i].generates_iv && enc)) - memcpy(iv, template[i].iv, ivsize); - else - memset(iv, 0, MAX_IVLEN); - - input = enc ? template[i].ptext : template[i].ctext; - result = enc ? template[i].ctext : template[i].ptext; - j++; - crypto_skcipher_clear_flags(tfm, ~0); - if (template[i].wk) - crypto_skcipher_set_flags(tfm, - CRYPTO_TFM_REQ_WEAK_KEY); - - ret = crypto_skcipher_setkey(tfm, template[i].key, - template[i].klen); - if (template[i].fail == !ret) { - pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n", - d, j, algo, crypto_skcipher_get_flags(tfm)); - goto out; - } else if (ret) - continue; - - temp = 0; - ret = -EINVAL; - sg_init_table(sg, template[i].np); - if (diff_dst) - sg_init_table(sgout, template[i].np); - for (k = 0; k < template[i].np; k++) { - if (WARN_ON(offset_in_page(IDX[k]) + - template[i].tap[k] > PAGE_SIZE)) - goto out; +out_nobuf: + return ret; +} - q = xbuf[IDX[k] >> PAGE_SHIFT] + offset_in_page(IDX[k]); +static int test_skcipher_vec_cfg(const char *driver, int enc, + const struct cipher_testvec *vec, + unsigned int vec_num, + const struct testvec_config *cfg, + struct skcipher_request *req, + struct cipher_test_sglists *tsgls) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const unsigned int alignmask = crypto_skcipher_alignmask(tfm); + const unsigned int ivsize = crypto_skcipher_ivsize(tfm); + const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags; + const char *op = enc ? "encryption" : "decryption"; + DECLARE_CRYPTO_WAIT(wait); + u8 _iv[3 * (MAX_ALGAPI_ALIGNMASK + 1) + MAX_IVLEN]; + u8 *iv = PTR_ALIGN(&_iv[0], 2 * (MAX_ALGAPI_ALIGNMASK + 1)) + + cfg->iv_offset + + (cfg->iv_offset_relative_to_alignmask ? alignmask : 0); + struct kvec input; + int err; - memcpy(q, input + temp, template[i].tap[k]); + /* Set the key */ + if (vec->wk) + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); + else + crypto_skcipher_clear_flags(tfm, + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); + err = crypto_skcipher_setkey(tfm, vec->key, vec->klen); + if (err) { + if (vec->fail) /* expectedly failed to set key? 
*/ + return 0; + pr_err("alg: skcipher: %s setkey failed with err %d on test vector %u; flags=%#x\n", + driver, err, vec_num, crypto_skcipher_get_flags(tfm)); + return err; + } + if (vec->fail) { + pr_err("alg: skcipher: %s setkey unexpectedly succeeded on test vector %u\n", + driver, vec_num); + return -EINVAL; + } - if (offset_in_page(q) + template[i].tap[k] < PAGE_SIZE) - q[template[i].tap[k]] = 0; + /* The IV must be copied to a buffer, as the algorithm may modify it */ + if (ivsize) { + if (WARN_ON(ivsize > MAX_IVLEN)) + return -EINVAL; + if (vec->generates_iv && !enc) + memcpy(iv, vec->iv_out, ivsize); + else if (vec->iv) + memcpy(iv, vec->iv, ivsize); + else + memset(iv, 0, ivsize); + } else { + if (vec->generates_iv) { + pr_err("alg: skcipher: %s has ivsize=0 but test vector %u generates IV!\n", + driver, vec_num); + return -EINVAL; + } + iv = NULL; + } - sg_set_buf(&sg[k], q, template[i].tap[k]); - if (diff_dst) { - q = xoutbuf[IDX[k] >> PAGE_SHIFT] + - offset_in_page(IDX[k]); + /* Build the src/dst scatterlists */ + input.iov_base = enc ? (void *)vec->ptext : (void *)vec->ctext; + input.iov_len = vec->len; + err = build_cipher_test_sglists(tsgls, cfg, alignmask, + vec->len, vec->len, &input, 1); + if (err) { + pr_err("alg: skcipher: %s %s: error preparing scatterlists for test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return err; + } - sg_set_buf(&sgout[k], q, template[i].tap[k]); + /* Do the actual encryption or decryption */ + testmgr_poison(req->__ctx, crypto_skcipher_reqsize(tfm)); + skcipher_request_set_callback(req, req_flags, crypto_req_done, &wait); + skcipher_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr, + vec->len, iv); + err = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) : + crypto_skcipher_decrypt(req), &wait); + if (err) { + pr_err("alg: skcipher: %s %s failed with err %d on test vector %u, cfg=\"%s\"\n", + driver, op, err, vec_num, cfg->name); + return err; + } - memset(q, 0, template[i].tap[k]); - if (offset_in_page(q) + - template[i].tap[k] < PAGE_SIZE) - q[template[i].tap[k]] = 0; - } + /* Check that the algorithm didn't overwrite things it shouldn't have */ + if (req->cryptlen != vec->len || + req->iv != iv || + req->src != tsgls->src.sgl_ptr || + req->dst != tsgls->dst.sgl_ptr || + crypto_skcipher_reqtfm(req) != tfm || + req->base.complete != crypto_req_done || + req->base.flags != req_flags || + req->base.data != &wait) { + pr_err("alg: skcipher: %s %s corrupted request struct on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + if (req->cryptlen != vec->len) + pr_err("alg: skcipher: changed 'req->cryptlen'\n"); + if (req->iv != iv) + pr_err("alg: skcipher: changed 'req->iv'\n"); + if (req->src != tsgls->src.sgl_ptr) + pr_err("alg: skcipher: changed 'req->src'\n"); + if (req->dst != tsgls->dst.sgl_ptr) + pr_err("alg: skcipher: changed 'req->dst'\n"); + if (crypto_skcipher_reqtfm(req) != tfm) + pr_err("alg: skcipher: changed 'req->base.tfm'\n"); + if (req->base.complete != crypto_req_done) + pr_err("alg: skcipher: changed 'req->base.complete'\n"); + if (req->base.flags != req_flags) + pr_err("alg: skcipher: changed 'req->base.flags'\n"); + if (req->base.data != &wait) + pr_err("alg: skcipher: changed 'req->base.data'\n"); + return -EINVAL; + } + if (is_test_sglist_corrupted(&tsgls->src)) { + pr_err("alg: skcipher: %s %s corrupted src sgl on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return -EINVAL; + } + if (tsgls->dst.sgl_ptr != tsgls->src.sgl && + 
is_test_sglist_corrupted(&tsgls->dst)) { + pr_err("alg: skcipher: %s %s corrupted dst sgl on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return -EINVAL; + } + + /* Check for the correct output (ciphertext or plaintext) */ + err = verify_correct_output(&tsgls->dst, enc ? vec->ctext : vec->ptext, + vec->len, 0, true); + if (err == -EOVERFLOW) { + pr_err("alg: skcipher: %s %s overran dst buffer on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return err; + } + if (err) { + pr_err("alg: skcipher: %s %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return err; + } - temp += template[i].tap[k]; - } + /* If applicable, check that the algorithm generated the correct IV */ + if (vec->iv_out && memcmp(iv, vec->iv_out, ivsize) != 0) { + pr_err("alg: skcipher: %s %s test failed (wrong output IV) on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + hexdump(iv, ivsize); + return -EINVAL; + } - skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, - template[i].len, iv); + return 0; +} - ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) : - crypto_skcipher_decrypt(req), &wait); +static int test_skcipher_vec(const char *driver, int enc, + const struct cipher_testvec *vec, + unsigned int vec_num, + struct skcipher_request *req, + struct cipher_test_sglists *tsgls) +{ + unsigned int i; + int err; - if (ret) { - pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n", - d, e, j, algo, -ret); - goto out; - } + if (fips_enabled && vec->fips_skip) + return 0; - temp = 0; - ret = -EINVAL; - for (k = 0; k < template[i].np; k++) { - if (diff_dst) - q = xoutbuf[IDX[k] >> PAGE_SHIFT] + - offset_in_page(IDX[k]); - else - q = xbuf[IDX[k] >> PAGE_SHIFT] + - offset_in_page(IDX[k]); + for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) { + err = test_skcipher_vec_cfg(driver, enc, vec, vec_num, + &default_cipher_testvec_configs[i], + req, tsgls); + if (err) + return err; + } - if (memcmp(q, result + temp, template[i].tap[k])) { - pr_err("alg: skcipher%s: Chunk test %d failed on %s at page %u for %s\n", - d, j, e, k, algo); - hexdump(q, template[i].tap[k]); - goto out; - } +#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS + if (!noextratests) { + struct testvec_config cfg; + char cfgname[TESTVEC_CONFIG_NAMELEN]; - q += template[i].tap[k]; - for (n = 0; offset_in_page(q + n) && q[n]; n++) - ; - if (n) { - pr_err("alg: skcipher%s: Result buffer corruption in chunk test %d on %s at page %u for %s: %u bytes:\n", - d, j, e, k, algo, n); - hexdump(q, n); - goto out; - } - temp += template[i].tap[k]; + for (i = 0; i < fuzz_iterations; i++) { + generate_random_testvec_config(&cfg, cfgname, + sizeof(cfgname)); + err = test_skcipher_vec_cfg(driver, enc, vec, vec_num, + &cfg, req, tsgls); + if (err) + return err; } } +#endif + return 0; +} - ret = 0; +static int test_skcipher(const char *driver, int enc, + const struct cipher_test_suite *suite, + struct skcipher_request *req, + struct cipher_test_sglists *tsgls) +{ + unsigned int i; + int err; -out: - skcipher_request_free(req); - if (diff_dst) - testmgr_free_buf(xoutbuf); -out_nooutbuf: - testmgr_free_buf(xbuf); -out_nobuf: - return ret; + for (i = 0; i < suite->count; i++) { + err = test_skcipher_vec(driver, enc, &suite->vecs[i], i, req, + tsgls); + if (err) + return err; + } + return 0; } -static int test_skcipher(struct crypto_skcipher *tfm, int enc, - const struct cipher_testvec *template, - unsigned int tcount) +static int 
alg_test_skcipher(const struct alg_test_desc *desc, + const char *driver, u32 type, u32 mask) { - unsigned int alignmask; - int ret; + const struct cipher_test_suite *suite = &desc->suite.cipher; + struct crypto_skcipher *tfm; + struct skcipher_request *req = NULL; + struct cipher_test_sglists *tsgls = NULL; + int err; - /* test 'dst == src' case */ - ret = __test_skcipher(tfm, enc, template, tcount, false, 0); - if (ret) - return ret; + if (suite->count <= 0) { + pr_err("alg: skcipher: empty test suite for %s\n", driver); + return -EINVAL; + } - /* test 'dst != src' case */ - ret = __test_skcipher(tfm, enc, template, tcount, true, 0); - if (ret) - return ret; + tfm = crypto_alloc_skcipher(driver, type, mask); + if (IS_ERR(tfm)) { + pr_err("alg: skcipher: failed to allocate transform for %s: %ld\n", + driver, PTR_ERR(tfm)); + return PTR_ERR(tfm); + } - /* test unaligned buffers, check with one byte offset */ - ret = __test_skcipher(tfm, enc, template, tcount, true, 1); - if (ret) - return ret; + req = skcipher_request_alloc(tfm, GFP_KERNEL); + if (!req) { + pr_err("alg: skcipher: failed to allocate request for %s\n", + driver); + err = -ENOMEM; + goto out; + } - alignmask = crypto_tfm_alg_alignmask(&tfm->base); - if (alignmask) { - /* Check if alignment mask for tfm is correctly set. */ - ret = __test_skcipher(tfm, enc, template, tcount, true, - alignmask + 1); - if (ret) - return ret; + tsgls = alloc_cipher_test_sglists(); + if (!tsgls) { + pr_err("alg: skcipher: failed to allocate test buffers for %s\n", + driver); + err = -ENOMEM; + goto out; } - return 0; + err = test_skcipher(driver, ENCRYPT, suite, req, tsgls); + if (err) + goto out; + + err = test_skcipher(driver, DECRYPT, suite, req, tsgls); +out: + free_cipher_test_sglists(tsgls); + skcipher_request_free(req); + crypto_free_skcipher(tfm); + return err; } static int test_comp(struct crypto_comp *tfm, @@ -1713,35 +2088,6 @@ out: return err; } -static int alg_test_aead(const struct alg_test_desc *desc, const char *driver, - u32 type, u32 mask) -{ - struct crypto_aead *tfm; - int err = 0; - - tfm = crypto_alloc_aead(driver, type, mask); - if (IS_ERR(tfm)) { - printk(KERN_ERR "alg: aead: Failed to load transform for %s: " - "%ld\n", driver, PTR_ERR(tfm)); - return PTR_ERR(tfm); - } - - if (desc->suite.aead.enc.vecs) { - err = test_aead(tfm, ENCRYPT, desc->suite.aead.enc.vecs, - desc->suite.aead.enc.count); - if (err) - goto out; - } - - if (!err && desc->suite.aead.dec.vecs) - err = test_aead(tfm, DECRYPT, desc->suite.aead.dec.vecs, - desc->suite.aead.dec.count); - -out: - crypto_free_aead(tfm); - return err; -} - static int alg_test_cipher(const struct alg_test_desc *desc, const char *driver, u32 type, u32 mask) { @@ -1764,28 +2110,6 @@ static int alg_test_cipher(const struct alg_test_desc *desc, return err; } -static int alg_test_skcipher(const struct alg_test_desc *desc, - const char *driver, u32 type, u32 mask) -{ - const struct cipher_test_suite *suite = &desc->suite.cipher; - struct crypto_skcipher *tfm; - int err; - - tfm = crypto_alloc_skcipher(driver, type, mask); - if (IS_ERR(tfm)) { - printk(KERN_ERR "alg: skcipher: Failed to load transform for " - "%s: %ld\n", driver, PTR_ERR(tfm)); - return PTR_ERR(tfm); - } - - err = test_skcipher(tfm, ENCRYPT, suite->vecs, suite->count); - if (!err) - err = test_skcipher(tfm, DECRYPT, suite->vecs, suite->count); - - crypto_free_skcipher(tfm); - return err; -} - static int alg_test_comp(const struct alg_test_desc *desc, const char *driver, u32 type, u32 mask) { @@ -1824,84 +2148,30 @@ 
static int alg_test_comp(const struct alg_test_desc *desc, const char *driver, return err; } -static int __alg_test_hash(const struct hash_testvec *template, - unsigned int tcount, const char *driver, - u32 type, u32 mask) -{ - struct crypto_ahash *tfm; - int err; - - tfm = crypto_alloc_ahash(driver, type, mask); - if (IS_ERR(tfm)) { - printk(KERN_ERR "alg: hash: Failed to load transform for %s: " - "%ld\n", driver, PTR_ERR(tfm)); - return PTR_ERR(tfm); - } - - err = test_hash(tfm, template, tcount, HASH_TEST_DIGEST); - if (!err) - err = test_hash(tfm, template, tcount, HASH_TEST_FINAL); - if (!err) - err = test_hash(tfm, template, tcount, HASH_TEST_FINUP); - crypto_free_ahash(tfm); - return err; -} - -static int alg_test_hash(const struct alg_test_desc *desc, const char *driver, - u32 type, u32 mask) -{ - const struct hash_testvec *template = desc->suite.hash.vecs; - unsigned int tcount = desc->suite.hash.count; - unsigned int nr_unkeyed, nr_keyed; - int err; - - /* - * For OPTIONAL_KEY algorithms, we have to do all the unkeyed tests - * first, before setting a key on the tfm. To make this easier, we - * require that the unkeyed test vectors (if any) are listed first. - */ - - for (nr_unkeyed = 0; nr_unkeyed < tcount; nr_unkeyed++) { - if (template[nr_unkeyed].ksize) - break; - } - for (nr_keyed = 0; nr_unkeyed + nr_keyed < tcount; nr_keyed++) { - if (!template[nr_unkeyed + nr_keyed].ksize) { - pr_err("alg: hash: test vectors for %s out of order, " - "unkeyed ones must come first\n", desc->alg); - return -EINVAL; - } - } - - err = 0; - if (nr_unkeyed) { - err = __alg_test_hash(template, nr_unkeyed, driver, type, mask); - template += nr_unkeyed; - } - - if (!err && nr_keyed) - err = __alg_test_hash(template, nr_keyed, driver, type, mask); - - return err; -} - static int alg_test_crc32c(const struct alg_test_desc *desc, const char *driver, u32 type, u32 mask) { struct crypto_shash *tfm; - u32 val; + __le32 val; int err; err = alg_test_hash(desc, driver, type, mask); if (err) - goto out; + return err; tfm = crypto_alloc_shash(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) { + /* + * This crc32c implementation is only available through + * ahash API, not the shash API, so the remaining part + * of the test is not applicable to it. 
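+		 * (Accordingly, -ENOENT from crypto_alloc_shash() is
+		 * mapped to success just below: there is no shash
+		 * instance to test.)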
+ */ + return 0; + } printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: " "%ld\n", driver, PTR_ERR(tfm)); - err = PTR_ERR(tfm); - goto out; + return PTR_ERR(tfm); } do { @@ -1911,7 +2181,7 @@ static int alg_test_crc32c(const struct alg_test_desc *desc, shash->tfm = tfm; shash->flags = 0; - *ctx = le32_to_cpu(420553207); + *ctx = 420553207; err = crypto_shash_final(shash, (u8 *)&val); if (err) { printk(KERN_ERR "alg: crc32c: Operation failed for " @@ -1919,16 +2189,15 @@ static int alg_test_crc32c(const struct alg_test_desc *desc, break; } - if (val != ~420553207) { - printk(KERN_ERR "alg: crc32c: Test failed for %s: " - "%d\n", driver, val); + if (val != cpu_to_le32(~420553207)) { + pr_err("alg: crc32c: Test failed for %s: %u\n", + driver, le32_to_cpu(val)); err = -EINVAL; } } while (0); crypto_free_shash(tfm); -out: return err; } @@ -2094,12 +2363,11 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, if (vec->genkey) { /* Save party A's public key */ - a_public = kzalloc(out_len_max, GFP_KERNEL); + a_public = kmemdup(sg_virt(req->dst), out_len_max, GFP_KERNEL); if (!a_public) { err = -ENOMEM; goto free_output; } - memcpy(a_public, sg_virt(req->dst), out_len_max); } else { /* Verify calculated public key */ if (memcmp(vec->expected_a_public, sg_virt(req->dst), @@ -2112,13 +2380,12 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, } /* Calculate shared secret key by using counter part (b) public key. */ - input_buf = kzalloc(vec->b_public_size, GFP_KERNEL); + input_buf = kmemdup(vec->b_public, vec->b_public_size, GFP_KERNEL); if (!input_buf) { err = -ENOMEM; goto free_output; } - memcpy(input_buf, vec->b_public, vec->b_public_size); sg_init_one(&src, input_buf, vec->b_public_size); sg_init_one(&dst, output_buf, out_len_max); kpp_request_set_input(req, &src, vec->b_public_size); @@ -2134,12 +2401,11 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, if (vec->genkey) { /* Save the shared secret obtained by party A */ - a_ss = kzalloc(vec->expected_ss_size, GFP_KERNEL); + a_ss = kmemdup(sg_virt(req->dst), vec->expected_ss_size, GFP_KERNEL); if (!a_ss) { err = -ENOMEM; goto free_all; } - memcpy(a_ss, sg_virt(req->dst), vec->expected_ss_size); /* * Calculate party B's shared secret by using party A's @@ -2238,6 +2504,9 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, unsigned int out_len_max, out_len = 0; int err = -ENOMEM; struct scatterlist src, dst, src_tab[2]; + const char *m, *c; + unsigned int m_size, c_size; + const char *op; if (testmgr_alloc_buf(xbuf)) return err; @@ -2259,46 +2528,72 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, err = -ENOMEM; out_len_max = crypto_akcipher_maxsize(tfm); + + /* + * First run test which do not require a private key, such as + * encrypt or verify. + */ outbuf_enc = kzalloc(out_len_max, GFP_KERNEL); if (!outbuf_enc) goto free_req; - if (WARN_ON(vecs->m_size > PAGE_SIZE)) - goto free_all; + if (!vecs->siggen_sigver_test) { + m = vecs->m; + m_size = vecs->m_size; + c = vecs->c; + c_size = vecs->c_size; + op = "encrypt"; + } else { + /* Swap args so we could keep plaintext (digest) + * in vecs->m, and cooked signature in vecs->c. 
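+		 * With the swap in place, the first (public-key) pass
+		 * below exercises verify() where plain vectors use
+		 * encrypt(), and the second (private-key) pass
+		 * exercises sign() where they use decrypt().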
+ */ + m = vecs->c; /* signature */ + m_size = vecs->c_size; + c = vecs->m; /* digest */ + c_size = vecs->m_size; + op = "verify"; + } - memcpy(xbuf[0], vecs->m, vecs->m_size); + if (WARN_ON(m_size > PAGE_SIZE)) + goto free_all; + memcpy(xbuf[0], m, m_size); sg_init_table(src_tab, 2); sg_set_buf(&src_tab[0], xbuf[0], 8); - sg_set_buf(&src_tab[1], xbuf[0] + 8, vecs->m_size - 8); + sg_set_buf(&src_tab[1], xbuf[0] + 8, m_size - 8); sg_init_one(&dst, outbuf_enc, out_len_max); - akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size, + akcipher_request_set_crypt(req, src_tab, &dst, m_size, out_len_max); akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, crypto_req_done, &wait); err = crypto_wait_req(vecs->siggen_sigver_test ? - /* Run asymmetric signature generation */ - crypto_akcipher_sign(req) : + /* Run asymmetric signature verification */ + crypto_akcipher_verify(req) : /* Run asymmetric encrypt */ crypto_akcipher_encrypt(req), &wait); if (err) { - pr_err("alg: akcipher: encrypt test failed. err %d\n", err); + pr_err("alg: akcipher: %s test failed. err %d\n", op, err); goto free_all; } - if (req->dst_len != vecs->c_size) { - pr_err("alg: akcipher: encrypt test failed. Invalid output len\n"); + if (req->dst_len != c_size) { + pr_err("alg: akcipher: %s test failed. Invalid output len\n", + op); err = -EINVAL; goto free_all; } /* verify that encrypted message is equal to expected */ - if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) { - pr_err("alg: akcipher: encrypt test failed. Invalid output\n"); - hexdump(outbuf_enc, vecs->c_size); + if (memcmp(c, outbuf_enc, c_size)) { + pr_err("alg: akcipher: %s test failed. Invalid output\n", op); + hexdump(outbuf_enc, c_size); err = -EINVAL; goto free_all; } - /* Don't invoke decrypt for vectors with public key */ + + /* + * Don't invoke (decrypt or sign) test which require a private key + * for vectors with only a public key. + */ if (vecs->public_key_vec) { err = 0; goto free_all; @@ -2309,37 +2604,36 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, goto free_all; } - if (WARN_ON(vecs->c_size > PAGE_SIZE)) + op = vecs->siggen_sigver_test ? "sign" : "decrypt"; + if (WARN_ON(c_size > PAGE_SIZE)) goto free_all; + memcpy(xbuf[0], c, c_size); - memcpy(xbuf[0], vecs->c, vecs->c_size); - - sg_init_one(&src, xbuf[0], vecs->c_size); + sg_init_one(&src, xbuf[0], c_size); sg_init_one(&dst, outbuf_dec, out_len_max); crypto_init_wait(&wait); - akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max); + akcipher_request_set_crypt(req, &src, &dst, c_size, out_len_max); err = crypto_wait_req(vecs->siggen_sigver_test ? - /* Run asymmetric signature verification */ - crypto_akcipher_verify(req) : + /* Run asymmetric signature generation */ + crypto_akcipher_sign(req) : /* Run asymmetric decrypt */ crypto_akcipher_decrypt(req), &wait); if (err) { - pr_err("alg: akcipher: decrypt test failed. err %d\n", err); + pr_err("alg: akcipher: %s test failed. err %d\n", op, err); goto free_all; } out_len = req->dst_len; - if (out_len < vecs->m_size) { - pr_err("alg: akcipher: decrypt test failed. " - "Invalid output len %u\n", out_len); + if (out_len < m_size) { + pr_err("alg: akcipher: %s test failed. Invalid output len %u\n", + op, out_len); err = -EINVAL; goto free_all; } /* verify that decrypted message is equal to the original msg */ - if (memchr_inv(outbuf_dec, 0, out_len - vecs->m_size) || - memcmp(vecs->m, outbuf_dec + out_len - vecs->m_size, - vecs->m_size)) { - pr_err("alg: akcipher: decrypt test failed. 
Invalid output\n"); + if (memchr_inv(outbuf_dec, 0, out_len - m_size) || + memcmp(m, outbuf_dec + out_len - m_size, m_size)) { + pr_err("alg: akcipher: %s test failed. Invalid output\n", op); hexdump(outbuf_dec, out_len); err = -EINVAL; } @@ -2419,28 +2713,19 @@ static const struct alg_test_desc alg_test_descs[] = { .alg = "aegis128", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(aegis128_enc_tv_template), - .dec = __VECS(aegis128_dec_tv_template), - } + .aead = __VECS(aegis128_tv_template) } }, { .alg = "aegis128l", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(aegis128l_enc_tv_template), - .dec = __VECS(aegis128l_dec_tv_template), - } + .aead = __VECS(aegis128l_tv_template) } }, { .alg = "aegis256", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(aegis256_enc_tv_template), - .dec = __VECS(aegis256_dec_tv_template), - } + .aead = __VECS(aegis256_tv_template) } }, { .alg = "ansi_cprng", @@ -2452,36 +2737,27 @@ static const struct alg_test_desc alg_test_descs[] = { .alg = "authenc(hmac(md5),ecb(cipher_null))", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(hmac_md5_ecb_cipher_null_enc_tv_template), - .dec = __VECS(hmac_md5_ecb_cipher_null_dec_tv_template) - } + .aead = __VECS(hmac_md5_ecb_cipher_null_tv_template) } }, { .alg = "authenc(hmac(sha1),cbc(aes))", .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(hmac_sha1_aes_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha1_aes_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha1),cbc(des))", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(hmac_sha1_des_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha1_des_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha1),cbc(des3_ede))", .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(hmac_sha1_des3_ede_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha1_des3_ede_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha1),ctr(aes))", @@ -2491,10 +2767,7 @@ static const struct alg_test_desc alg_test_descs[] = { .alg = "authenc(hmac(sha1),ecb(cipher_null))", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(hmac_sha1_ecb_cipher_null_enc_tv_temp), - .dec = __VECS(hmac_sha1_ecb_cipher_null_dec_tv_temp) - } + .aead = __VECS(hmac_sha1_ecb_cipher_null_tv_temp) } }, { .alg = "authenc(hmac(sha1),rfc3686(ctr(aes)))", @@ -2504,44 +2777,34 @@ static const struct alg_test_desc alg_test_descs[] = { .alg = "authenc(hmac(sha224),cbc(des))", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(hmac_sha224_des_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha224_des_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha224),cbc(des3_ede))", .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(hmac_sha224_des3_ede_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha224_des3_ede_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha256),cbc(aes))", .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(hmac_sha256_aes_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha256_aes_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha256),cbc(des))", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(hmac_sha256_des_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha256_des_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha256),cbc(des3_ede))", .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(hmac_sha256_des3_ede_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha256_des3_ede_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha256),ctr(aes))", @@ -2555,18 +2818,14 @@ static const 
struct alg_test_desc alg_test_descs[] = { .alg = "authenc(hmac(sha384),cbc(des))", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(hmac_sha384_des_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha384_des_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha384),cbc(des3_ede))", .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(hmac_sha384_des3_ede_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha384_des3_ede_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha384),ctr(aes))", @@ -2581,26 +2840,20 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(hmac_sha512_aes_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha512_aes_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha512),cbc(des))", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(hmac_sha512_des_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha512_des_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha512),cbc(des3_ede))", .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(hmac_sha512_des3_ede_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha512_des3_ede_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha512),ctr(aes))", @@ -2697,10 +2950,7 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(aes_ccm_enc_tv_template), - .dec = __VECS(aes_ccm_dec_tv_template) - } + .aead = __VECS(aes_ccm_tv_template) } }, { .alg = "cfb(aes)", @@ -2735,6 +2985,7 @@ static const struct alg_test_desc alg_test_descs[] = { }, { .alg = "crc32", .test = alg_test_hash, + .fips_allowed = 1, .suite = { .hash = __VECS(crc32_tv_template) } @@ -3111,10 +3362,7 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(aes_gcm_enc_tv_template), - .dec = __VECS(aes_gcm_dec_tv_template) - } + .aead = __VECS(aes_gcm_tv_template) } }, { .alg = "ghash", @@ -3309,19 +3557,13 @@ static const struct alg_test_desc alg_test_descs[] = { .alg = "morus1280", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(morus1280_enc_tv_template), - .dec = __VECS(morus1280_dec_tv_template), - } + .aead = __VECS(morus1280_tv_template) } }, { .alg = "morus640", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(morus640_enc_tv_template), - .dec = __VECS(morus640_dec_tv_template), - } + .aead = __VECS(morus640_tv_template) } }, { .alg = "nhpoly1305", @@ -3386,47 +3628,32 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(aes_gcm_rfc4106_enc_tv_template), - .dec = __VECS(aes_gcm_rfc4106_dec_tv_template) - } + .aead = __VECS(aes_gcm_rfc4106_tv_template) } }, { .alg = "rfc4309(ccm(aes))", .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(aes_ccm_rfc4309_enc_tv_template), - .dec = __VECS(aes_ccm_rfc4309_dec_tv_template) - } + .aead = __VECS(aes_ccm_rfc4309_tv_template) } }, { .alg = "rfc4543(gcm(aes))", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(aes_gcm_rfc4543_enc_tv_template), - .dec = __VECS(aes_gcm_rfc4543_dec_tv_template), - } + .aead = __VECS(aes_gcm_rfc4543_tv_template) } }, { .alg = "rfc7539(chacha20,poly1305)", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(rfc7539_enc_tv_template), - .dec = __VECS(rfc7539_dec_tv_template), - } + .aead = __VECS(rfc7539_tv_template) } }, { .alg = "rfc7539esp(chacha20,poly1305)", .test = alg_test_aead, 
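/*
 * The recurring pattern in this table: each AEAD entry's old paired
 * .enc/.dec vector lists collapse into a single __VECS() table,
 * because alg_test_aead() now drives the same vectors through both an
 * ENCRYPT and a DECRYPT pass instead of keeping separate decryption
 * vectors.
 */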
.suite = { - .aead = { - .enc = __VECS(rfc7539esp_enc_tv_template), - .dec = __VECS(rfc7539esp_dec_tv_template), - } + .aead = __VECS(rfc7539esp_tv_template) } }, { .alg = "rmd128", @@ -3675,18 +3902,10 @@ static const struct alg_test_desc alg_test_descs[] = { } }; -static bool alg_test_descs_checked; - -static void alg_test_descs_check_order(void) +static void alg_check_test_descs_order(void) { int i; - /* only check once */ - if (alg_test_descs_checked) - return; - - alg_test_descs_checked = true; - for (i = 1; i < ARRAY_SIZE(alg_test_descs); i++) { int diff = strcmp(alg_test_descs[i - 1].alg, alg_test_descs[i].alg); @@ -3704,6 +3923,29 @@ static void alg_test_descs_check_order(void) } } +static void alg_check_testvec_configs(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) + WARN_ON(!valid_testvec_config( + &default_cipher_testvec_configs[i])); + + for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) + WARN_ON(!valid_testvec_config( + &default_hash_testvec_configs[i])); +} + +static void testmgr_onetime_init(void) +{ + alg_check_test_descs_order(); + alg_check_testvec_configs(); + +#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS + pr_warn("alg: extra crypto tests enabled. This is intended for developer use only.\n"); +#endif +} + static int alg_find_test(const char *alg) { int start = 0; @@ -3740,7 +3982,7 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) return 0; } - alg_test_descs_check_order(); + DO_ONCE(testmgr_onetime_init); if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) { char nalg[CRYPTO_MAX_ALG_NAME]; diff --git a/crypto/testmgr.h b/crypto/testmgr.h @@ -5,6 +5,7 @@ * Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org> * Copyright (c) 2007 Nokia Siemens Networks * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au> + * Copyright (c) 2019 Google LLC * * Updated RFC4106 AES-GCM testing. Some test vectors were taken from * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/ @@ -24,19 +25,20 @@ #ifndef _CRYPTO_TESTMGR_H #define _CRYPTO_TESTMGR_H -#define MAX_DIGEST_SIZE 64 -#define MAX_TAP 8 - -#define MAX_KEYLEN 1088 #define MAX_IVLEN 32 +/* + * hash_testvec: structure to describe a hash (message digest) test + * @key: Pointer to key (NULL if none) + * @plaintext: Pointer to source data + * @digest: Pointer to expected digest + * @psize: Length of source data in bytes + * @ksize: Length of @key in bytes (0 if no key) + */ struct hash_testvec { - /* only used with keyed hash algorithms */ const char *key; const char *plaintext; const char *digest; - unsigned short tap[MAX_TAP]; - unsigned short np; unsigned short psize; unsigned short ksize; }; @@ -45,29 +47,24 @@ struct hash_testvec { * cipher_testvec: structure to describe a symmetric cipher test * @key: Pointer to key * @klen: Length of @key in bytes - * @iv: Pointer to IV (optional for some ciphers) + * @iv: Pointer to IV. If NULL, an all-zeroes IV is used. + * @iv_out: Pointer to output IV, if applicable for the cipher. * @ptext: Pointer to plaintext * @ctext: Pointer to ciphertext * @len: Length of @ptext and @ctext in bytes * @fail: If set to one, the test need to fail - * @wk: Does the test need CRYPTO_TFM_REQ_WEAK_KEY + * @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS? * ( e.g. 
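
/*
 * [Editor's sketch] The hunk above drops the hand-rolled
 * "alg_test_descs_checked" flag in favour of the kernel's DO_ONCE()
 * helper (from <linux/once.h>), with the sorted-order check and the
 * new testvec-config validation folded into testmgr_onetime_init().
 * The same run-exactly-once pattern in plain userspace C, using
 * pthread_once as a stand-in for DO_ONCE(); this is an analogue, not
 * the kernel macro itself:
 */
#include <pthread.h>
#include <stdio.h>

static pthread_once_t testmgr_once = PTHREAD_ONCE_INIT;

static void testmgr_onetime_init_demo(void)
{
	/* order + config self-checks would run here, exactly once */
	puts("one-time init");
}

static int alg_test_demo(const char *alg)
{
	pthread_once(&testmgr_once, testmgr_onetime_init_demo);
	printf("testing %s\n", alg);
	return 0;
}

int main(void)				/* build with: cc demo.c -lpthread */
{
	alg_test_demo("cbc(aes)");
	alg_test_demo("gcm(aes)");	/* init does not run again */
	return 0;
}
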
test needs to fail due to a weak key ) - * @np: numbers of SG to distribute data in (from 1 to MAX_TAP) - * @tap: How to distribute data in @np SGs - * @also_non_np: if set to 1, the test will be also done without - * splitting data in @np SGs * @fips_skip: Skip the test vector in FIPS mode - * @generates_iv: Encryption should ignore the given IV, and output @iv. - * Decryption takes @iv. Needed for AES Keywrap ("kw(aes)"). + * @generates_iv: Encryption should ignore the given IV, and output @iv_out. + * Decryption takes @iv_out. Needed for AES Keywrap ("kw(aes)"). */ struct cipher_testvec { const char *key; const char *iv; + const char *iv_out; const char *ptext; const char *ctext; - unsigned short tap[MAX_TAP]; - int np; - unsigned char also_non_np; bool fail; unsigned char wk; /* weak key flag */ unsigned char klen; @@ -76,23 +73,37 @@ struct cipher_testvec { bool generates_iv; }; +/* + * aead_testvec: structure to describe an AEAD test + * @key: Pointer to key + * @iv: Pointer to IV. If NULL, an all-zeroes IV is used. + * @ptext: Pointer to plaintext + * @assoc: Pointer to associated data + * @ctext: Pointer to the full authenticated ciphertext. For AEADs that + * produce a separate "ciphertext" and "authentication tag", these + * two parts are concatenated: ciphertext || tag. + * @fail: setkey() failure expected? + * @novrfy: Decryption verification failure expected? + * @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS? + * (e.g. setkey() needs to fail due to a weak key) + * @klen: Length of @key in bytes + * @plen: Length of @ptext in bytes + * @alen: Length of @assoc in bytes + * @clen: Length of @ctext in bytes + */ struct aead_testvec { const char *key; const char *iv; - const char *input; + const char *ptext; const char *assoc; - const char *result; - unsigned char tap[MAX_TAP]; - unsigned char atap[MAX_TAP]; - int np; - int anp; + const char *ctext; bool fail; - unsigned char novrfy; /* ccm dec verification failure expected */ - unsigned char wk; /* weak key flag */ + unsigned char novrfy; + unsigned char wk; unsigned char klen; - unsigned short ilen; + unsigned short plen; + unsigned short clen; unsigned short alen; - unsigned short rlen; }; struct cprng_testvec { @@ -1015,8 +1026,6 @@ static const struct hash_testvec md4_tv_template[] = { .psize = 26, .digest = "\xd7\x9e\x1c\x30\x8a\xa5\xbb\xcd" "\xee\xa8\xed\x63\xdf\x41\x2d\xa9", - .np = 2, - .tap = { 13, 13 }, }, { .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", .psize = 62, @@ -1053,8 +1062,6 @@ static const struct hash_testvec sha3_224_tv_template[] = { "\xc9\xfd\x55\x74\x49\x44\x79\xba" "\x5c\x7e\x7a\xb7\x6e\xf2\x64\xea" "\xd0\xfc\xce\x33", - .np = 2, - .tap = { 28, 28 }, }, { .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" @@ -1214,8 +1221,6 @@ static const struct hash_testvec sha3_256_tv_template[] = { "\x49\x10\x03\x76\xa8\x23\x5e\x2c" "\x82\xe1\xb9\x99\x8a\x99\x9e\x21" "\xdb\x32\xdd\x97\x49\x6d\x33\x76", - .np = 2, - .tap = { 28, 28 }, }, { .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" @@ -1382,8 +1387,6 @@ static const struct hash_testvec sha3_384_tv_template[] = { "\x9b\xfd\xbc\x32\xb9\xd4\xad\x5a" "\xa0\x4a\x1f\x07\x6e\x62\xfe\xa1" "\x9e\xef\x51\xac\xd0\x65\x7c\x22", - .np = 2, - .tap = { 28, 28 }, }, { .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" @@ -1558,8 +1561,6 @@ static const struct hash_testvec sha3_512_tv_template[] = { "\xba\x1b\x0d\x8d\xc7\x8c\x08\x63" 
"\x46\xb5\x33\xb4\x9c\x03\x0d\x99" "\xa2\x7d\xaf\x11\x39\xd6\xe7\x5e", - .np = 2, - .tap = { 28, 28 }, }, { .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" @@ -1729,8 +1730,6 @@ static const struct hash_testvec md5_tv_template[] = { .psize = 26, .digest = "\xc3\xfc\xd3\xd7\x61\x92\xe4\x00" "\x7d\xfb\x49\x6c\xca\x67\xe1\x3b", - .np = 2, - .tap = {13, 13} }, { .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", .psize = 62, @@ -1791,8 +1790,6 @@ static const struct hash_testvec rmd128_tv_template[] = { .psize = 56, .digest = "\xa1\xaa\x06\x89\xd0\xfa\xfa\x2d" "\xdc\x22\xe8\x8b\x49\x13\x3a\x06", - .np = 2, - .tap = { 28, 28 }, }, { .plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi" "jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr" @@ -1853,8 +1850,6 @@ static const struct hash_testvec rmd160_tv_template[] = { .psize = 56, .digest = "\x12\xa0\x53\x38\x4a\x9c\x0c\x88\xe4\x05" "\xa0\x6c\x27\xdc\xf4\x9a\xda\x62\xeb\x2b", - .np = 2, - .tap = { 28, 28 }, }, { .plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi" "jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr" @@ -1931,8 +1926,6 @@ static const struct hash_testvec rmd256_tv_template[] = { "\xc8\xd9\x12\x85\x73\xe7\xa9\x80" "\x9a\xfb\x2a\x0f\x34\xcc\xc3\x6e" "\xa9\xe7\x2f\x16\xf6\x36\x8e\x3f", - .np = 2, - .tap = { 28, 28 }, } }; @@ -1997,8 +1990,6 @@ static const struct hash_testvec rmd320_tv_template[] = { "\xb8\x4d\xf7\x69\xa5\xde\x20\x60\xe2\x59" "\xdf\x4c\x9b\xb4\xa4\x26\x8c\x0e\x93\x5b" "\xbc\x74\x70\xa9\x69\xc9\xd0\x72\xa1\xac", - .np = 2, - .tap = { 28, 28 }, } }; @@ -2012,26 +2003,11 @@ static const struct hash_testvec crct10dif_tv_template[] = { "123456789012345678901234567890123456789", .psize = 79, .digest = (u8 *)(u16 []){ 0x4b70 }, - .np = 2, - .tap = { 63, 16 }, }, { .plaintext = "abcdddddddddddddddddddddddddddddddddddddddd" "ddddddddddddd", .psize = 56, .digest = (u8 *)(u16 []){ 0x9ce3 }, - .np = 8, - .tap = { 1, 2, 28, 7, 6, 5, 4, 3 }, - }, { - .plaintext = "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "123456789012345678901234567890123456789", - .psize = 319, - .digest = (u8 *)(u16 []){ 0x44c6 }, }, { .plaintext = "1234567890123456789012345678901234567890" "1234567890123456789012345678901234567890" @@ -2043,8 +2019,6 @@ static const struct hash_testvec crct10dif_tv_template[] = { "123456789012345678901234567890123456789", .psize = 319, .digest = (u8 *)(u16 []){ 0x44c6 }, - .np = 4, - .tap = { 1, 255, 57, 6 }, }, { .plaintext = "\x6e\x05\x79\x10\xa7\x1b\xb2\x49" "\xe0\x54\xeb\x82\x19\x8d\x24\xbb" @@ -2510,8 +2484,6 @@ static const struct hash_testvec sha1_tv_template[] = { .psize = 56, .digest = "\x84\x98\x3e\x44\x1c\x3b\xd2\x6e\xba\xae" "\x4a\xa1\xf9\x51\x29\xe5\xe5\x46\x70\xf1", - .np = 2, - .tap = { 28, 28 } }, { .plaintext = "\xec\x29\x56\x12\x44\xed\xe7\x06" "\xb6\xeb\x30\xa1\xc3\x71\xd7\x44" @@ -2537,8 +2509,6 @@ static const struct hash_testvec sha1_tv_template[] = { .psize = 163, .digest = "\x97\x01\x11\xc4\xe7\x7b\xcc\x88\xcc\x20" "\x45\x9c\x02\xb6\x9b\x4a\xa8\xf5\x82\x17", - .np = 4, - .tap = { 63, 64, 31, 5 } }, { .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-", .psize = 64, @@ -2707,8 +2677,6 @@ static const struct hash_testvec sha224_tv_template[] = { 
"\x5D\xBA\x5D\xA1\xFD\x89\x01\x50" "\xB0\xC6\x45\x5C\xB4\xF5\x8B\x19" "\x52\x52\x25\x25", - .np = 2, - .tap = { 28, 28 } }, { .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-", .psize = 64, @@ -2878,8 +2846,6 @@ static const struct hash_testvec sha256_tv_template[] = { "\xe5\xc0\x26\x93\x0c\x3e\x60\x39" "\xa3\x3c\xe4\x59\x64\xff\x21\x67" "\xf6\xec\xed\xd4\x19\xdb\x06\xc1", - .np = 2, - .tap = { 28, 28 } }, { .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-", .psize = 64, @@ -3075,8 +3041,6 @@ static const struct hash_testvec sha384_tv_template[] = { "\x4d\x8f\xd0\x14\xe5\x82\x82\x3a" "\x89\xe1\x6f\x9b\x2a\x7b\xbc\x1a" "\xc9\x38\xe2\xd1\x99\xe8\xbe\xa4", - .np = 4, - .tap = { 26, 26, 26, 26 } }, { .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" @@ -3277,8 +3241,6 @@ static const struct hash_testvec sha512_tv_template[] = { "\xb2\x78\xe6\x6d\xff\x8b\x84\xfe" "\x2b\x28\x70\xf7\x42\xa5\x80\xd8" "\xed\xb4\x19\x87\x23\x28\x50\xc9", - .np = 4, - .tap = { 26, 26, 26, 26 } }, { .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" @@ -3811,8 +3773,6 @@ static const struct hash_testvec ghash_tv_template[] = .psize = 28, .digest = "\x3e\x1f\x5c\x4d\x65\xf0\xef\xce" "\x0d\x61\x06\x27\x66\x51\xd5\xe2", - .np = 2, - .tap = {14, 14} }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa", @@ -3923,8 +3883,6 @@ static const struct hash_testvec hmac_md5_tv_template[] = .psize = 28, .digest = "\x75\x0c\x78\x3e\x6a\xb0\xb5\x03" "\xea\xa8\x6e\x31\x0a\x5d\xb7\x38", - .np = 2, - .tap = {14, 14} }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa", .ksize = 16, @@ -4002,8 +3960,6 @@ static const struct hash_testvec hmac_rmd128_tv_template[] = { .psize = 28, .digest = "\x87\x5f\x82\x88\x62\xb6\xb3\x34" "\xb4\x27\xc5\x5f\x9f\x7f\xf0\x9b", - .np = 2, - .tap = { 14, 14 }, }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa", .ksize = 16, @@ -4081,8 +4037,6 @@ static const struct hash_testvec hmac_rmd160_tv_template[] = { .psize = 28, .digest = "\xdd\xa6\xc0\x21\x3a\x48\x5a\x9e\x24\xf4" "\x74\x20\x64\xa7\xf0\x33\xb4\x3c\x40\x69", - .np = 2, - .tap = { 14, 14 }, }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa", .ksize = 20, @@ -4161,8 +4115,6 @@ static const struct hash_testvec hmac_sha1_tv_template[] = { .psize = 28, .digest = "\xef\xfc\xdf\x6a\xe5\xeb\x2f\xa2\xd2\x74" "\x16\xd5\xf1\x84\xdf\x9c\x25\x9a\x7c\x79", - .np = 2, - .tap = { 14, 14 } }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa", .ksize = 20, @@ -4252,8 +4204,6 @@ static const struct hash_testvec hmac_sha224_tv_template[] = { "\x45\x69\x0f\x3a\x7e\x9e\x6d\x0f" "\x8b\xbe\xa2\xa3\x9e\x61\x48\x00" "\x8f\xd0\x5e\x44", - .np = 4, - .tap = { 7, 7, 7, 7 } }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -4397,8 +4347,6 @@ static const struct hash_testvec hmac_sha256_tv_template[] = { "\x6a\x04\x24\x26\x08\x95\x75\xc7" "\x5a\x00\x3f\x08\x9d\x27\x39\x83" "\x9d\xec\x58\xb9\x64\xec\x38\x43", - .np = 2, - .tap = { 14, 14 } }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -4571,8 +4519,6 @@ static const struct hash_testvec aes_cbcmac_tv_template[] = { "\xf8\xf2\x76\x03\xac\x39\xb0\x9d", .psize = 33, .ksize = 16, - .np = 2, - .tap = { 7, 26 }, }, { .key = 
"\x2b\x7e\x15\x16\x28\xae\xd2\xa6" "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", @@ -4689,9 +4635,7 @@ static const struct hash_testvec aes_xcbc128_tv_template[] = { "\x10\x11\x12\x13", .digest = "\x47\xf5\x1b\x45\x64\x96\x62\x15" "\xb8\x98\x5c\x63\x05\x5e\xd3\x08", - .tap = { 10, 10 }, .psize = 20, - .np = 2, .ksize = 16, }, { .key = "\x00\x01\x02\x03\x04\x05\x06\x07" @@ -4714,9 +4658,7 @@ static const struct hash_testvec aes_xcbc128_tv_template[] = { "\x20\x21", .digest = "\xbe\xcb\xb3\xbc\xcd\xb5\x18\xa3" "\x06\x77\xd5\x48\x1f\xb6\xb4\xd8", - .tap = { 17, 17 }, .psize = 34, - .np = 2, .ksize = 16, } }; @@ -4799,8 +4741,6 @@ static const struct hash_testvec vmac64_aes_tv_template[] = { "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabc", .psize = 316, .digest = "\x44\x92\xdf\x6c\x5c\xac\x1b\xbe", - .tap = { 1, 100, 200, 15 }, - .np = 4, }, { .key = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", @@ -4905,8 +4845,6 @@ static const struct hash_testvec hmac_sha384_tv_template[] = { "\xe4\x2e\xc3\x73\x63\x22\x44\x5e" "\x8e\x22\x40\xca\x5e\x69\xe2\xc7" "\x8b\x32\x39\xec\xfa\xb2\x16\x49", - .np = 4, - .tap = { 7, 7, 7, 7 } }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -5007,8 +4945,6 @@ static const struct hash_testvec hmac_sha512_tv_template[] = { "\x6d\x03\x4f\x65\xf8\xf0\xe6\xfd" "\xca\xea\xb1\xa3\x4d\x4a\x6b\x4b" "\x63\x6e\x07\x0a\x38\xbc\xe7\x37", - .np = 4, - .tap = { 7, 7, 7, 7 } }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -5104,8 +5040,6 @@ static const struct hash_testvec hmac_sha3_224_tv_template[] = { "\x1b\x79\x86\x34\xad\x38\x68\x11" "\xc2\xcf\xc8\x5b\xfa\xf5\xd5\x2b" "\xba\xce\x5e\x66", - .np = 4, - .tap = { 7, 7, 7, 7 } }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -5193,8 +5127,6 @@ static const struct hash_testvec hmac_sha3_256_tv_template[] = { "\x35\x96\xbb\xb0\xda\x73\xb8\x87" "\xc9\x17\x1f\x93\x09\x5b\x29\x4a" "\xe8\x57\xfb\xe2\x64\x5e\x1b\xa5", - .np = 4, - .tap = { 7, 7, 7, 7 } }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -5286,8 +5218,6 @@ static const struct hash_testvec hmac_sha3_384_tv_template[] = { "\x3c\xa1\x35\x08\xa9\x32\x43\xce" "\x48\xc0\x45\xdc\x00\x7f\x26\xa2" "\x1b\x3f\x5e\x0e\x9d\xf4\xc2\x0a", - .np = 4, - .tap = { 7, 7, 7, 7 } }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -5387,8 +5317,6 @@ static const struct hash_testvec hmac_sha3_512_tv_template[] = { "\xee\x7a\x0c\x31\xd0\x22\xa9\x5e" "\x1f\xc9\x2b\xa9\xd7\x7d\xf8\x83" "\x96\x02\x75\xbe\xb4\xe6\x20\x24", - .np = 4, - .tap = { 7, 7, 7, 7 } }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -5996,8 +5924,150 @@ static const struct hash_testvec nhpoly1305_tv_template[] = { .psize = 16, .digest = "\x04\xbf\x7f\x6a\xce\x72\xea\x6a" "\x79\xdb\xb0\xc9\x60\xf6\x12\xcc", - .np = 6, - .tap = { 4, 4, 1, 1, 1, 5 }, + }, { + .key = "\x2e\x77\x1e\x2c\x63\x76\x34\x3f" + "\x71\x08\x4f\x5a\xe3\x3d\x74\x56" + "\xc7\x98\x46\x52\xe5\x8a\xba\x0d" + "\x72\x41\x11\x15\x14\x72\x50\x8a" + "\xd5\xec\x60\x09\xdd\x71\xcc\xb9" + "\x59\x81\x65\x2d\x9e\x50\x18\xf3" + "\x32\xf3\xf1\xe7\x01\x82\x1c\xad" + "\x88\xa0\x21\x0c\x4b\x80\x5e\x62" + "\xfc\x81\xec\x52\xaa\xe4\xa5\x86" + "\xc2\xe6\x03\x11\xdc\x66\x09\x86" + "\x3c\x3b\xf0\x59\x0f\xb3\xf7\x44" + "\x24\xb7\x88\xc5\xfc\xc8\x77\x9f" + "\x8c\x44\xc4\x11\x55\xce\x7a\xa3" + "\xe0\xa2\xb8\xbf\xb5\x3d\x07\x2c" + "\x32\xb6\x6c\xfc\xb4\x42\x95\x95" + 
"\x98\x32\x81\xc4\xe7\xe2\xd9\x6a" + "\x87\xf4\xf4\x1e\x74\x7c\xb5\xcd" + "\x51\x45\x68\x38\x51\xdb\x30\x74" + "\x11\xe0\xaa\xae\x19\x8f\x15\x55" + "\xdd\x47\x4a\x35\xb9\x0c\xb4\x4e" + "\xa9\xce\x2f\xfa\x8f\xc1\x8a\x5e" + "\x5b\xec\xa5\x81\x3b\xb3\x43\x06" + "\x24\x81\xf4\x24\xe2\x21\xfa\xcb" + "\x49\xa8\xf8\xbd\x31\x4a\x5b\x2d" + "\x64\x0a\x07\xf0\x80\xc9\x0d\x81" + "\x14\x58\x54\x2b\xba\x22\x31\xba" + "\xef\x66\xc9\x49\x69\x69\x83\x0d" + "\xf2\xf9\x80\x9d\x30\x36\xfb\xe3" + "\xc0\x72\x2b\xcc\x5a\x81\x2c\x5d" + "\x3b\x5e\xf8\x2b\xd3\x14\x28\x73" + "\xf9\x1c\x70\xe6\xd8\xbb\xac\x30" + "\xf9\xd9\xa0\xe2\x33\x7c\x33\x34" + "\xa5\x6a\x77\x6d\xd5\xaf\xf4\xf3" + "\xc7\xb3\x0e\x83\x3d\xcb\x01\xcc" + "\x81\xc0\xf9\x4a\xae\x36\x92\xf7" + "\x69\x7b\x65\x01\xc3\xc8\xb8\xae" + "\x16\xd8\x30\xbb\xba\x6d\x78\x6e" + "\x0d\xf0\x7d\x84\xb7\x87\xda\x28" + "\x7a\x18\x10\x0b\x29\xec\x29\xf3" + "\xb0\x7b\xa1\x28\xbf\xbc\x2b\x2c" + "\x92\x2c\x16\xfb\x02\x39\xf9\xa6" + "\xa2\x15\x05\xa6\x72\x10\xbc\x62" + "\x4a\x6e\xb8\xb5\x5d\x59\xae\x3c" + "\x32\xd3\x68\xd7\x8e\x5a\xcd\x1b" + "\xef\xf6\xa7\x5e\x10\x51\x15\x4b" + "\x2c\xe3\xba\x70\x4f\x2c\xa0\x1c" + "\x7b\x97\xd7\xb2\xa5\x05\x17\xcc" + "\xf7\x3a\x29\x6f\xd5\x4b\xb8\x24" + "\xf4\x65\x95\x12\xc0\x86\xd1\x64" + "\x81\xdf\x46\x55\x0d\x22\x06\x77" + "\xd8\xca\x8d\xc8\x87\xc3\xfa\xb9" + "\xe1\x98\x94\xe6\x7b\xed\x65\x66" + "\x0e\xc7\x25\x15\xee\x4a\xe6\x7e" + "\xea\x1b\x58\xee\x96\xa0\x75\x9a" + "\xa3\x00\x9e\x42\xc2\x26\x20\x8c" + "\x3d\x22\x1f\x94\x3e\x74\x43\x72" + "\xe9\x1d\xa6\xa1\x6c\xa7\xb8\x03" + "\xdf\xb9\x7a\xaf\xe9\xe9\x3b\xfe" + "\xdf\x91\xc1\x01\xa8\xba\x5d\x29" + "\xa5\xe0\x98\x9b\x13\xe5\x13\x11" + "\x7c\x04\x3a\xe8\x44\x7e\x78\xfc" + "\xd6\x96\xa8\xbc\x7d\xc1\x89\x3d" + "\x75\x64\xa9\x0e\x86\x33\xfb\x73" + "\xf7\x15\xbc\x2c\x9a\x3f\x29\xce" + "\x1c\x9d\x10\x4e\x85\xe1\x77\x41" + "\x01\xe2\xbc\x88\xec\x81\xef\xc2" + "\x6a\xed\x4f\xf7\xdf\xac\x10\x71" + "\x94\xed\x71\xa4\x01\xd4\xd6\xbe" + "\xfe\x3e\xc3\x92\x6a\xf2\x2b\xb5" + "\xab\x15\x96\xb7\x88\x2c\xc2\xe1" + "\xb0\x04\x22\xe7\x3d\xa9\xc9\x7d" + "\x2c\x7c\x21\xff\x97\x86\x6b\x0c" + "\x2b\x5b\xe0\xb6\x48\x74\x8f\x24" + "\xef\x8e\xdd\x0f\x2a\x5f\xff\x33" + "\xf4\x8e\xc5\xeb\x9c\xd7\x2a\x45" + "\xf3\x50\xf1\xc0\x91\x8f\xc7\xf9" + "\x97\xc1\x3c\x9c\xf4\xed\x8a\x23" + "\x61\x5b\x40\x1a\x09\xee\x23\xa8" + "\x7c\x7a\x96\xe1\x31\x55\x3d\x12" + "\x04\x1f\x21\x78\x72\xf0\x0f\xa5" + "\x80\x58\x7c\x2f\x37\xb5\x67\x24" + "\x2f\xce\xf9\xf6\x86\x9f\xb3\x34" + "\x0c\xfe\x0a\xaf\x27\xe6\x5e\x0a" + "\x21\x44\x68\xe1\x5d\x84\x25\xae" + "\x2c\x5a\x94\x66\x9a\x3f\x0e\x5a" + "\xd0\x60\x2a\xd5\x3a\x4e\x2f\x40" + "\x87\xe9\x27\x3e\xee\x92\xe1\x07" + "\x22\x43\x52\xed\x67\x49\x13\xdd" + "\x68\xd7\x54\xc2\x76\x72\x7e\x75" + "\xaf\x24\x98\x5c\xe8\x22\xaa\x35" + "\x0f\x9a\x1c\x4c\x0b\x43\x68\x99" + "\x45\xdd\xbf\x82\xa5\x6f\x0a\xef" + "\x44\x90\x85\xe7\x57\x23\x22\x41" + "\x2e\xda\x24\x28\x65\x7f\x96\x85" + "\x9f\x4b\x0d\x43\xb9\xa8\xbd\x84" + "\xad\x0b\x09\xcc\x2c\x4a\x0c\xec" + "\x71\x58\xba\xf1\xfc\x49\x4c\xca" + "\x5c\x5d\xb2\x77\x0c\x99\xae\x1c" + "\xce\x70\x05\x5b\x73\x6b\x7c\x28" + "\x3b\xeb\x21\x3f\xa3\x71\xe1\x6a" + "\xf4\x87\xd0\xbf\x73\xaa\x0b\x0b" + "\xed\x70\xb3\xd4\xa3\xca\x76\x3a" + "\xdb\xfa\xd8\x08\x95\xec\xac\x59" + "\xd0\x79\x90\xc2\x33\x7b\xcc\x28" + "\x65\xb6\x5f\x92\xc4\xac\x23\x40" + "\xd1\x20\x44\x1f\xd7\x29\xab\x46" + "\x79\x32\xc6\x8f\x79\xe5\xaa\x2c" + "\xa6\x76\x70\x3a\x9e\x46\x3f\x8c" + "\x1a\x89\x32\x28\x61\x5c\xcf\x93" + "\x1e\xde\x9e\x98\xbe\x06\x30\x23" + "\xc4\x8b\xda\x1c\xd1\x67\x46\x93" + 
"\x9d\x41\xa2\x8c\x03\x22\xbd\x55" + "\x7e\x91\x51\x13\xdc\xcf\x5c\x1e" + "\xcb\x5d\xfb\x14\x16\x1a\x44\x56" + "\x27\x77\xfd\xed\x7d\xbd\xd1\x49" + "\x7f\x0d\xc3\x59\x48\x6b\x3c\x02" + "\x6b\xb5\xd0\x83\xd5\x81\x29\xe7" + "\xe0\xc9\x36\x23\x8d\x41\x33\x77" + "\xff\x5f\x54\xde\x4d\x3f\xd2\x4e" + "\xb6\x4d\xdd\x85\xf8\x9b\x20\x7d" + "\x39\x27\x68\x63\xd3\x8e\x61\x39" + "\xfa\xe1\xc3\x04\x74\x27\x5a\x34" + "\x7f\xec\x59\x2d\xc5\x6e\x54\x23" + "\xf5\x7b\x4b\xbe\x58\x2b\xf2\x81" + "\x93\x63\xcc\x13\xd9\x90\xbb\x6a" + "\x41\x03\x8d\x95\xeb\xbb\x5d\x06" + "\x38\x4c\x0e\xd6\xa9\x5b\x84\x97" + "\x3e\x64\x72\xe9\x96\x07\x0f\x73" + "\x6e\xc6\x3b\x32\xbe\xac\x13\x14" + "\xd0\x0a\x17\x5f\xb9\x9c\x3e\x34" + "\xd9\xec\xd6\x8f\x89\xbf\x1e\xd3" + "\xda\x80\xb2\x29\xff\x28\x96\xb3" + "\x46\x50\x5b\x15\x80\x97\xee\x1f" + "\x6c\xd8\xe8\xe0\xbd\x09\xe7\x20" + "\x8c\x23\x8e\xd9\xbb\x92\xfa\x82" + "\xaa\x0f\xb5\xf8\x78\x60\x11\xf0", + .ksize = 1088, + .plaintext = "\x0b\xb2\x31\x2d\xad\xfe\xce\xf9" + "\xec\x5d\x3d\x64\x5f\x3f\x75\x43" + "\x05\x5b\x97", + .psize = 19, + .digest = "\x5f\x02\xae\x65\x6c\x13\x21\x67" + "\x77\x9e\xc4\x43\x58\x68\xde\x8f", }, { .key = "\x65\x4d\xe3\xf8\xd2\x4c\xac\x28" "\x68\xf5\xb3\x81\x71\x4b\xa1\xfa" @@ -6267,8 +6337,6 @@ static const struct hash_testvec nhpoly1305_tv_template[] = { .psize = 1024, .digest = "\x64\x3a\xbc\xc3\x3f\x74\x40\x51" "\x6e\x56\x01\x1a\x51\xec\x36\xde", - .np = 8, - .tap = { 64, 203, 267, 28, 263, 62, 54, 83 }, }, { .key = "\x1b\x82\x2e\x1b\x17\x23\xb9\x6d" "\xdc\x9c\xda\x99\x07\xe3\x5f\xd8" @@ -6989,18 +7057,6 @@ static const struct cipher_testvec des_tv_template[] = { .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d" "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b", .len = 16, - .np = 2, - .tap = { 8, 8 } - }, { - .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", - .klen = 8, - .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7" - "\xa3\x99\x7b\xca\xaf\x69\xa0\xf5", - .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d" - "\x69\x0f\x5b\x0d\x9a\x26\x93\x9b", - .len = 16, - .np = 2, - .tap = { 8, 8 } }, { .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", .klen = 8, @@ -7009,8 +7065,6 @@ static const struct cipher_testvec des_tv_template[] = { .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d" "\x69\x0f\x5b\x0d\x9a\x26\x93\x9b", .len = 16, - .np = 3, - .tap = { 3, 12, 1 } }, { /* Four blocks -- for testing encryption with chunking */ .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", .klen = 8, @@ -7023,38 +7077,6 @@ static const struct cipher_testvec des_tv_template[] = { "\xb4\x99\x26\xf7\x1f\xe1\xd4\x90" "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b", .len = 32, - .np = 3, - .tap = { 14, 10, 8 } - }, { - .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", - .klen = 8, - .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7" - "\x22\x33\x44\x55\x66\x77\x88\x99" - "\xca\xfe\xba\xbe\xfe\xed\xbe\xef", - .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d" - "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b" - "\xb4\x99\x26\xf7\x1f\xe1\xd4\x90", - .len = 24, - .np = 4, - .tap = { 2, 1, 3, 18 } - }, { - .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", - .klen = 8, - .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7" - "\x22\x33\x44\x55\x66\x77\x88\x99", - .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d" - "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b", - .len = 16, - .np = 5, - .tap = { 2, 2, 2, 2, 8 } - }, { - .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", - .klen = 8, - .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7", - .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d", - .len = 8, - .np = 8, - .tap = { 1, 1, 1, 1, 1, 1, 1, 1 } }, { /* Generated with Crypto++ */ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55", .klen = 8, @@ 
-7121,9 +7143,6 @@ static const struct cipher_testvec des_tv_template[] = { "\xE1\x58\x39\x09\xB4\x8B\x40\xAC" "\x5F\x62\xC7\x72\xD9\xFC\xCB\x9A", .len = 248, - .also_non_np = 1, - .np = 3, - .tap = { 248 - 10, 2, 8 }, }, }; @@ -7132,6 +7151,7 @@ static const struct cipher_testvec des_cbc_tv_template[] = { .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", .klen = 8, .iv = "\xfe\xdc\xba\x98\x76\x54\x32\x10", + .iv_out = "\x46\x8e\x91\x15\x78\x88\xba\x68", .ptext = "\x37\x36\x35\x34\x33\x32\x31\x20" "\x4e\x6f\x77\x20\x69\x73\x20\x74" "\x68\x65\x20\x74\x69\x6d\x65\x20", @@ -7143,6 +7163,7 @@ static const struct cipher_testvec des_cbc_tv_template[] = { .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", .klen = 8, .iv = "\x12\x34\x56\x78\x90\xab\xcd\xef", + .iv_out = "\xe5\xc7\xcd\xde\x87\x2b\xf2\x7c", .ptext = "\x4e\x6f\x77\x20\x69\x73\x20\x74", .ctext = "\xe5\xc7\xcd\xde\x87\x2b\xf2\x7c", .len = 8, @@ -7150,6 +7171,7 @@ static const struct cipher_testvec des_cbc_tv_template[] = { .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", .klen = 8, .iv = "\xe5\xc7\xcd\xde\x87\x2b\xf2\x7c", + .iv_out = "\x43\xe9\x34\x00\x8c\x38\x9c\x0f", .ptext = "\x68\x65\x20\x74\x69\x6d\x65\x20", .ctext = "\x43\xe9\x34\x00\x8c\x38\x9c\x0f", .len = 8, @@ -7157,30 +7179,15 @@ static const struct cipher_testvec des_cbc_tv_template[] = { .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", .klen = 8, .iv = "\x43\xe9\x34\x00\x8c\x38\x9c\x0f", + .iv_out = "\x68\x37\x88\x49\x9a\x7c\x05\xf6", .ptext = "\x66\x6f\x72\x20\x61\x6c\x6c\x20", .ctext = "\x68\x37\x88\x49\x9a\x7c\x05\xf6", .len = 8, - .np = 2, - .tap = { 4, 4 }, - .also_non_np = 1, - }, { /* Copy of openssl vector for chunk testing */ - /* From OpenSSL */ - .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", - .klen = 8, - .iv = "\xfe\xdc\xba\x98\x76\x54\x32\x10", - .ptext = "\x37\x36\x35\x34\x33\x32\x31\x20" - "\x4e\x6f\x77\x20\x69\x73\x20\x74" - "\x68\x65\x20\x74\x69\x6d\x65\x20", - .ctext = "\xcc\xd1\x73\xff\xab\x20\x39\xf4" - "\xac\xd8\xae\xfd\xdf\xd8\xa1\xeb" - "\x46\x8e\x91\x15\x78\x88\xba\x68", - .len = 24, - .np = 2, - .tap = { 13, 11 } }, { /* Generated with Crypto++ */ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55", .klen = 8, .iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47", + .iv_out = "\xC6\x4A\xF3\x55\xC7\x29\x2E\x63", .ptext = "\x50\xB9\x22\xAE\x17\x80\x0C\x75" "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03" "\x6C\xF8\x61\xCA\x33\xBF\x28\x91" @@ -7244,9 +7251,6 @@ static const struct cipher_testvec des_cbc_tv_template[] = { "\x82\xA9\xBD\x6A\x31\x91\x39\x11" "\xC6\x4A\xF3\x55\xC7\x29\x2E\x63", .len = 248, - .also_non_np = 1, - .np = 3, - .tap = { 248 - 10, 2, 8 }, }, }; @@ -7255,6 +7259,7 @@ static const struct cipher_testvec des_ctr_tv_template[] = { .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55", .klen = 8, .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD", + .iv_out = "\x00\x00\x00\x00\x00\x00\x00\x1C", .ptext = "\x50\xB9\x22\xAE\x17\x80\x0C\x75" "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03" "\x6C\xF8\x61\xCA\x33\xBF\x28\x91" @@ -7318,13 +7323,11 @@ static const struct cipher_testvec des_ctr_tv_template[] = { "\x19\x7F\x99\x19\x53\xCE\x1D\x14" "\x69\x74\xA1\x06\x46\x0F\x4E\x75", .len = 248, - .also_non_np = 1, - .np = 3, - .tap = { 248 - 10, 2, 8 }, }, { /* Generated with Crypto++ */ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55", .klen = 8, .iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47", + .iv_out = "\xE7\x82\x1D\xB8\x53\x11\xAC\x66", .ptext = "\x50\xB9\x22\xAE\x17\x80\x0C\x75" "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03" "\x6C\xF8\x61\xCA\x33\xBF\x28\x91" @@ -7388,9 +7391,6 @@ static const struct cipher_testvec des_ctr_tv_template[] = { 
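
/*
 * [Editor's sketch] The ".iv_out" fields being added throughout these
 * hunks let the harness check the IV an implementation writes back
 * after an operation. For CTR the expected value is the starting
 * counter plus the number of blocks consumed -- a trailing partial
 * block still burns one counter, which is why the 247-byte des_ctr
 * vector here also advances by 31 blocks. Verifying the arithmetic of
 * the first des_ctr vector above (FF..FD + 248/8 blocks wraps to
 * 00..1C):
 */
#include <stdio.h>
#include <stdint.h>

static void ctr_iv_out(uint8_t ctr[8], unsigned int nbytes, unsigned int bs)
{
	unsigned int nblocks = (nbytes + bs - 1) / bs;	/* round up */

	while (nblocks--)
		for (int i = 7; i >= 0; i--)	/* big-endian increment */
			if (++ctr[i])
				break;
}

int main(void)
{
	uint8_t iv[8] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFD };

	ctr_iv_out(iv, 248, 8);			/* 31 full DES blocks */
	for (int i = 0; i < 8; i++)
		printf("%02X", iv[i]);		/* expect 000000000000001C */
	printf("\n");
	return 0;
}
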
"\xA5\xA6\xE7\xB0\x51\x36\x52\x37" "\x91\x45\x05\x3E\x58\xBF\x32", .len = 247, - .also_non_np = 1, - .np = 2, - .tap = { 247 - 8, 8 }, }, }; @@ -7549,9 +7549,6 @@ static const struct cipher_testvec des3_ede_tv_template[] = { "\x93\x03\xD7\x51\x09\xFA\xBE\x68" "\xD8\x45\xFF\x33\xBA\xBB\x2B\x63", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -7562,6 +7559,7 @@ static const struct cipher_testvec des3_ede_cbc_tv_template[] = { "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8", .klen = 24, .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", + .iv_out = "\x6b\xfa\xb1\x91\x13\xb0\xd9\x19", .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" @@ -7602,6 +7600,7 @@ static const struct cipher_testvec des3_ede_cbc_tv_template[] = { .klen = 24, .iv = "\xB2\xD7\x48\xED\x06\x44\xF9\x12" "\xB7\x28\x4D\x83\x24\x59\xF2\x17", + .iv_out = "\x95\x63\x73\xA2\x44\xAC\xF8\xA5", .ptext = "\x05\xEC\x77\xFB\x42\xD5\x59\x20" "\x8B\x12\x86\x69\xF0\x5B\xCF\x56" "\x39\xAD\x34\x9F\x66\xEA\x7D\xC4" @@ -7727,9 +7726,6 @@ static const struct cipher_testvec des3_ede_cbc_tv_template[] = { "\x83\x70\xFF\x86\xE6\xAA\x0F\x1F" "\x95\x63\x73\xA2\x44\xAC\xF8\xA5", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -7739,8 +7735,8 @@ static const struct cipher_testvec des3_ede_ctr_tv_template[] = { "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE" "\xEB\xB4\x51\x72\xB4\x51\x72\x1F", .klen = 24, - .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF" - "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD", + .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF", + .iv_out = "\x00\x00\x00\x00\x00\x00\x00\x3D", .ptext = "\x05\xEC\x77\xFB\x42\xD5\x59\x20" "\x8B\x12\x86\x69\xF0\x5B\xCF\x56" "\x39\xAD\x34\x9F\x66\xEA\x7D\xC4" @@ -7866,16 +7862,13 @@ static const struct cipher_testvec des3_ede_ctr_tv_template[] = { "\xFD\x51\xB0\xC6\x2C\x63\x13\x78" "\x5C\xEE\xFC\xCF\xC4\x70\x00\x34", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, { /* Generated with Crypto++ */ .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00" "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE" "\xEB\xB4\x51\x72\xB4\x51\x72\x1F", .klen = 24, - .iv = "\xB2\xD7\x48\xED\x06\x44\xF9\x12" - "\xB7\x28\x4D\x83\x24\x59\xF2\x17", + .iv = "\xB2\xD7\x48\xED\x06\x44\xF9\x12", + .iv_out = "\xB2\xD7\x48\xED\x06\x44\xF9\x51", .ptext = "\x05\xEC\x77\xFB\x42\xD5\x59\x20" "\x8B\x12\x86\x69\xF0\x5B\xCF\x56" "\x39\xAD\x34\x9F\x66\xEA\x7D\xC4" @@ -8003,9 +7996,6 @@ static const struct cipher_testvec des3_ede_ctr_tv_template[] = { "\x32\x0F\x05\x2F\xF2\x4C\x95\x3B" "\xF2\x79\xD9", .len = 499, - .also_non_np = 1, - .np = 2, - .tap = { 499 - 16, 16 }, }, }; @@ -8191,9 +8181,6 @@ static const struct cipher_testvec bf_tv_template[] = { "\x56\xEB\x36\x77\x3D\xAA\xB8\xF5" "\xC9\x1A\xFB\x5D\xDE\xBB\x43\xF4", .len = 504, - .also_non_np = 1, - .np = 3, - .tap = { 504 - 10, 2, 8 }, }, }; @@ -8203,6 +8190,7 @@ static const struct cipher_testvec bf_cbc_tv_template[] = { "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87", .klen = 16, .iv = "\xfe\xdc\xba\x98\x76\x54\x32\x10", + .iv_out = "\x59\xf1\x65\x2b\xd5\xff\x92\xcc", .ptext = "\x37\x36\x35\x34\x33\x32\x31\x20" "\x4e\x6f\x77\x20\x69\x73\x20\x74" "\x68\x65\x20\x74\x69\x6d\x65\x20" @@ -8219,6 +8207,7 @@ static const struct cipher_testvec bf_cbc_tv_template[] = { "\x78\xBE\x9B\x78\x55\x32\x0F\x55", .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F", + .iv_out = "\xB4\x98\xD8\x6B\x74\xE7\x65\xF4", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -8346,9 
+8335,6 @@ static const struct cipher_testvec bf_cbc_tv_template[] = { "\x93\x9B\xEE\xB5\x97\x41\xD2\xA0" "\xB4\x98\xD8\x6B\x74\xE7\x65\xF4", .len = 504, - .also_non_np = 1, - .np = 3, - .tap = { 504 - 10, 2, 8 }, }, }; @@ -8360,6 +8346,7 @@ static const struct cipher_testvec bf_ctr_tv_template[] = { "\x78\xBE\x9B\x78\x55\x32\x0F\x55", .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F", + .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x9E", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -8494,6 +8481,7 @@ static const struct cipher_testvec bf_ctr_tv_template[] = { "\x78\xBE\x9B\x78\x55\x32\x0F\x55", .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F", + .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x9E", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -8621,9 +8609,6 @@ static const struct cipher_testvec bf_ctr_tv_template[] = { "\x32\x44\x96\x1C\xD8\xEB\x95\xD2" "\xF3\x71\xEF\xEB\x4E\xBB\x4D", .len = 503, - .also_non_np = 1, - .np = 2, - .tap = { 503 - 8, 8 }, }, { /* Generated with Crypto++ */ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" @@ -8631,6 +8616,7 @@ static const struct cipher_testvec bf_ctr_tv_template[] = { "\x78\xBE\x9B\x78\x55\x32\x0F\x55", .klen = 32, .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD", + .iv_out = "\x00\x00\x00\x00\x00\x00\x00\x3C", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -8922,9 +8908,6 @@ static const struct cipher_testvec tf_tv_template[] = { "\x58\x33\x9B\x78\xC7\x58\x48\x6B" "\x2C\x75\x64\xC4\xCA\xC1\x7E\xD5", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -8933,6 +8916,8 @@ static const struct cipher_testvec tf_cbc_tv_template[] = { .key = zeroed_string, .klen = 16, .iv = zeroed_string, + .iv_out = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32" + "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a", .ptext = zeroed_string, .ctext = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32" "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a", @@ -8942,6 +8927,8 @@ static const struct cipher_testvec tf_cbc_tv_template[] = { .klen = 16, .iv = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32" "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a", + .iv_out = "\xd4\x91\xdb\x16\xe7\xb1\xc3\x9e" + "\x86\xcb\x08\x6b\x78\x9f\x54\x19", .ptext = zeroed_string, .ctext = "\xd4\x91\xdb\x16\xe7\xb1\xc3\x9e" "\x86\xcb\x08\x6b\x78\x9f\x54\x19", @@ -8951,6 +8938,8 @@ static const struct cipher_testvec tf_cbc_tv_template[] = { .klen = 16, .iv = "\xd4\x91\xdb\x16\xe7\xb1\xc3\x9e" "\x86\xcb\x08\x6b\x78\x9f\x54\x19", + .iv_out = "\x05\xef\x8c\x61\xa8\x11\x58\x26" + "\x34\xba\x5c\xb7\x10\x6a\xa6\x41", .ptext = zeroed_string, .ctext = "\x05\xef\x8c\x61\xa8\x11\x58\x26" "\x34\xba\x5c\xb7\x10\x6a\xa6\x41", @@ -8959,6 +8948,8 @@ static const struct cipher_testvec tf_cbc_tv_template[] = { .key = zeroed_string, .klen = 16, .iv = zeroed_string, + .iv_out = "\x05\xef\x8c\x61\xa8\x11\x58\x26" + "\x34\xba\x5c\xb7\x10\x6a\xa6\x41", .ptext = zeroed_string, .ctext = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32" "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a" @@ -8975,6 +8966,8 @@ static const struct cipher_testvec tf_cbc_tv_template[] = { .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .iv_out = "\x30\x70\x56\xA4\x37\xDD\x7C\xC0" + "\x0A\xA3\x30\x10\x26\x25\x41\x2C", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -9100,9 +9093,6 @@ static const 
struct cipher_testvec tf_cbc_tv_template[] = { "\x30\x70\x56\xA4\x37\xDD\x7C\xC0" "\x0A\xA3\x30\x10\x26\x25\x41\x2C", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -9115,6 +9105,8 @@ static const struct cipher_testvec tf_ctr_tv_template[] = { .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" + "\xC4\x29\x8E\xF3\x35\x9A\xFF\x83", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -9248,6 +9240,8 @@ static const struct cipher_testvec tf_ctr_tv_template[] = { .klen = 32, .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF" "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD", + .iv_out = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x1C", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -9381,6 +9375,8 @@ static const struct cipher_testvec tf_ctr_tv_template[] = { .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" + "\xC4\x29\x8E\xF3\x35\x9A\xFF\x84", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -9508,9 +9504,6 @@ static const struct cipher_testvec tf_ctr_tv_template[] = { "\xC5\xC9\x7F\x9E\xCF\x33\x7A\xDF" "\x6C\x82\x9D", .len = 499, - .also_non_np = 1, - .np = 2, - .tap = { 499 - 16, 16 }, }, }; @@ -9752,9 +9745,6 @@ static const struct cipher_testvec tf_lrw_tv_template[] = { "\x80\x18\xc4\x6c\x03\xd3\xb7\xba" "\x11\xd7\xb8\x6e\xea\xe1\x80\x30", .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, }, }; @@ -10089,9 +10079,6 @@ static const struct cipher_testvec tf_xts_tv_template[] = { "\xa4\x05\x0b\xb2\xb3\xa8\x30\x97" "\x37\x30\xe1\x91\x8d\xb3\x2a\xff", .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, }, }; @@ -10264,9 +10251,6 @@ static const struct cipher_testvec serpent_tv_template[] = { "\x75\x55\x9B\xFF\x36\x73\xAB\x7C" "\xF4\x46\x2E\xEB\xAC\xF3\xD2\xB7", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -10358,6 +10342,8 @@ static const struct cipher_testvec serpent_cbc_tv_template[] = { .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .iv_out = "\xFC\x66\xAA\x37\xF2\x37\x39\x6B" + "\xBC\x08\x3A\xA2\x29\xB3\xDF\xD1", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -10483,9 +10469,6 @@ static const struct cipher_testvec serpent_cbc_tv_template[] = { "\xFC\x66\xAA\x37\xF2\x37\x39\x6B" "\xBC\x08\x3A\xA2\x29\xB3\xDF\xD1", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -10498,6 +10481,8 @@ static const struct cipher_testvec serpent_ctr_tv_template[] = { .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" + "\xC4\x29\x8E\xF3\x35\x9A\xFF\x83", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -10631,6 +10616,8 @@ static const struct cipher_testvec serpent_ctr_tv_template[] = { .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" + "\xC4\x29\x8E\xF3\x35\x9A\xFF\x84", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" 
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -10758,9 +10745,6 @@ static const struct cipher_testvec serpent_ctr_tv_template[] = { "\x40\x53\x77\x8C\x15\xF8\x8D\x13" "\x38\xE2\xE5", .len = 499, - .also_non_np = 1, - .np = 2, - .tap = { 499 - 16, 16 }, }, { /* Generated with Crypto++ */ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" @@ -10769,6 +10753,8 @@ static const struct cipher_testvec serpent_ctr_tv_template[] = { .klen = 32, .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF" "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD", + .iv_out = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x1C", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -11135,9 +11121,6 @@ static const struct cipher_testvec serpent_lrw_tv_template[] = { "\x5c\xc6\x84\xfe\x7c\xcb\x26\xfd" "\xd9\x51\x0f\xd7\x94\x2f\xc5\xa7", .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, }, }; @@ -11472,9 +11455,6 @@ static const struct cipher_testvec serpent_xts_tv_template[] = { "\xaf\x43\x0b\xc5\x20\x41\x92\x20" "\xd4\xa0\x91\x98\x11\x5f\x4d\xb1", .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, }, }; @@ -11579,6 +11559,8 @@ static const struct cipher_testvec sm4_cbc_tv_template[] = { "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb", .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .iv_out = "\x4C\xB7\x01\x69\x51\x90\x92\x26" + "\x97\x9B\x0D\x15\xDC\x6A\x8F\x6D", .ctext = "\x78\xEB\xB1\x1C\xC4\x0B\x0A\x48" "\x31\x2A\xAE\xB2\x04\x02\x44\xCB" "\x4C\xB7\x01\x69\x51\x90\x92\x26" @@ -11594,6 +11576,8 @@ static const struct cipher_testvec sm4_cbc_tv_template[] = { "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb", .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .iv_out = "\x91\xf2\xc1\x47\x91\x1a\x41\x44" + "\x66\x5e\x1f\xa1\xd4\x0b\xae\x38", .ctext = "\x0d\x3a\x6d\xdc\x2d\x21\xc6\x98" "\x85\x72\x15\x58\x7b\x7b\xb5\x9a" "\x91\xf2\xc1\x47\x91\x1a\x41\x44" @@ -11617,6 +11601,8 @@ static const struct cipher_testvec sm4_ctr_tv_template[] = { "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb", .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .iv_out = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x13", .ctext = "\xac\x32\x36\xcb\x97\x0c\xc2\x07" "\x91\x36\x4c\x39\x5a\x13\x42\xd1" "\xa3\xcb\xc1\x87\x8c\x6f\x30\xcd" @@ -11640,6 +11626,8 @@ static const struct cipher_testvec sm4_ctr_tv_template[] = { "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb", .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .iv_out = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x13", .ctext = "\x5d\xcc\xcd\x25\xb9\x5a\xb0\x74" "\x17\xa0\x85\x12\xee\x16\x0e\x2f" "\x8f\x66\x15\x21\xcb\xba\xb4\x4c" @@ -11814,9 +11802,6 @@ static const struct cipher_testvec cast6_tv_template[] = { "\x84\x52\x6D\x68\xDE\xC6\x64\xB2" "\x11\x74\x93\x57\xB4\x7E\xC6\x00", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -11829,6 +11814,8 @@ static const struct cipher_testvec cast6_cbc_tv_template[] = { .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .iv_out = "\x4D\x59\x7D\xC5\x28\x69\xFA\x92" + "\x22\x46\x89\x2D\x0F\x2B\x08\x24", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -11954,9 +11941,6 @@ static const struct cipher_testvec cast6_cbc_tv_template[] = { "\x4D\x59\x7D\xC5\x28\x69\xFA\x92" 
"\x22\x46\x89\x2D\x0F\x2B\x08\x24", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -11969,6 +11953,8 @@ static const struct cipher_testvec cast6_ctr_tv_template[] = { .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" + "\xC4\x29\x8E\xF3\x35\x9A\xFF\x66", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A", @@ -11984,6 +11970,8 @@ static const struct cipher_testvec cast6_ctr_tv_template[] = { .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" + "\xC4\x29\x8E\xF3\x35\x9A\xFF\x83", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -12109,9 +12097,6 @@ static const struct cipher_testvec cast6_ctr_tv_template[] = { "\x0E\x74\x33\x30\x62\xB9\x89\xDF" "\xF9\xC5\xDD\x27\xB3\x39\xCB\xCB", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -12255,9 +12240,6 @@ static const struct cipher_testvec cast6_lrw_tv_template[] = { "\x8D\xD9\xCD\x3B\x22\x67\x18\xC7" "\xC4\xF5\x99\x61\xBC\xBB\x5B\x46", .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, }, }; @@ -12403,9 +12385,6 @@ static const struct cipher_testvec cast6_xts_tv_template[] = { "\xA1\xAC\xE8\xCF\xC6\x74\xCF\xDC" "\x22\x60\x4E\xE8\xA4\x5D\x85\xB9", .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, }, }; @@ -12574,9 +12553,6 @@ static const struct cipher_testvec aes_tv_template[] = { "\x09\x79\xA0\x43\x5C\x0D\x08\x58" "\x17\xBB\xC0\x6B\x62\x3F\x56\xE9", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -12587,19 +12563,20 @@ static const struct cipher_testvec aes_cbc_tv_template[] = { .klen = 16, .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" "\xb4\x22\xda\x80\x2c\x9f\xac\x41", + .iv_out = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" + "\x27\x08\x94\x2d\xbe\x77\x18\x1a", .ptext = "Single block msg", .ctext = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" "\x27\x08\x94\x2d\xbe\x77\x18\x1a", .len = 16, - .also_non_np = 1, - .np = 8, - .tap = { 3, 2, 3, 2, 3, 1, 1, 1 }, }, { .key = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0" "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a", .klen = 16, .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", + .iv_out = "\x75\x86\x60\x2d\x25\x3c\xff\xf9" + "\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1", .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" "\x10\x11\x12\x13\x14\x15\x16\x17" @@ -12616,6 +12593,8 @@ static const struct cipher_testvec aes_cbc_tv_template[] = { .klen = 24, .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .iv_out = "\x08\xb0\xe2\x79\x88\x59\x88\x81" + "\xd9\x20\xa9\xe6\x4f\x56\x15\xcd", .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" @@ -12641,6 +12620,8 @@ static const struct cipher_testvec aes_cbc_tv_template[] = { .klen = 32, .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .iv_out = "\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc" + "\xda\x6c\x19\x07\x8c\x6a\x9d\x1b", .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" @@ -12666,6 +12647,8 @@ static const struct cipher_testvec aes_cbc_tv_template[] = { .klen = 32, .iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47" "\xE2\x7D\x18\xD6\x71\x0C\xA7\x42", + .iv_out = 
"\xE0\x1F\x91\xF8\x82\x96\x2D\x65" + "\xA3\xAA\x13\xCC\x50\xFF\x7B\x02", .ptext = "\x50\xB9\x22\xAE\x17\x80\x0C\x75" "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03" "\x6C\xF8\x61\xCA\x33\xBF\x28\x91" @@ -12791,9 +12774,6 @@ static const struct cipher_testvec aes_cbc_tv_template[] = { "\xE0\x1F\x91\xF8\x82\x96\x2D\x65" "\xA3\xAA\x13\xCC\x50\xFF\x7B\x02", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -12870,10 +12850,32 @@ static const struct cipher_testvec aes_cfb_tv_template[] = { "\x75\xa3\x85\x74\x1a\xb9\xce\xf8" "\x20\x31\x62\x3d\x55\xb1\xe4\x71", .len = 64, + }, { /* > 16 bytes, not a multiple of 16 bytes */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .klen = 16, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae", + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20" + "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a" + "\xc8", + .len = 17, + }, { /* < 16 bytes */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .klen = 16, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f", + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad", + .len = 7, }, }; -static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = { +static const struct aead_testvec hmac_md5_ecb_cipher_null_tv_template[] = { { /* Input data from RFC 2410 Case 1 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -12887,12 +12889,12 @@ static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = { "\x00\x00\x00\x00\x00\x00\x00\x00", .klen = 8 + 16 + 0, .iv = "", - .input = "\x01\x23\x45\x67\x89\xab\xcd\xef", - .ilen = 8, - .result = "\x01\x23\x45\x67\x89\xab\xcd\xef" + .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xef", + .plen = 8, + .ctext = "\x01\x23\x45\x67\x89\xab\xcd\xef" "\xaa\x42\xfe\x43\x8d\xea\xa3\x5a" "\xb9\x3d\x9f\xb1\xa3\x8e\x9b\xae", - .rlen = 8 + 16, + .clen = 8 + 16, }, { /* Input data from RFC 2410 Case 2 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -12906,58 +12908,16 @@ static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = { "\x00\x00\x00\x00\x00\x00\x00\x00", .klen = 8 + 16 + 0, .iv = "", - .input = "Network Security People Have A Strange Sense Of Humor", - .ilen = 53, - .result = "Network Security People Have A Strange Sense Of Humor" - "\x73\xa5\x3e\x1c\x08\x0e\x8a\x8a" - "\x8e\xb5\x5f\x90\x8e\xfe\x13\x23", - .rlen = 53 + 16, - }, -}; - -static const struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = { - { -#ifdef __LITTLE_ENDIAN - .key = "\x08\x00" /* rta length */ - "\x01\x00" /* rta type */ -#else - .key = "\x00\x08" /* rta length */ - "\x00\x01" /* rta type */ -#endif - "\x00\x00\x00\x00" /* enc key length */ - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .klen = 8 + 16 + 0, - .iv = "", - .input = "\x01\x23\x45\x67\x89\xab\xcd\xef" - "\xaa\x42\xfe\x43\x8d\xea\xa3\x5a" - "\xb9\x3d\x9f\xb1\xa3\x8e\x9b\xae", - .ilen = 8 + 16, - .result = "\x01\x23\x45\x67\x89\xab\xcd\xef", - .rlen = 8, - }, { -#ifdef __LITTLE_ENDIAN - .key = "\x08\x00" /* rta length */ - "\x01\x00" /* rta type */ -#else - .key = "\x00\x08" /* rta length */ - "\x00\x01" /* rta type */ -#endif - "\x00\x00\x00\x00" /* enc key length */ - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .klen = 8 + 16 + 0, - .iv = "", - .input = "Network 
Security People Have A Strange Sense Of Humor" + .ptext = "Network Security People Have A Strange Sense Of Humor", + .plen = 53, + .ctext = "Network Security People Have A Strange Sense Of Humor" "\x73\xa5\x3e\x1c\x08\x0e\x8a\x8a" "\x8e\xb5\x5f\x90\x8e\xfe\x13\x23", - .ilen = 53 + 16, - .result = "Network Security People Have A Strange Sense Of Humor", - .rlen = 53, + .clen = 53 + 16, }, }; -static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha1_aes_cbc_tv_temp[] = { { /* RFC 3602 Case 1 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -12978,14 +12938,14 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { .assoc = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" "\xb4\x22\xda\x80\x2c\x9f\xac\x41", .alen = 16, - .input = "Single block msg", - .ilen = 16, - .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" + .ptext = "Single block msg", + .plen = 16, + .ctext = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" "\x27\x08\x94\x2d\xbe\x77\x18\x1a" "\x1b\x13\xcb\xaf\x89\x5e\xe1\x2c" "\x13\xc5\x2e\xa3\xcc\xed\xdc\xb5" "\x03\x71\xa2\x06", - .rlen = 16 + 20, + .clen = 16 + 20, }, { /* RFC 3602 Case 2 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13006,19 +12966,19 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { .assoc = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", .alen = 16, - .input = "\x00\x01\x02\x03\x04\x05\x06\x07" + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" "\x10\x11\x12\x13\x14\x15\x16\x17" "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", - .ilen = 32, - .result = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a" + .plen = 32, + .ctext = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a" "\x3a\x86\x30\x28\xb5\xe1\xdc\x0a" "\x75\x86\x60\x2d\x25\x3c\xff\xf9" "\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1" "\xad\x9b\x4c\x5c\x85\xe1\xda\xae" "\xee\x81\x4e\xd7\xdb\x74\xcf\x58" "\x65\x39\xf8\xde", - .rlen = 32 + 20, + .clen = 32 + 20, }, { /* RFC 3602 Case 3 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13039,9 +12999,9 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { .assoc = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb" "\xd9\xcd\x27\xd8\x25\x68\x2c\x81", .alen = 16, - .input = "This is a 48-byte message (exactly 3 AES blocks)", - .ilen = 48, - .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" + .ptext = "This is a 48-byte message (exactly 3 AES blocks)", + .plen = 48, + .ctext = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" "\xd4\x93\x66\x5d\x33\xf0\xe8\x86" "\x2d\xea\x54\xcd\xb2\x93\xab\xc7" "\x50\x69\x39\x27\x67\x72\xf8\xd5" @@ -13050,7 +13010,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { "\xc2\xec\x0c\xf8\x7f\x05\xba\xca" "\xff\xee\x4c\xd0\x93\xe6\x36\x7f" "\x8d\x62\xf2\x1e", - .rlen = 48 + 20, + .clen = 48 + 20, }, { /* RFC 3602 Case 4 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13071,7 +13031,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { .assoc = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c" "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9", .alen = 16, - .input = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + .ptext = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" @@ -13079,8 +13039,8 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf", - .ilen = 64, - .result = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e" + .plen = 64, + 
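
/*
 * [Editor's sketch] The field renames running through these vectors
 * (input -> ptext, result -> ctext, ilen -> plen, rlen -> clen) make
 * the AEAD framing explicit: ctext holds ciphertext || tag, so
 * clen - plen is the authentication tag length -- 20 bytes for the
 * HMAC-SHA1 suites here and 32 for the HMAC-SHA256 ones below. A tiny
 * sanity check of that invariant over hypothetical, pruned vectors:
 */
#include <stdio.h>

struct demo_aead_vec {
	unsigned short plen, clen, taglen;
};

int main(void)
{
	static const struct demo_aead_vec vecs[] = {
		{ 16, 16 + 20, 20 },	/* RFC 3602 case 1 + HMAC-SHA1 */
		{ 64, 64 + 32, 32 },	/* NIST CBC-AES256 + HMAC-SHA256 */
	};

	for (unsigned int i = 0; i < sizeof(vecs) / sizeof(vecs[0]); i++)
		printf("vec %u: %s\n", i,
		       vecs[i].clen == vecs[i].plen + vecs[i].taglen ?
		       "framing ok" : "bad framing");
	return 0;
}
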
.ctext = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e" "\x6a\xff\x6a\xf0\x86\x9f\x71\xaa" "\x0f\x3a\xf0\x7a\x9a\x31\xa9\xc6" "\x84\xdb\x20\x7e\xb0\xef\x8e\x4e" @@ -13091,7 +13051,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { "\x1c\x45\x57\xa9\x56\xcb\xa9\x2d" "\x18\xac\xf1\xc7\x5d\xd1\xcd\x0d" "\x1d\xbe\xc6\xe9", - .rlen = 64 + 20, + .clen = 64 + 20, }, { /* RFC 3602 Case 5 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13113,7 +13073,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { "\xe9\x6e\x8c\x08\xab\x46\x57\x63" "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93", .alen = 24, - .input = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" + .ptext = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" "\x10\x11\x12\x13\x14\x15\x16\x17" @@ -13123,8 +13083,8 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { "\x30\x31\x32\x33\x34\x35\x36\x37" "\x01\x02\x03\x04\x05\x06\x07\x08" "\x09\x0a\x0b\x0c\x0d\x0e\x0e\x01", - .ilen = 80, - .result = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6" + .plen = 80, + .ctext = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6" "\xa9\x45\x3e\x19\x4e\x12\x08\x49" "\xa4\x87\x0b\x66\xcc\x6b\x99\x65" "\x33\x00\x13\xb4\x89\x8d\xc8\x56" @@ -13137,7 +13097,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { "\x58\xc6\x84\x75\xe4\xe9\x6b\x0c" "\xe1\xc5\x0b\x73\x4d\x82\x55\xa8" "\x85\xe1\x59\xf7", - .rlen = 80 + 20, + .clen = 80 + 20, }, { /* NIST SP800-38A F.2.3 CBC-AES192.Encrypt */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13159,7 +13119,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", .alen = 16, - .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" @@ -13167,8 +13127,8 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", - .ilen = 64, - .result = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d" + .plen = 64, + .ctext = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d" "\x71\x78\x18\x3a\x9f\xa0\x71\xe8" "\xb4\xd9\xad\xa9\xad\x7d\xed\xf4" "\xe5\xe7\x38\x76\x3f\x69\x14\x5a" @@ -13179,7 +13139,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { "\x73\xe3\x19\x3f\x8b\xc9\xc6\xf4" "\x5a\xf1\x5b\xa8\x98\x07\xc5\x36" "\x47\x4c\xfc\x36", - .rlen = 64 + 20, + .clen = 64 + 20, }, { /* NIST SP800-38A F.2.5 CBC-AES256.Encrypt */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13202,7 +13162,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", .alen = 16, - .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" @@ -13210,8 +13170,8 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", - .ilen = 64, - .result = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba" + .plen = 64, + .ctext = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba" "\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6" "\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d" "\x67\x9f\x77\x7b\xc6\x70\x2c\x7d" @@ -13222,11 +13182,11 @@ 
static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { "\xa3\xe8\x9b\x17\xe3\xf4\x7f\xde" "\x1b\x9f\xc6\x81\x26\x43\x4a\x87" "\x51\xee\xd6\x4e", - .rlen = 64 + 20, + .clen = 64 + 20, }, }; -static const struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha1_ecb_cipher_null_tv_temp[] = { { /* Input data from RFC 2410 Case 1 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13241,13 +13201,13 @@ static const struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = { "\x00\x00\x00\x00", .klen = 8 + 20 + 0, .iv = "", - .input = "\x01\x23\x45\x67\x89\xab\xcd\xef", - .ilen = 8, - .result = "\x01\x23\x45\x67\x89\xab\xcd\xef" + .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xef", + .plen = 8, + .ctext = "\x01\x23\x45\x67\x89\xab\xcd\xef" "\x40\xc3\x0a\xa1\xc9\xa0\x28\xab" "\x99\x5e\x19\x04\xd1\x72\xef\xb8" "\x8c\x5e\xe4\x08", - .rlen = 8 + 20, + .clen = 8 + 20, }, { /* Input data from RFC 2410 Case 2 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13262,63 +13222,17 @@ static const struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = { "\x00\x00\x00\x00", .klen = 8 + 20 + 0, .iv = "", - .input = "Network Security People Have A Strange Sense Of Humor", - .ilen = 53, - .result = "Network Security People Have A Strange Sense Of Humor" - "\x75\x6f\x42\x1e\xf8\x50\x21\xd2" - "\x65\x47\xee\x8e\x1a\xef\x16\xf6" - "\x91\x56\xe4\xd6", - .rlen = 53 + 20, - }, -}; - -static const struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = { - { -#ifdef __LITTLE_ENDIAN - .key = "\x08\x00" /* rta length */ - "\x01\x00" /* rta type */ -#else - .key = "\x00\x08" /* rta length */ - "\x00\x01" /* rta type */ -#endif - "\x00\x00\x00\x00" /* enc key length */ - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00", - .klen = 8 + 20 + 0, - .iv = "", - .input = "\x01\x23\x45\x67\x89\xab\xcd\xef" - "\x40\xc3\x0a\xa1\xc9\xa0\x28\xab" - "\x99\x5e\x19\x04\xd1\x72\xef\xb8" - "\x8c\x5e\xe4\x08", - .ilen = 8 + 20, - .result = "\x01\x23\x45\x67\x89\xab\xcd\xef", - .rlen = 8, - }, { -#ifdef __LITTLE_ENDIAN - .key = "\x08\x00" /* rta length */ - "\x01\x00" /* rta type */ -#else - .key = "\x00\x08" /* rta length */ - "\x00\x01" /* rta type */ -#endif - "\x00\x00\x00\x00" /* enc key length */ - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00", - .klen = 8 + 20 + 0, - .iv = "", - .input = "Network Security People Have A Strange Sense Of Humor" + .ptext = "Network Security People Have A Strange Sense Of Humor", + .plen = 53, + .ctext = "Network Security People Have A Strange Sense Of Humor" "\x75\x6f\x42\x1e\xf8\x50\x21\xd2" "\x65\x47\xee\x8e\x1a\xef\x16\xf6" "\x91\x56\xe4\xd6", - .ilen = 53 + 20, - .result = "Network Security People Have A Strange Sense Of Humor", - .rlen = 53, + .clen = 53 + 20, }, }; -static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha256_aes_cbc_tv_temp[] = { { /* RFC 3602 Case 1 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13340,15 +13254,15 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { .assoc = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" "\xb4\x22\xda\x80\x2c\x9f\xac\x41", .alen = 16, - .input = "Single block msg", - .ilen = 16, - .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" + .ptext = "Single block msg", + .plen = 16, + .ctext = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" "\x27\x08\x94\x2d\xbe\x77\x18\x1a" 
"\xcc\xde\x2d\x6a\xae\xf1\x0b\xcc" "\x38\x06\x38\x51\xb4\xb8\xf3\x5b" "\x5c\x34\xa6\xa3\x6e\x0b\x05\xe5" "\x6a\x6d\x44\xaa\x26\xa8\x44\xa5", - .rlen = 16 + 32, + .clen = 16 + 32, }, { /* RFC 3602 Case 2 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13370,12 +13284,12 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { .assoc = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", .alen = 16, - .input = "\x00\x01\x02\x03\x04\x05\x06\x07" + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" "\x10\x11\x12\x13\x14\x15\x16\x17" "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", - .ilen = 32, - .result = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a" + .plen = 32, + .ctext = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a" "\x3a\x86\x30\x28\xb5\xe1\xdc\x0a" "\x75\x86\x60\x2d\x25\x3c\xff\xf9" "\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1" @@ -13383,7 +13297,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\x0e\x06\x58\x8f\xba\xf6\x06\xda" "\x49\x69\x0d\x5b\xd4\x36\x06\x62" "\x35\x5e\x54\x58\x53\x4d\xdf\xbf", - .rlen = 32 + 32, + .clen = 32 + 32, }, { /* RFC 3602 Case 3 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13405,9 +13319,9 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { .assoc = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb" "\xd9\xcd\x27\xd8\x25\x68\x2c\x81", .alen = 16, - .input = "This is a 48-byte message (exactly 3 AES blocks)", - .ilen = 48, - .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" + .ptext = "This is a 48-byte message (exactly 3 AES blocks)", + .plen = 48, + .ctext = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" "\xd4\x93\x66\x5d\x33\xf0\xe8\x86" "\x2d\xea\x54\xcd\xb2\x93\xab\xc7" "\x50\x69\x39\x27\x67\x72\xf8\xd5" @@ -13417,7 +13331,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\xe7\xc6\xce\x10\x31\x2f\x9b\x1d" "\x24\x78\xfb\xbe\x02\xe0\x4f\x40" "\x10\xbd\xaa\xc6\xa7\x79\xe0\x1a", - .rlen = 48 + 32, + .clen = 48 + 32, }, { /* RFC 3602 Case 4 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13439,7 +13353,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { .assoc = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c" "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9", .alen = 16, - .input = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + .ptext = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" @@ -13447,8 +13361,8 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf", - .ilen = 64, - .result = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e" + .plen = 64, + .ctext = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e" "\x6a\xff\x6a\xf0\x86\x9f\x71\xaa" "\x0f\x3a\xf0\x7a\x9a\x31\xa9\xc6" "\x84\xdb\x20\x7e\xb0\xef\x8e\x4e" @@ -13460,7 +13374,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\xe0\x93\xec\xc9\x9f\xf7\xce\xd8" "\x3f\x54\xe2\x49\x39\xe3\x71\x25" "\x2b\x6c\xe9\x5d\xec\xec\x2b\x64", - .rlen = 64 + 32, + .clen = 64 + 32, }, { /* RFC 3602 Case 5 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13483,7 +13397,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\xe9\x6e\x8c\x08\xab\x46\x57\x63" "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93", .alen = 24, - .input = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" + .ptext = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" 
"\x10\x11\x12\x13\x14\x15\x16\x17" @@ -13493,8 +13407,8 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\x30\x31\x32\x33\x34\x35\x36\x37" "\x01\x02\x03\x04\x05\x06\x07\x08" "\x09\x0a\x0b\x0c\x0d\x0e\x0e\x01", - .ilen = 80, - .result = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6" + .plen = 80, + .ctext = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6" "\xa9\x45\x3e\x19\x4e\x12\x08\x49" "\xa4\x87\x0b\x66\xcc\x6b\x99\x65" "\x33\x00\x13\xb4\x89\x8d\xc8\x56" @@ -13508,7 +13422,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\x3a\xd2\xe1\x03\x86\xa5\x59\xb7" "\x73\xc3\x46\x20\x2c\xb1\xef\x68" "\xbb\x8a\x32\x7e\x12\x8c\x69\xcf", - .rlen = 80 + 32, + .clen = 80 + 32, }, { /* NIST SP800-38A F.2.3 CBC-AES192.Encrypt */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13531,7 +13445,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", .alen = 16, - .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" @@ -13539,8 +13453,8 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", - .ilen = 64, - .result = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d" + .plen = 64, + .ctext = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d" "\x71\x78\x18\x3a\x9f\xa0\x71\xe8" "\xb4\xd9\xad\xa9\xad\x7d\xed\xf4" "\xe5\xe7\x38\x76\x3f\x69\x14\x5a" @@ -13552,7 +13466,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\x61\x81\x31\xea\x5b\x3d\x8e\xfb" "\xca\x71\x85\x93\xf7\x85\x55\x8b" "\x7a\xe4\x94\xca\x8b\xba\x19\x33", - .rlen = 64 + 32, + .clen = 64 + 32, }, { /* NIST SP800-38A F.2.5 CBC-AES256.Encrypt */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13576,7 +13490,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", .alen = 16, - .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" @@ -13584,8 +13498,8 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", - .ilen = 64, - .result = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba" + .plen = 64, + .ctext = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba" "\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6" "\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d" "\x67\x9f\x77\x7b\xc6\x70\x2c\x7d" @@ -13597,11 +13511,11 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\x8f\x74\xbd\x17\x92\x03\xbe\x8f" "\xf3\x61\xde\x1c\xe9\xdb\xcd\xd0" "\xcc\xce\xe9\x85\x57\xcf\x6f\x5f", - .rlen = 64 + 32, + .clen = 64 + 32, }, }; -static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha512_aes_cbc_tv_temp[] = { { /* RFC 3602 Case 1 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13627,9 +13541,9 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { .assoc = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" "\xb4\x22\xda\x80\x2c\x9f\xac\x41", .alen = 16, - .input = "Single block msg", - .ilen = 16, - .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" + .ptext = "Single block 
msg", + .plen = 16, + .ctext = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" "\x27\x08\x94\x2d\xbe\x77\x18\x1a" "\x3f\xdc\xad\x90\x03\x63\x5e\x68" "\xc3\x13\xdd\xa4\x5c\x4d\x54\xa7" @@ -13639,7 +13553,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\xe8\x9a\x7c\x06\x3d\xcb\xff\xb2" "\xfa\x20\x89\xdd\x9c\xac\x9e\x16" "\x18\x8a\xa0\x6d\x01\x6c\xa3\x3a", - .rlen = 16 + 64, + .clen = 16 + 64, }, { /* RFC 3602 Case 2 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13665,12 +13579,12 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { .assoc = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", .alen = 16, - .input = "\x00\x01\x02\x03\x04\x05\x06\x07" + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" "\x10\x11\x12\x13\x14\x15\x16\x17" "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", - .ilen = 32, - .result = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a" + .plen = 32, + .ctext = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a" "\x3a\x86\x30\x28\xb5\xe1\xdc\x0a" "\x75\x86\x60\x2d\x25\x3c\xff\xf9" "\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1" @@ -13682,7 +13596,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\x46\x32\x7c\x41\x9c\x59\x3e\xe9" "\x8f\x9f\xd4\x31\xd6\x22\xbd\xf8" "\xf7\x0a\x94\xe5\xa9\xc3\xf6\x9d", - .rlen = 32 + 64, + .clen = 32 + 64, }, { /* RFC 3602 Case 3 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13708,9 +13622,9 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { .assoc = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb" "\xd9\xcd\x27\xd8\x25\x68\x2c\x81", .alen = 16, - .input = "This is a 48-byte message (exactly 3 AES blocks)", - .ilen = 48, - .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" + .ptext = "This is a 48-byte message (exactly 3 AES blocks)", + .plen = 48, + .ctext = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" "\xd4\x93\x66\x5d\x33\xf0\xe8\x86" "\x2d\xea\x54\xcd\xb2\x93\xab\xc7" "\x50\x69\x39\x27\x67\x72\xf8\xd5" @@ -13724,7 +13638,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\x08\xea\x29\x6c\x74\x67\x3f\xb0" "\xac\x7f\x5c\x1d\xf5\xee\x22\x66" "\x27\xa6\xb6\x13\xba\xba\xf0\xc2", - .rlen = 48 + 64, + .clen = 48 + 64, }, { /* RFC 3602 Case 4 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13750,7 +13664,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { .assoc = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c" "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9", .alen = 16, - .input = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + .ptext = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" @@ -13758,8 +13672,8 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf", - .ilen = 64, - .result = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e" + .plen = 64, + .ctext = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e" "\x6a\xff\x6a\xf0\x86\x9f\x71\xaa" "\x0f\x3a\xf0\x7a\x9a\x31\xa9\xc6" "\x84\xdb\x20\x7e\xb0\xef\x8e\x4e" @@ -13775,7 +13689,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\xbc\x6f\xed\xd5\x8d\xde\x23\x7c" "\x62\x98\x14\xd7\x2f\x37\x8d\xdf" "\xf4\x33\x80\xeb\x8e\xb4\xa4\xda", - .rlen = 64 + 64, + .clen = 64 + 64, }, { /* RFC 3602 Case 5 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13802,7 +13716,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { 
"\xe9\x6e\x8c\x08\xab\x46\x57\x63" "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93", .alen = 24, - .input = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" + .ptext = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" "\x10\x11\x12\x13\x14\x15\x16\x17" @@ -13812,8 +13726,8 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\x30\x31\x32\x33\x34\x35\x36\x37" "\x01\x02\x03\x04\x05\x06\x07\x08" "\x09\x0a\x0b\x0c\x0d\x0e\x0e\x01", - .ilen = 80, - .result = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6" + .plen = 80, + .ctext = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6" "\xa9\x45\x3e\x19\x4e\x12\x08\x49" "\xa4\x87\x0b\x66\xcc\x6b\x99\x65" "\x33\x00\x13\xb4\x89\x8d\xc8\x56" @@ -13831,7 +13745,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\x92\x26\xc1\x76\x20\x11\xeb\xba" "\x62\x4f\x9a\x62\x25\xc3\x75\x80" "\xb7\x0a\x17\xf5\xd7\x94\xb4\x14", - .rlen = 80 + 64, + .clen = 80 + 64, }, { /* NIST SP800-38A F.2.3 CBC-AES192.Encrypt */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13858,7 +13772,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", .alen = 16, - .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" @@ -13866,8 +13780,8 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", - .ilen = 64, - .result = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d" + .plen = 64, + .ctext = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d" "\x71\x78\x18\x3a\x9f\xa0\x71\xe8" "\xb4\xd9\xad\xa9\xad\x7d\xed\xf4" "\xe5\xe7\x38\x76\x3f\x69\x14\x5a" @@ -13883,7 +13797,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\xba\x03\xd5\x32\xfa\x5f\x41\x58" "\x8d\x43\x98\xa7\x94\x16\x07\x02" "\x0f\xb6\x81\x50\x28\x95\x2e\x75", - .rlen = 64 + 64, + .clen = 64 + 64, }, { /* NIST SP800-38A F.2.5 CBC-AES256.Encrypt */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13911,7 +13825,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", .alen = 16, - .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" @@ -13919,8 +13833,8 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", - .ilen = 64, - .result = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba" + .plen = 64, + .ctext = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba" "\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6" "\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d" "\x67\x9f\x77\x7b\xc6\x70\x2c\x7d" @@ -13936,11 +13850,11 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\xe3\x8d\x64\xc3\x8d\xff\x7c\x8c" "\xdb\xbf\xa0\xb4\x01\xa2\xa8\xa2" "\x2c\xb1\x62\x2c\x10\xca\xf1\x21", - .rlen = 64 + 64, + .clen = 64 + 64, }, }; -static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha1_des_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13959,7 +13873,7 @@ static const struct aead_testvec 
hmac_sha1_des_cbc_enc_tv_temp[] = { .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" "\x7D\x33\x88\x93\x0F\x93\xB2\x42", .alen = 16, - .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" "\x20\x79\x65\x53\x72\x63\x74\x65" @@ -13975,8 +13889,8 @@ static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = { "\x20\x6f\x61\x4d\x79\x6e\x53\x20" "\x63\x65\x65\x72\x73\x74\x54\x20" "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", - .ilen = 128, - .result = "\x70\xd6\xde\x64\x87\x17\xf1\xe8" + .plen = 128, + .ctext = "\x70\xd6\xde\x64\x87\x17\xf1\xe8" "\x54\x31\x85\x37\xed\x6b\x01\x8d" "\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1" "\x41\xaa\x33\x91\xa7\x7d\x99\x88" @@ -13995,11 +13909,11 @@ static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = { "\x95\x16\x20\x09\xf5\x95\x19\xfd" "\x3c\xc7\xe0\x42\xc0\x14\x69\xfa" "\x5c\x44\xa9\x37", - .rlen = 128 + 20, + .clen = 128 + 20, }, }; -static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha224_des_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -14018,7 +13932,7 @@ static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = { .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" "\x7D\x33\x88\x93\x0F\x93\xB2\x42", .alen = 16, - .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" "\x20\x79\x65\x53\x72\x63\x74\x65" @@ -14034,8 +13948,8 @@ static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = { "\x20\x6f\x61\x4d\x79\x6e\x53\x20" "\x63\x65\x65\x72\x73\x74\x54\x20" "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", - .ilen = 128, - .result = "\x70\xd6\xde\x64\x87\x17\xf1\xe8" + .plen = 128, + .ctext = "\x70\xd6\xde\x64\x87\x17\xf1\xe8" "\x54\x31\x85\x37\xed\x6b\x01\x8d" "\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1" "\x41\xaa\x33\x91\xa7\x7d\x99\x88" @@ -14054,11 +13968,11 @@ static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = { "\x9c\x2d\x7e\xee\x20\x34\x55\x0a" "\xce\xb5\x4e\x64\x53\xe7\xbf\x91" "\xab\xd4\xd9\xda\xc9\x12\xae\xf7", - .rlen = 128 + 24, + .clen = 128 + 24, }, }; -static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha256_des_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -14078,7 +13992,7 @@ static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = { .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" "\x7D\x33\x88\x93\x0F\x93\xB2\x42", .alen = 16, - .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" "\x20\x79\x65\x53\x72\x63\x74\x65" @@ -14094,8 +14008,8 @@ static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = { "\x20\x6f\x61\x4d\x79\x6e\x53\x20" "\x63\x65\x65\x72\x73\x74\x54\x20" "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", - .ilen = 128, - .result = "\x70\xd6\xde\x64\x87\x17\xf1\xe8" + .plen = 128, + .ctext = "\x70\xd6\xde\x64\x87\x17\xf1\xe8" "\x54\x31\x85\x37\xed\x6b\x01\x8d" "\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1" "\x41\xaa\x33\x91\xa7\x7d\x99\x88" @@ -14115,11 +14029,11 @@ static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = { "\x50\xf6\x5d\xab\x4b\x51\x4e\x5e" "\xde\x63\xde\x76\x52\xde\x9f\xba" "\x90\xcf\x15\xf2\xbb\x6e\x84\x00", - .rlen = 128 + 32, + .clen = 128 + 
32, }, }; -static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha384_des_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -14141,7 +14055,7 @@ static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = { .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" "\x7D\x33\x88\x93\x0F\x93\xB2\x42", .alen = 16, - .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" "\x20\x79\x65\x53\x72\x63\x74\x65" @@ -14157,8 +14071,8 @@ static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = { "\x20\x6f\x61\x4d\x79\x6e\x53\x20" "\x63\x65\x65\x72\x73\x74\x54\x20" "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", - .ilen = 128, - .result = "\x70\xd6\xde\x64\x87\x17\xf1\xe8" + .plen = 128, + .ctext = "\x70\xd6\xde\x64\x87\x17\xf1\xe8" "\x54\x31\x85\x37\xed\x6b\x01\x8d" "\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1" "\x41\xaa\x33\x91\xa7\x7d\x99\x88" @@ -14180,11 +14094,11 @@ static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = { "\x5e\x67\xb5\x74\xe7\xe7\x85\x61" "\x6a\x95\x26\x75\xcc\x53\x89\xf3" "\x74\xc9\x2a\x76\x20\xa2\x64\x62", - .rlen = 128 + 48, + .clen = 128 + 48, }, }; -static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha512_des_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -14208,7 +14122,7 @@ static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = { .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" "\x7D\x33\x88\x93\x0F\x93\xB2\x42", .alen = 16, - .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" "\x20\x79\x65\x53\x72\x63\x74\x65" @@ -14224,8 +14138,8 @@ static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = { "\x20\x6f\x61\x4d\x79\x6e\x53\x20" "\x63\x65\x65\x72\x73\x74\x54\x20" "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", - .ilen = 128, - .result = "\x70\xd6\xde\x64\x87\x17\xf1\xe8" + .plen = 128, + .ctext = "\x70\xd6\xde\x64\x87\x17\xf1\xe8" "\x54\x31\x85\x37\xed\x6b\x01\x8d" "\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1" "\x41\xaa\x33\x91\xa7\x7d\x99\x88" @@ -14249,11 +14163,11 @@ static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = { "\x97\xe2\xe3\xb8\xaa\x48\x85\xee" "\x8c\xf6\x07\x95\x1f\xa6\x6c\x96" "\x99\xc7\x5c\x8d\xd8\xb5\x68\x7b", - .rlen = 128 + 64, + .clen = 128 + 64, }, }; -static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha1_des3_ede_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -14274,7 +14188,7 @@ static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = { .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" "\x7D\x33\x88\x93\x0F\x93\xB2\x42", .alen = 16, - .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" "\x20\x79\x65\x53\x72\x63\x74\x65" @@ -14290,8 +14204,8 @@ static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = { "\x20\x6f\x61\x4d\x79\x6e\x53\x20" "\x63\x65\x65\x72\x73\x74\x54\x20" "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", - .ilen = 128, - .result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" + .plen = 128, + .ctext = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" 
"\x67\x17\x21\xc7\x6e\x8a\xd5\x49" "\x74\xb3\x49\x05\xc5\x1c\xd0\xed" "\x12\x56\x5c\x53\x96\xb6\x00\x7d" @@ -14310,11 +14224,11 @@ static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = { "\x67\x6d\xb1\xf5\xb8\x10\xdc\xc6" "\x75\x86\x96\x6b\xb1\xc5\xe4\xcf" "\xd1\x60\x91\xb3", - .rlen = 128 + 20, + .clen = 128 + 20, }, }; -static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha224_des3_ede_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -14335,7 +14249,7 @@ static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = { .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" "\x7D\x33\x88\x93\x0F\x93\xB2\x42", .alen = 16, - .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" "\x20\x79\x65\x53\x72\x63\x74\x65" @@ -14351,8 +14265,8 @@ static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = { "\x20\x6f\x61\x4d\x79\x6e\x53\x20" "\x63\x65\x65\x72\x73\x74\x54\x20" "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", - .ilen = 128, - .result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" + .plen = 128, + .ctext = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" "\x67\x17\x21\xc7\x6e\x8a\xd5\x49" "\x74\xb3\x49\x05\xc5\x1c\xd0\xed" "\x12\x56\x5c\x53\x96\xb6\x00\x7d" @@ -14371,11 +14285,11 @@ static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = { "\x15\x24\x7f\x5a\x45\x4a\x66\xce" "\x2b\x0b\x93\x99\x2f\x9d\x0c\x6c" "\x56\x1f\xe1\xa6\x41\xb2\x4c\xd0", - .rlen = 128 + 24, + .clen = 128 + 24, }, }; -static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha256_des3_ede_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -14397,7 +14311,7 @@ static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = { .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" "\x7D\x33\x88\x93\x0F\x93\xB2\x42", .alen = 16, - .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" "\x20\x79\x65\x53\x72\x63\x74\x65" @@ -14413,8 +14327,8 @@ static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = { "\x20\x6f\x61\x4d\x79\x6e\x53\x20" "\x63\x65\x65\x72\x73\x74\x54\x20" "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", - .ilen = 128, - .result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" + .plen = 128, + .ctext = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" "\x67\x17\x21\xc7\x6e\x8a\xd5\x49" "\x74\xb3\x49\x05\xc5\x1c\xd0\xed" "\x12\x56\x5c\x53\x96\xb6\x00\x7d" @@ -14434,11 +14348,11 @@ static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = { "\x56\x38\x44\xc0\xdb\xe3\x4f\x71" "\xf7\xce\xd1\xd3\xf8\xbd\x3e\x4f" "\xca\x43\x95\xdf\x80\x61\x81\xa9", - .rlen = 128 + 32, + .clen = 128 + 32, }, }; -static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha384_des3_ede_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -14462,7 +14376,7 @@ static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = { .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" "\x7D\x33\x88\x93\x0F\x93\xB2\x42", .alen = 16, - .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" 
"\x54\x20\x6f\x6f\x4d\x20\x6e\x61" "\x20\x79\x65\x53\x72\x63\x74\x65" @@ -14478,8 +14392,8 @@ static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = { "\x20\x6f\x61\x4d\x79\x6e\x53\x20" "\x63\x65\x65\x72\x73\x74\x54\x20" "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", - .ilen = 128, - .result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" + .plen = 128, + .ctext = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" "\x67\x17\x21\xc7\x6e\x8a\xd5\x49" "\x74\xb3\x49\x05\xc5\x1c\xd0\xed" "\x12\x56\x5c\x53\x96\xb6\x00\x7d" @@ -14501,11 +14415,11 @@ static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = { "\xa4\x32\x8a\x0b\x46\xd7\xf0\x39" "\x36\x5d\x13\x2f\x86\x10\x78\xd6" "\xd6\xbe\x5c\xb9\x15\x89\xf9\x1b", - .rlen = 128 + 48, + .clen = 128 + 48, }, }; -static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha512_des3_ede_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -14531,7 +14445,7 @@ static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = { .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" "\x7D\x33\x88\x93\x0F\x93\xB2\x42", .alen = 16, - .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" "\x20\x79\x65\x53\x72\x63\x74\x65" @@ -14547,8 +14461,8 @@ static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = { "\x20\x6f\x61\x4d\x79\x6e\x53\x20" "\x63\x65\x65\x72\x73\x74\x54\x20" "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", - .ilen = 128, - .result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" + .plen = 128, + .ctext = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" "\x67\x17\x21\xc7\x6e\x8a\xd5\x49" "\x74\xb3\x49\x05\xc5\x1c\xd0\xed" "\x12\x56\x5c\x53\x96\xb6\x00\x7d" @@ -14572,7 +14486,7 @@ static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = { "\x2a\x74\xd4\x65\x12\xcb\x55\xf2" "\xd5\x02\x6d\xe6\xaf\xc9\x2f\xf2" "\x57\xaa\x85\xf7\xf3\x6a\xcb\xdb", - .rlen = 128 + 64, + .clen = 128 + 64, }, }; @@ -14836,9 +14750,6 @@ static const struct cipher_testvec aes_lrw_tv_template[] = { "\xcd\x7e\x2b\x5d\x43\xea\x42\xe7" "\x74\x3f\x7d\x58\x88\x75\xde\x3e", .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, } }; @@ -15174,9 +15085,6 @@ static const struct cipher_testvec aes_xts_tv_template[] = { "\xc4\xf3\x6f\xfd\xa9\xfc\xea\x70" "\xb9\xc6\xe6\x93\xe1\x48\xc1\x51", .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, } }; @@ -15187,6 +15095,8 @@ static const struct cipher_testvec aes_ctr_tv_template[] = { .klen = 16, .iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .iv_out = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xff\x03", .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" @@ -15211,6 +15121,8 @@ static const struct cipher_testvec aes_ctr_tv_template[] = { .klen = 24, .iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .iv_out = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xff\x03", .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" @@ -15236,6 +15148,8 @@ static const struct cipher_testvec aes_ctr_tv_template[] = { .klen = 32, .iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .iv_out = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xff\x03", .ptext = 
"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" @@ -15261,6 +15175,8 @@ static const struct cipher_testvec aes_ctr_tv_template[] = { .klen = 32, .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF" "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD", + .iv_out = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x1C", .ptext = "\x50\xB9\x22\xAE\x17\x80\x0C\x75" "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03" "\x6C\xF8\x61\xCA\x33\xBF\x28\x91" @@ -15386,9 +15302,6 @@ static const struct cipher_testvec aes_ctr_tv_template[] = { "\xFA\x3A\x05\x4C\xFA\xD1\xFF\xFE" "\xF1\x4C\xE5\xB2\x91\x64\x0C\x51", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, { /* Generated with Crypto++ */ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55" "\x0F\x32\x55\x78\x9B\xBE\x78\x9B" @@ -15397,6 +15310,8 @@ static const struct cipher_testvec aes_ctr_tv_template[] = { .klen = 32, .iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47" "\xE2\x7D\x18\xD6\x71\x0C\xA7\x42", + .iv_out = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47" + "\xE2\x7D\x18\xD6\x71\x0C\xA7\x62", .ptext = "\x50\xB9\x22\xAE\x17\x80\x0C\x75" "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03" "\x6C\xF8\x61\xCA\x33\xBF\x28\x91" @@ -15524,9 +15439,6 @@ static const struct cipher_testvec aes_ctr_tv_template[] = { "\xD8\xFE\xC9\x5B\x5C\x25\xE5\x76" "\xFB\xF2\x3F", .len = 499, - .also_non_np = 1, - .np = 2, - .tap = { 499 - 16, 16 }, }, }; @@ -16650,14 +16562,11 @@ static const struct cipher_testvec aes_ctr_rfc3686_tv_template[] = { "\x4b\xef\x31\x18\xea\xac\xb1\x84" "\x21\xed\xda\x86", .len = 4100, - .np = 2, - .tap = { 4064, 36 }, }, }; static const struct cipher_testvec aes_ofb_tv_template[] = { - /* From NIST Special Publication 800-38A, Appendix F.5 */ - { + { /* From NIST Special Publication 800-38A, Appendix F.5 */ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", .klen = 16, @@ -16680,33 +16589,55 @@ static const struct cipher_testvec aes_ofb_tv_template[] = { "\x30\x4c\x65\x28\xf6\x59\xc7\x78" "\x66\xa5\x10\xd9\xc1\xd6\xae\x5e", .len = 64, + }, { /* > 16 bytes, not a multiple of 16 bytes */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .klen = 16, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae", + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20" + "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a" + "\x77", + .len = 17, + }, { /* < 16 bytes */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .klen = 16, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f", + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad", + .len = 7, } }; -static const struct aead_testvec aes_gcm_enc_tv_template[] = { +static const struct aead_testvec aes_gcm_tv_template[] = { { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */ .key = zeroed_string, .klen = 16, - .result = "\x58\xe2\xfc\xce\xfa\x7e\x30\x61" + .ctext = "\x58\xe2\xfc\xce\xfa\x7e\x30\x61" "\x36\x7f\x1d\x57\xa4\xe7\x45\x5a", - .rlen = 16, + .clen = 16, }, { .key = zeroed_string, .klen = 16, - .input = zeroed_string, - .ilen = 16, - .result = "\x03\x88\xda\xce\x60\xb6\xa3\x92" + .ptext = zeroed_string, + .plen = 16, + .ctext = "\x03\x88\xda\xce\x60\xb6\xa3\x92" "\xf3\x28\xc2\xb9\x71\xb2\xfe\x78" "\xab\x6e\x47\xd4\x2c\xec\x13\xbd" "\xf5\x3a\x67\xb2\x12\x57\xbd\xdf", - .rlen = 32, + .clen = 32, }, { .key = 
"\xfe\xff\xe9\x92\x86\x65\x73\x1c" "\x6d\x6a\x8f\x94\x67\x30\x83\x08", .klen = 16, .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" "\xde\xca\xf8\x88", - .input = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" + .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" "\x86\xa7\xa9\x53\x15\x34\xf7\xda" "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" @@ -16714,8 +16645,8 @@ static const struct aead_testvec aes_gcm_enc_tv_template[] = { "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" "\xba\x63\x7b\x39\x1a\xaf\xd2\x55", - .ilen = 64, - .result = "\x42\x83\x1e\xc2\x21\x77\x74\x24" + .plen = 64, + .ctext = "\x42\x83\x1e\xc2\x21\x77\x74\x24" "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c" "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0" "\x35\xc1\x7e\x23\x29\xac\xa1\x2e" @@ -16725,14 +16656,14 @@ static const struct aead_testvec aes_gcm_enc_tv_template[] = { "\x3d\x58\xe0\x91\x47\x3f\x59\x85" "\x4d\x5c\x2a\xf3\x27\xcd\x64\xa6" "\x2c\xf3\x5a\xbd\x2b\xa6\xfa\xb4", - .rlen = 80, + .clen = 80, }, { .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" "\x6d\x6a\x8f\x94\x67\x30\x83\x08", .klen = 16, .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" "\xde\xca\xf8\x88", - .input = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" + .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" "\x86\xa7\xa9\x53\x15\x34\xf7\xda" "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" @@ -16740,12 +16671,12 @@ static const struct aead_testvec aes_gcm_enc_tv_template[] = { "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" "\xba\x63\x7b\x39", - .ilen = 60, + .plen = 60, .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef" "\xfe\xed\xfa\xce\xde\xad\xbe\xef" "\xab\xad\xda\xd2", .alen = 20, - .result = "\x42\x83\x1e\xc2\x21\x77\x74\x24" + .ctext = "\x42\x83\x1e\xc2\x21\x77\x74\x24" "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c" "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0" "\x35\xc1\x7e\x23\x29\xac\xa1\x2e" @@ -16755,23 +16686,23 @@ static const struct aead_testvec aes_gcm_enc_tv_template[] = { "\x3d\x58\xe0\x91" "\x5b\xc9\x4f\xbc\x32\x21\xa5\xdb" "\x94\xfa\xe9\x5a\xe7\x12\x1a\x47", - .rlen = 76, + .clen = 76, }, { .key = zeroed_string, .klen = 24, - .result = "\xcd\x33\xb2\x8a\xc7\x73\xf7\x4b" + .ctext = "\xcd\x33\xb2\x8a\xc7\x73\xf7\x4b" "\xa0\x0e\xd1\xf3\x12\x57\x24\x35", - .rlen = 16, + .clen = 16, }, { .key = zeroed_string, .klen = 24, - .input = zeroed_string, - .ilen = 16, - .result = "\x98\xe7\x24\x7c\x07\xf0\xfe\x41" + .ptext = zeroed_string, + .plen = 16, + .ctext = "\x98\xe7\x24\x7c\x07\xf0\xfe\x41" "\x1c\x26\x7e\x43\x84\xb0\xf6\x00" "\x2f\xf5\x8d\x80\x03\x39\x27\xab" "\x8e\xf4\xd4\x58\x75\x14\xf0\xfb", - .rlen = 32, + .clen = 32, }, { .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" "\x6d\x6a\x8f\x94\x67\x30\x83\x08" @@ -16779,7 +16710,7 @@ static const struct aead_testvec aes_gcm_enc_tv_template[] = { .klen = 24, .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" "\xde\xca\xf8\x88", - .input = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" + .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" "\x86\xa7\xa9\x53\x15\x34\xf7\xda" "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" @@ -16787,8 +16718,8 @@ static const struct aead_testvec aes_gcm_enc_tv_template[] = { "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" "\xba\x63\x7b\x39\x1a\xaf\xd2\x55", - .ilen = 64, - .result = "\x39\x80\xca\x0b\x3c\x00\xe8\x41" + .plen = 64, + .ctext = "\x39\x80\xca\x0b\x3c\x00\xe8\x41" "\xeb\x06\xfa\xc4\x87\x2a\x27\x57" "\x85\x9e\x1c\xea\xa6\xef\xd9\x84" "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c" @@ -16798,71 +16729,41 @@ static const struct aead_testvec aes_gcm_enc_tv_template[] = { 
"\xcc\xda\x27\x10\xac\xad\xe2\x56" "\x99\x24\xa7\xc8\x58\x73\x36\xbf" "\xb1\x18\x02\x4d\xb8\x67\x4a\x14", - .rlen = 80, + .clen = 80, + }, { + .key = zeroed_string, + .klen = 32, + .ctext = "\x53\x0f\x8a\xfb\xc7\x45\x36\xb9" + "\xa9\x63\xb4\xf1\xc4\xcb\x73\x8b", + .clen = 16, + }, { + .key = zeroed_string, + .klen = 32, + .ptext = zeroed_string, + .plen = 16, + .ctext = "\xce\xa7\x40\x3d\x4d\x60\x6b\x6e" + "\x07\x4e\xc5\xd3\xba\xf3\x9d\x18" + "\xd0\xd1\xc8\xa7\x99\x99\x6b\xf0" + "\x26\x5b\x98\xb5\xd4\x8a\xb9\x19", + .clen = 32, }, { .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\xfe\xff\xe9\x92\x86\x65\x73\x1c", - .klen = 24, + "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\x6d\x6a\x8f\x94\x67\x30\x83\x08", + .klen = 32, .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" "\xde\xca\xf8\x88", - .input = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" + .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" "\x86\xa7\xa9\x53\x15\x34\xf7\xda" "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" "\x1c\x3c\x0c\x95\x95\x68\x09\x53" "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" - "\xba\x63\x7b\x39", - .ilen = 60, - .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef" - "\xfe\xed\xfa\xce\xde\xad\xbe\xef" - "\xab\xad\xda\xd2", - .alen = 20, - .result = "\x39\x80\xca\x0b\x3c\x00\xe8\x41" - "\xeb\x06\xfa\xc4\x87\x2a\x27\x57" - "\x85\x9e\x1c\xea\xa6\xef\xd9\x84" - "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c" - "\x7d\x77\x3d\x00\xc1\x44\xc5\x25" - "\xac\x61\x9d\x18\xc8\x4a\x3f\x47" - "\x18\xe2\x44\x8b\x2f\xe3\x24\xd9" - "\xcc\xda\x27\x10" - "\x25\x19\x49\x8e\x80\xf1\x47\x8f" - "\x37\xba\x55\xbd\x6d\x27\x61\x8c", - .rlen = 76, - .np = 2, - .tap = { 32, 28 }, - .anp = 2, - .atap = { 8, 12 } - }, { - .key = zeroed_string, - .klen = 32, - .result = "\x53\x0f\x8a\xfb\xc7\x45\x36\xb9" - "\xa9\x63\xb4\xf1\xc4\xcb\x73\x8b", - .rlen = 16, - } -}; - -static const struct aead_testvec aes_gcm_dec_tv_template[] = { - { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */ - .key = zeroed_string, - .klen = 32, - .input = "\xce\xa7\x40\x3d\x4d\x60\x6b\x6e" - "\x07\x4e\xc5\xd3\xba\xf3\x9d\x18" - "\xd0\xd1\xc8\xa7\x99\x99\x6b\xf0" - "\x26\x5b\x98\xb5\xd4\x8a\xb9\x19", - .ilen = 32, - .result = zeroed_string, - .rlen = 16, - }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08", - .klen = 32, - .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" - "\xde\xca\xf8\x88", - .input = "\x52\x2d\xc1\xf0\x99\x56\x7d\x07" + "\xba\x63\x7b\x39\x1a\xaf\xd2\x55", + .plen = 64, + .ctext = "\x52\x2d\xc1\xf0\x99\x56\x7d\x07" "\xf4\x7f\x37\xa3\x2a\x84\x42\x7d" "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9" "\x75\x98\xa2\xbd\x25\x55\xd1\xaa" @@ -16872,16 +16773,7 @@ static const struct aead_testvec aes_gcm_dec_tv_template[] = { "\xbc\xc9\xf6\x62\x89\x80\x15\xad" "\xb0\x94\xda\xc5\xd9\x34\x71\xbd" "\xec\x1a\x50\x22\x70\xe3\xcc\x6c", - .ilen = 80, - .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" - "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" - "\x86\xa7\xa9\x53\x15\x34\xf7\xda" - "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" - "\x1c\x3c\x0c\x95\x95\x68\x09\x53" - "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" - "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" - "\xba\x63\x7b\x39\x1a\xaf\xd2\x55", - .rlen = 64, + .clen = 80, }, { .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" "\x6d\x6a\x8f\x94\x67\x30\x83\x08" @@ -16890,22 +16782,7 @@ static const struct aead_testvec aes_gcm_dec_tv_template[] = { .klen = 32, .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" "\xde\xca\xf8\x88", - .input = 
"\x52\x2d\xc1\xf0\x99\x56\x7d\x07" - "\xf4\x7f\x37\xa3\x2a\x84\x42\x7d" - "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9" - "\x75\x98\xa2\xbd\x25\x55\xd1\xaa" - "\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d" - "\xa7\xb0\x8b\x10\x56\x82\x88\x38" - "\xc5\xf6\x1e\x63\x93\xba\x7a\x0a" - "\xbc\xc9\xf6\x62" - "\x76\xfc\x6e\xce\x0f\x4e\x17\x68" - "\xcd\xdf\x88\x53\xbb\x2d\x55\x1b", - .ilen = 76, - .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef" - "\xfe\xed\xfa\xce\xde\xad\xbe\xef" - "\xab\xad\xda\xd2", - .alen = 20, - .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" + .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" "\x86\xa7\xa9\x53\x15\x34\xf7\xda" "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" @@ -16913,77 +16790,22 @@ static const struct aead_testvec aes_gcm_dec_tv_template[] = { "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" "\xba\x63\x7b\x39", - .rlen = 60, - .np = 2, - .tap = { 48, 28 }, - .anp = 3, - .atap = { 8, 8, 4 } - }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08", - .klen = 16, - .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" - "\xde\xca\xf8\x88", - .input = "\x42\x83\x1e\xc2\x21\x77\x74\x24" - "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c" - "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0" - "\x35\xc1\x7e\x23\x29\xac\xa1\x2e" - "\x21\xd5\x14\xb2\x54\x66\x93\x1c" - "\x7d\x8f\x6a\x5a\xac\x84\xaa\x05" - "\x1b\xa3\x0b\x39\x6a\x0a\xac\x97" - "\x3d\x58\xe0\x91\x47\x3f\x59\x85" - "\x4d\x5c\x2a\xf3\x27\xcd\x64\xa6" - "\x2c\xf3\x5a\xbd\x2b\xa6\xfa\xb4", - .ilen = 80, - .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" - "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" - "\x86\xa7\xa9\x53\x15\x34\xf7\xda" - "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" - "\x1c\x3c\x0c\x95\x95\x68\x09\x53" - "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" - "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" - "\xba\x63\x7b\x39\x1a\xaf\xd2\x55", - .rlen = 64, - }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08", - .klen = 16, - .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" - "\xde\xca\xf8\x88", - .input = "\x42\x83\x1e\xc2\x21\x77\x74\x24" - "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c" - "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0" - "\x35\xc1\x7e\x23\x29\xac\xa1\x2e" - "\x21\xd5\x14\xb2\x54\x66\x93\x1c" - "\x7d\x8f\x6a\x5a\xac\x84\xaa\x05" - "\x1b\xa3\x0b\x39\x6a\x0a\xac\x97" - "\x3d\x58\xe0\x91" - "\x5b\xc9\x4f\xbc\x32\x21\xa5\xdb" - "\x94\xfa\xe9\x5a\xe7\x12\x1a\x47", - .ilen = 76, + .plen = 60, .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef" "\xfe\xed\xfa\xce\xde\xad\xbe\xef" "\xab\xad\xda\xd2", .alen = 20, - .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" - "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" - "\x86\xa7\xa9\x53\x15\x34\xf7\xda" - "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" - "\x1c\x3c\x0c\x95\x95\x68\x09\x53" - "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" - "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" - "\xba\x63\x7b\x39", - .rlen = 60, - }, { - .key = zeroed_string, - .klen = 24, - .input = "\x98\xe7\x24\x7c\x07\xf0\xfe\x41" - "\x1c\x26\x7e\x43\x84\xb0\xf6\x00" - "\x2f\xf5\x8d\x80\x03\x39\x27\xab" - "\x8e\xf4\xd4\x58\x75\x14\xf0\xfb", - .ilen = 32, - .result = zeroed_string, - .rlen = 16, + .ctext = "\x52\x2d\xc1\xf0\x99\x56\x7d\x07" + "\xf4\x7f\x37\xa3\x2a\x84\x42\x7d" + "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9" + "\x75\x98\xa2\xbd\x25\x55\xd1\xaa" + "\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d" + "\xa7\xb0\x8b\x10\x56\x82\x88\x38" + "\xc5\xf6\x1e\x63\x93\xba\x7a\x0a" + "\xbc\xc9\xf6\x62" + "\x76\xfc\x6e\xce\x0f\x4e\x17\x68" + "\xcd\xdf\x88\x53\xbb\x2d\x55\x1b", + .clen = 76, }, { .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" "\x6d\x6a\x8f\x94\x67\x30\x83\x08" @@ -16991,34 +16813,20 @@ static 
const struct aead_testvec aes_gcm_dec_tv_template[] = { .klen = 24, .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" "\xde\xca\xf8\x88", - .input = "\x39\x80\xca\x0b\x3c\x00\xe8\x41" - "\xeb\x06\xfa\xc4\x87\x2a\x27\x57" - "\x85\x9e\x1c\xea\xa6\xef\xd9\x84" - "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c" - "\x7d\x77\x3d\x00\xc1\x44\xc5\x25" - "\xac\x61\x9d\x18\xc8\x4a\x3f\x47" - "\x18\xe2\x44\x8b\x2f\xe3\x24\xd9" - "\xcc\xda\x27\x10\xac\xad\xe2\x56" - "\x99\x24\xa7\xc8\x58\x73\x36\xbf" - "\xb1\x18\x02\x4d\xb8\x67\x4a\x14", - .ilen = 80, - .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" + .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" "\x86\xa7\xa9\x53\x15\x34\xf7\xda" "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" "\x1c\x3c\x0c\x95\x95\x68\x09\x53" "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" - "\xba\x63\x7b\x39\x1a\xaf\xd2\x55", - .rlen = 64, - }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\xfe\xff\xe9\x92\x86\x65\x73\x1c", - .klen = 24, - .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" - "\xde\xca\xf8\x88", - .input = "\x39\x80\xca\x0b\x3c\x00\xe8\x41" + "\xba\x63\x7b\x39", + .plen = 60, + .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef" + "\xfe\xed\xfa\xce\xde\xad\xbe\xef" + "\xab\xad\xda\xd2", + .alen = 20, + .ctext = "\x39\x80\xca\x0b\x3c\x00\xe8\x41" "\xeb\x06\xfa\xc4\x87\x2a\x27\x57" "\x85\x9e\x1c\xea\xa6\xef\xd9\x84" "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c" @@ -17028,53 +16836,40 @@ static const struct aead_testvec aes_gcm_dec_tv_template[] = { "\xcc\xda\x27\x10" "\x25\x19\x49\x8e\x80\xf1\x47\x8f" "\x37\xba\x55\xbd\x6d\x27\x61\x8c", - .ilen = 76, - .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef" - "\xfe\xed\xfa\xce\xde\xad\xbe\xef" - "\xab\xad\xda\xd2", - .alen = 20, - .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" - "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" - "\x86\xa7\xa9\x53\x15\x34\xf7\xda" - "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" - "\x1c\x3c\x0c\x95\x95\x68\x09\x53" - "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" - "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" - "\xba\x63\x7b\x39", - .rlen = 60, + .clen = 76, } }; -static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { +static const struct aead_testvec aes_gcm_rfc4106_tv_template[] = { { /* Generated using Crypto++ */ .key = zeroed_string, .klen = 20, .iv = zeroed_string, - .input = zeroed_string, - .ilen = 16, + .ptext = zeroed_string, + .plen = 16, .assoc = zeroed_string, .alen = 16, - .result = "\x03\x88\xDA\xCE\x60\xB6\xA3\x92" + .ctext = "\x03\x88\xDA\xCE\x60\xB6\xA3\x92" "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78" "\x97\xFE\x4C\x23\x37\x42\x01\xE0" "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B", - .rlen = 32, + .clen = 32, },{ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" "\x6d\x6a\x8f\x94\x67\x30\x83\x08" "\x00\x00\x00\x00", .klen = 20, .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", - .input = zeroed_string, - .ilen = 16, + .ptext = zeroed_string, + .plen = 16, .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x01", .alen = 16, - .result = "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18" + .ctext = "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18" "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28" "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D" "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF", - .rlen = 32, + .clen = 32, }, { .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" @@ -17082,57 +16877,57 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x00\x00\x00\x00", .klen = 20, .iv = zeroed_string, - .input = "\x01\x01\x01\x01\x01\x01\x01\x01" + .ptext = "\x01\x01\x01\x01\x01\x01\x01\x01" "\x01\x01\x01\x01\x01\x01\x01\x01", - .ilen = 16, + .plen = 16, 
.assoc = zeroed_string, .alen = 16, - .result = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" + .ctext = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C" "\xB1\x68\xFD\x14\x52\x64\x61\xB2", - .rlen = 32, + .clen = 32, }, { .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" "\x6d\x6a\x8f\x94\x67\x30\x83\x08" "\x00\x00\x00\x00", .klen = 20, .iv = zeroed_string, - .input = "\x01\x01\x01\x01\x01\x01\x01\x01" + .ptext = "\x01\x01\x01\x01\x01\x01\x01\x01" "\x01\x01\x01\x01\x01\x01\x01\x01", - .ilen = 16, + .plen = 16, .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" "\x00\x00\x00\x00\x00\x00\x00\x00", .alen = 16, - .result = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" + .ctext = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63" "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5", - .rlen = 32, + .clen = 32, }, { .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" "\x6d\x6a\x8f\x94\x67\x30\x83\x08" "\x00\x00\x00\x00", .klen = 20, .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", - .input = "\x01\x01\x01\x01\x01\x01\x01\x01" + .ptext = "\x01\x01\x01\x01\x01\x01\x01\x01" "\x01\x01\x01\x01\x01\x01\x01\x01", - .ilen = 16, + .plen = 16, .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" "\x00\x00\x00\x00\x00\x00\x00\x01", .alen = 16, - .result = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" + .ctext = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" "\x64\x50\xF9\x32\x13\xFB\x74\x61" "\xF4\xED\x52\xD3\xC5\x10\x55\x3C", - .rlen = 32, + .clen = 32, }, { .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" "\x6d\x6a\x8f\x94\x67\x30\x83\x08" "\x00\x00\x00\x00", .klen = 20, .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", - .input = "\x01\x01\x01\x01\x01\x01\x01\x01" + .ptext = "\x01\x01\x01\x01\x01\x01\x01\x01" "\x01\x01\x01\x01\x01\x01\x01\x01" "\x01\x01\x01\x01\x01\x01\x01\x01" "\x01\x01\x01\x01\x01\x01\x01\x01" @@ -17140,11 +16935,11 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x01\x01\x01\x01\x01\x01\x01\x01" "\x01\x01\x01\x01\x01\x01\x01\x01" "\x01\x01\x01\x01\x01\x01\x01\x01", - .ilen = 64, + .plen = 64, .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" "\x00\x00\x00\x00\x00\x00\x00\x01", .alen = 16, - .result = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" + .ctext = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" "\x98\x14\xA1\x42\x37\x80\xFD\x90" "\x68\x12\x01\xA8\x91\x89\xB9\x83" @@ -17154,14 +16949,14 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD" "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85" "\xBD\xCF\x62\x98\x58\x14\xE5\xBD", - .rlen = 80, + .clen = 80, }, { .key = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" "\x00\x00\x00\x00", .klen = 20, .iv = "\x00\x00\x45\x67\x89\xab\xcd\xef", - .input = "\xff\xff\xff\xff\xff\xff\xff\xff" + .ptext = "\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff" @@ -17185,12 +16980,12 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff", - .ilen = 192, + .plen = 192, .assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\x00\x00\x45\x67" "\x89\xab\xcd\xef", .alen = 20, - .result = "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE" + .ctext = "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE" "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A" "\x44\x6D\xC3\x88\x46\x2E\xC2\x01" "\x5E\xF6\x0C\x39\xF0\xC4\xA5\x82" @@ -17216,14 +17011,14 @@ static const struct 
aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x2E\xD5\x03\x2E\x86\x7E\xAA\x3B" "\x37\x08\x1C\xCF\xBA\x5D\x71\x46" "\x80\x72\xB0\x4C\x82\x0D\x60\x3C", - .rlen = 208, + .clen = 208, }, { /* From draft-mcgrew-gcm-test-01 */ .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" "\x2E\x44\x3B\x68", .klen = 20, .iv = "\x49\x56\xED\x7E\x3B\x24\x4C\xFE", - .input = "\x45\x00\x00\x48\x69\x9A\x00\x00" + .ptext = "\x45\x00\x00\x48\x69\x9A\x00\x00" "\x80\x11\x4D\xB7\xC0\xA8\x01\x02" "\xC0\xA8\x01\x01\x0A\x9B\xF1\x56" "\x38\xD3\x01\x00\x00\x01\x00\x00" @@ -17232,12 +17027,12 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x69\x70\x09\x63\x79\x62\x65\x72" "\x63\x69\x74\x79\x02\x64\x6B\x00" "\x00\x21\x00\x01\x01\x02\x02\x01", - .ilen = 72, + .plen = 72, .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" "\x00\x00\x00\x00\x49\x56\xED\x7E" "\x3B\x24\x4C\xFE", .alen = 20, - .result = "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07" + .ctext = "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07" "\xDC\x30\xDF\x52\x8D\xD2\x2B\x76" "\x8D\x1B\x98\x73\x66\x96\xA6\xFD" "\x34\x85\x09\xFA\x13\xCE\xAC\x34" @@ -17248,14 +17043,14 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x61\xBC\x17\xD7\x68\xFD\x97\x32" "\x45\x90\x18\x14\x8F\x6C\xBE\x72" "\x2F\xD0\x47\x96\x56\x2D\xFD\xB4", - .rlen = 88, + .clen = 88, }, { .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" "\x6D\x6A\x8F\x94\x67\x30\x83\x08" "\xCA\xFE\xBA\xBE", .klen = 20, .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", - .input = "\x45\x00\x00\x3E\x69\x8F\x00\x00" + .ptext = "\x45\x00\x00\x3E\x69\x8F\x00\x00" "\x80\x11\x4D\xCC\xC0\xA8\x01\x02" "\xC0\xA8\x01\x01\x0A\x98\x00\x35" "\x00\x2A\x23\x43\xB2\xD0\x01\x00" @@ -17263,11 +17058,11 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x03\x73\x69\x70\x09\x63\x79\x62" "\x65\x72\x63\x69\x74\x79\x02\x64" "\x6B\x00\x00\x01\x00\x01\x00\x01", - .ilen = 64, + .plen = 64, .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", .alen = 16, - .result = "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1" + .ctext = "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1" "\x6E\x3A\x65\xBE\xEB\x8D\xF3\x04" "\xA5\xA5\x89\x7D\x33\xAE\x53\x0F" "\x1B\xA7\x6D\x5D\x11\x4D\x2A\x5C" @@ -17277,7 +17072,7 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\xEC\x3B\x9B\xA9\x5D\x91\x8B\xD1" "\x83\xB7\x0D\x3A\xA8\xBC\x6E\xE4" "\xC3\x09\xE9\xD8\x5A\x41\xAD\x4A", - .rlen = 80, + .clen = 80, }, { .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" "\x34\x45\x56\x67\x78\x89\x9A\xAB" @@ -17286,18 +17081,18 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x11\x22\x33\x44", .klen = 36, .iv = "\x01\x02\x03\x04\x05\x06\x07\x08", - .input = "\x45\x00\x00\x30\x69\xA6\x40\x00" + .ptext = "\x45\x00\x00\x30\x69\xA6\x40\x00" "\x80\x06\x26\x90\xC0\xA8\x01\x02" "\x93\x89\x15\x5E\x0A\x9E\x00\x8B" "\x2D\xC5\x7E\xE0\x00\x00\x00\x00" "\x70\x02\x40\x00\x20\xBF\x00\x00" "\x02\x04\x05\xB4\x01\x01\x04\x02" "\x01\x02\x02\x01", - .ilen = 52, + .plen = 52, .assoc = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02" "\x01\x02\x03\x04\x05\x06\x07\x08", .alen = 16, - .result = "\xFF\x42\x5C\x9B\x72\x45\x99\xDF" + .ctext = "\xFF\x42\x5C\x9B\x72\x45\x99\xDF" "\x7A\x3B\xCD\x51\x01\x94\xE0\x0D" "\x6A\x78\x10\x7F\x1B\x0B\x1C\xBF" "\x06\xEF\xAE\x9D\x65\xA5\xD7\x63" @@ -17306,14 +17101,14 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\xEF\x84\x2D\x8E\xB3\x35\xF4\xEE" "\xCF\xDB\xF8\x31\x82\x4B\x4C\x49" "\x15\x95\x6C\x96", - .rlen = 68, + .clen = 68, }, { .key = 
"\x00\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00", .klen = 20, .iv = "\x00\x00\x00\x00\x00\x00\x00\x00", - .input = "\x45\x00\x00\x3C\x99\xC5\x00\x00" + .ptext = "\x45\x00\x00\x3C\x99\xC5\x00\x00" "\x80\x01\xCB\x7A\x40\x67\x93\x18" "\x01\x01\x01\x01\x08\x00\x07\x5C" "\x02\x00\x44\x00\x61\x62\x63\x64" @@ -17321,11 +17116,11 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x6D\x6E\x6F\x70\x71\x72\x73\x74" "\x75\x76\x77\x61\x62\x63\x64\x65" "\x66\x67\x68\x69\x01\x02\x02\x01", - .ilen = 64, + .plen = 64, .assoc = "\x00\x00\x00\x00\x00\x00\x00\x01" "\x00\x00\x00\x00\x00\x00\x00\x00", .alen = 16, - .result = "\x46\x88\xDA\xF2\xF9\x73\xA3\x92" + .ctext = "\x46\x88\xDA\xF2\xF9\x73\xA3\x92" "\x73\x29\x09\xC3\x31\xD5\x6D\x60" "\xF6\x94\xAB\xAA\x41\x4B\x5E\x7F" "\xF5\xFD\xCD\xFF\xF5\xE9\xA2\x84" @@ -17335,14 +17130,14 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x1D\x19\xD4\xD5\xC8\xC1\x8A\xF3" "\xF8\x21\xD4\x96\xEE\xB0\x96\xE9" "\x8A\xD2\xB6\x9E\x47\x99\xC7\x1D", - .rlen = 80, + .clen = 80, }, { .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" "\x57\x69\x0E\x43", .klen = 20, .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .input = "\x45\x00\x00\x3C\x99\xC3\x00\x00" + .ptext = "\x45\x00\x00\x3C\x99\xC3\x00\x00" "\x80\x01\xCB\x7C\x40\x67\x93\x18" "\x01\x01\x01\x01\x08\x00\x08\x5C" "\x02\x00\x43\x00\x61\x62\x63\x64" @@ -17350,12 +17145,12 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x6D\x6E\x6F\x70\x71\x72\x73\x74" "\x75\x76\x77\x61\x62\x63\x64\x65" "\x66\x67\x68\x69\x01\x02\x02\x01", - .ilen = 64, + .plen = 64, .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" "\x10\x10\x10\x10\x4E\x28\x00\x00" "\xA2\xFC\xA1\xA3", .alen = 20, - .result = "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0" + .ctext = "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0" "\xF2\x2C\xB1\x0D\x86\xDD\x83\xB0" "\xFE\xC7\x56\x91\xCF\x1A\x04\xB0" "\x0D\x11\x38\xEC\x9C\x35\x79\x17" @@ -17365,29 +17160,29 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x1F\x5E\x22\x73\x95\x30\x32\x0A" "\xE0\xD7\x31\xCC\x97\x8E\xCA\xFA" "\xEA\xE8\x8F\x00\xE8\x0D\x6E\x48", - .rlen = 80, + .clen = 80, }, { .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" "\x57\x69\x0E\x43", .klen = 20, .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .input = "\x45\x00\x00\x1C\x42\xA2\x00\x00" + .ptext = "\x45\x00\x00\x1C\x42\xA2\x00\x00" "\x80\x01\x44\x1F\x40\x67\x93\xB6" "\xE0\x00\x00\x02\x0A\x00\xF5\xFF" "\x01\x02\x02\x01", - .ilen = 28, + .plen = 28, .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" "\x10\x10\x10\x10\x4E\x28\x00\x00" "\xA2\xFC\xA1\xA3", .alen = 20, - .result = "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0" + .ctext = "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0" "\xF2\x2C\x3E\x6E\x86\xDD\x83\x1E" "\x1F\xC6\x57\x92\xCD\x1A\xF9\x13" "\x0E\x13\x79\xED\x36\x9F\x07\x1F" "\x35\xE0\x34\xBE\x95\xF1\x12\xE4" "\xE7\xD0\x5D\x35", - .rlen = 44, + .clen = 44, }, { .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" "\x6D\x6A\x8F\x94\x67\x30\x83\x08" @@ -17395,30 +17190,30 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\xCA\xFE\xBA\xBE", .klen = 28, .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", - .input = "\x45\x00\x00\x28\xA4\xAD\x40\x00" + .ptext = "\x45\x00\x00\x28\xA4\xAD\x40\x00" "\x40\x06\x78\x80\x0A\x01\x03\x8F" "\x0A\x01\x06\x12\x80\x23\x06\xB8" "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E" "\x50\x10\x16\xD0\x75\x68\x00\x01", - .ilen = 40, + .plen = 40, .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" 
"\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", .alen = 16, - .result = "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4" + .ctext = "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4" "\x0E\x59\x8B\x81\x22\xDE\x02\x42" "\x09\x38\xB3\xAB\x33\xF8\x28\xE6" "\x87\xB8\x85\x8B\x5B\xFB\xDB\xD0" "\x31\x5B\x27\x45\x21\x44\xCC\x77" "\x95\x45\x7B\x96\x52\x03\x7F\x53" "\x18\x02\x7B\x5B\x4C\xD7\xA6\x36", - .rlen = 56, + .clen = 56, }, { .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" "\x34\x45\x56\x67\x78\x89\x9A\xAB" "\xDE\xCA\xF8\x88", .klen = 20, .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", - .input = "\x45\x00\x00\x49\x33\xBA\x00\x00" + .ptext = "\x45\x00\x00\x49\x33\xBA\x00\x00" "\x7F\x11\x91\x06\xC3\xFB\x1D\x10" "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" "\x00\x35\xDD\x7B\x80\x03\x02\xD5" @@ -17428,12 +17223,12 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x92\xC9\x63\xBA\xC0\x46\xEC\x95" "\x9B\x62\x66\xC0\x47\x22\xB1\x49" "\x23\x01\x01\x01", - .ilen = 76, + .plen = 76, .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" "\xCE\xFA\xCE\x74", .alen = 20, - .result = "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A" + .ctext = "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A" "\xB2\xA2\xEA\x90\x1F\x73\xD8\x14" "\xE3\xE7\xF2\x43\xD9\x54\x12\xE1" "\xC3\x49\xC1\xD2\xFB\xEC\x16\x8F" @@ -17445,7 +17240,7 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\xE7\x84\x5D\x68\x65\x1F\x57\xE6" "\x5F\x35\x4F\x75\xFF\x17\x01\x57" "\x69\x62\x34\x36", - .rlen = 92, + .clen = 92, }, { .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" "\x34\x45\x56\x67\x78\x89\x9A\xAB" @@ -17454,31 +17249,31 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x73\x61\x6C\x74", .klen = 36, .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", - .input = "\x45\x08\x00\x28\x73\x2C\x00\x00" + .ptext = "\x45\x08\x00\x28\x73\x2C\x00\x00" "\x40\x06\xE9\xF9\x0A\x01\x06\x12" "\x0A\x01\x03\x8F\x06\xB8\x80\x23" "\xDD\x6B\xAF\xBE\xCB\x71\x26\x02" "\x50\x10\x1F\x64\x6D\x54\x00\x01", - .ilen = 40, + .plen = 40, .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" "\x69\x76\x65\x63", .alen = 20, - .result = "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B" + .ctext = "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B" "\x8D\x5E\xF3\x8B\xAD\x4D\xA5\x8D" "\x1F\x27\x8F\xDE\x98\xEF\x67\x54" "\x9D\x52\x4A\x30\x18\xD9\xA5\x7F" "\xF4\xD3\xA3\x1C\xE6\x73\x11\x9E" "\x45\x16\x26\xC2\x41\x57\x71\xE3" "\xB7\xEE\xBC\xA6\x14\xC8\x9B\x35", - .rlen = 56, + .clen = 56, }, { .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" "\x57\x69\x0E\x43", .klen = 20, .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .input = "\x45\x00\x00\x49\x33\x3E\x00\x00" + .ptext = "\x45\x00\x00\x49\x33\x3E\x00\x00" "\x7F\x11\x91\x82\xC3\xFB\x1D\x10" "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" "\x00\x35\xCB\x45\x80\x03\x02\x5B" @@ -17488,12 +17283,12 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x76\x4D\x6E\x5E\xE0\x49\x6B\x32" "\x5A\xE2\x70\xC0\x38\x99\x49\x39" "\x15\x01\x01\x01", - .ilen = 76, + .plen = 76, .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" "\x10\x10\x10\x10\x4E\x28\x00\x00" "\xA2\xFC\xA1\xA3", .alen = 20, - .result = "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0" + .ctext = "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0" "\x0D\x3C\xEB\xF3\x05\x41\x0D\xB8" "\x3D\x77\x84\xB6\x07\x32\x3D\x22" "\x0F\x24\xB0\xA9\x7D\x54\x18\x28" @@ -17505,7 +17300,7 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\xE5\x16\x09\x75\xCD\xB6\x08\xC5" "\x76\x91\x89\x60\x97\x63\xB8\xE1" "\x8C\xAA\x81\xE2", - .rlen = 92, + .clen = 92, }, { .key = 
"\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" "\x34\x45\x56\x67\x78\x89\x9A\xAB" @@ -17514,7 +17309,7 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x73\x61\x6C\x74", .klen = 36, .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", - .input = "\x63\x69\x73\x63\x6F\x01\x72\x75" + .ptext = "\x63\x69\x73\x63\x6F\x01\x72\x75" "\x6C\x65\x73\x01\x74\x68\x65\x01" "\x6E\x65\x74\x77\x65\x01\x64\x65" "\x66\x69\x6E\x65\x01\x74\x68\x65" @@ -17523,12 +17318,12 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x74\x77\x69\x6C\x6C\x01\x64\x65" "\x66\x69\x6E\x65\x74\x6F\x6D\x6F" "\x72\x72\x6F\x77\x01\x02\x02\x01", - .ilen = 72, + .plen = 72, .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" "\x69\x76\x65\x63", .alen = 20, - .result = "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E" + .ctext = "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E" "\xA1\x3D\x69\x73\xD3\x24\xC6\x9E" "\x7B\x43\xF8\x26\xFB\x56\x83\x12" "\x26\x50\x8B\xEB\xD2\xDC\xEB\x18" @@ -17539,42 +17334,42 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x12\xA4\x93\x63\x41\x23\x64\xF8" "\xC0\xCA\xC5\x87\xF2\x49\xE5\x6B" "\x11\xE2\x4F\x30\xE4\x4C\xCC\x76", - .rlen = 88, + .clen = 88, }, { .key = "\x7D\x77\x3D\x00\xC1\x44\xC5\x25" "\xAC\x61\x9D\x18\xC8\x4A\x3F\x47" "\xD9\x66\x42\x67", .klen = 20, .iv = "\x43\x45\x7E\x91\x82\x44\x3B\xC6", - .input = "\x01\x02\x02\x01", - .ilen = 4, + .ptext = "\x01\x02\x02\x01", + .plen = 4, .assoc = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF" "\x43\x45\x7E\x91\x82\x44\x3B\xC6", .alen = 16, - .result = "\x43\x7F\x86\x6B\xCB\x3F\x69\x9F" + .ctext = "\x43\x7F\x86\x6B\xCB\x3F\x69\x9F" "\xE9\xB0\x82\x2B\xAC\x96\x1C\x45" "\x04\xBE\xF2\x70", - .rlen = 20, + .clen = 20, }, { .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" "\x34\x45\x56\x67\x78\x89\x9A\xAB" "\xDE\xCA\xF8\x88", .klen = 20, .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", - .input = "\x74\x6F\x01\x62\x65\x01\x6F\x72" + .ptext = "\x74\x6F\x01\x62\x65\x01\x6F\x72" "\x01\x6E\x6F\x74\x01\x74\x6F\x01" "\x62\x65\x00\x01", - .ilen = 20, + .plen = 20, .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" "\xCE\xFA\xCE\x74", .alen = 20, - .result = "\x29\xC9\xFC\x69\xA1\x97\xD0\x38" + .ctext = "\x29\xC9\xFC\x69\xA1\x97\xD0\x38" "\xCC\xDD\x14\xE2\xDD\xFC\xAA\x05" "\x43\x33\x21\x64\x41\x25\x03\x52" "\x43\x03\xED\x3C\x6C\x5F\x28\x38" "\x43\xAF\x8C\x3E", - .rlen = 36, + .clen = 36, }, { .key = "\x6C\x65\x67\x61\x6C\x69\x7A\x65" "\x6D\x61\x72\x69\x6A\x75\x61\x6E" @@ -17583,19 +17378,19 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x74\x75\x72\x6E", .klen = 36, .iv = "\x33\x30\x21\x69\x67\x65\x74\x6D", - .input = "\x45\x00\x00\x30\xDA\x3A\x00\x00" + .ptext = "\x45\x00\x00\x30\xDA\x3A\x00\x00" "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" "\xC0\xA8\x00\x01\x08\x00\xC6\xCD" "\x02\x00\x07\x00\x61\x62\x63\x64" "\x65\x66\x67\x68\x69\x6A\x6B\x6C" "\x6D\x6E\x6F\x70\x71\x72\x73\x74" "\x01\x02\x02\x01", - .ilen = 52, + .plen = 52, .assoc = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF" "\xFF\xFF\xFF\xFF\x33\x30\x21\x69" "\x67\x65\x74\x6D", .alen = 20, - .result = "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC" + .ctext = "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC" "\xE1\x76\x44\xAC\x8C\x78\xE2\x5D" "\xD2\x4D\xED\xBB\x29\xEB\xF1\xB6" "\x4A\x27\x4B\x39\xB4\x9C\x3A\x86" @@ -17604,26 +17399,26 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x1D\xCC\x63\xB9\xD0\x93\x7B\xA2" "\x94\x5F\x66\x93\x68\x66\x1A\x32" "\x9F\xB4\xC0\x53", - .rlen = 68, + .clen = 68, }, { .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" 
"\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" "\x57\x69\x0E\x43", .klen = 20, .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .input = "\x45\x00\x00\x30\xDA\x3A\x00\x00" + .ptext = "\x45\x00\x00\x30\xDA\x3A\x00\x00" "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" "\xC0\xA8\x00\x01\x08\x00\xC6\xCD" "\x02\x00\x07\x00\x61\x62\x63\x64" "\x65\x66\x67\x68\x69\x6A\x6B\x6C" "\x6D\x6E\x6F\x70\x71\x72\x73\x74" "\x01\x02\x02\x01", - .ilen = 52, + .plen = 52, .assoc = "\x3F\x7E\xF6\x42\x10\x10\x10\x10" "\x10\x10\x10\x10\x4E\x28\x00\x00" "\xA2\xFC\xA1\xA3", .alen = 20, - .result = "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0" + .ctext = "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0" "\xF2\x2C\xA5\x4A\x06\x12\x10\xAD" "\x3F\x6E\x57\x91\xCF\x1A\xCA\x21" "\x0D\x11\x7C\xEC\x9C\x35\x79\x17" @@ -17632,7262 +17427,2757 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x63\x21\x93\x06\x84\xEE\xCA\xDB" "\x56\x91\x25\x46\xE7\xA9\x5C\x97" "\x40\xD7\xCB\x05", - .rlen = 68, + .clen = 68, }, { .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" "\x22\x43\x3C\x64", .klen = 20, .iv = "\x48\x55\xEC\x7D\x3A\x23\x4B\xFD", - .input = "\x08\x00\xC6\xCD\x02\x00\x07\x00" + .ptext = "\x08\x00\xC6\xCD\x02\x00\x07\x00" "\x61\x62\x63\x64\x65\x66\x67\x68" "\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70" "\x71\x72\x73\x74\x01\x02\x02\x01", - .ilen = 32, + .plen = 32, .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" "\x00\x00\x00\x07\x48\x55\xEC\x7D" "\x3A\x23\x4B\xFD", .alen = 20, - .result = "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C" + .ctext = "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C" "\xD7\xC0\xF4\xAC\xC3\x6C\x4B\xFF" "\x84\xB7\xD7\xB9\x8F\x0C\xA8\xB6" "\xAC\xDA\x68\x94\xBC\x61\x90\x69" "\xEF\x9C\xBC\x28\xFE\x1B\x56\xA7" "\xC4\xE0\xD5\x8C\x86\xCD\x2B\xC0", - .rlen = 48, + .clen = 48, } }; -static const struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { - { /* Generated using Crypto++ */ - .key = zeroed_string, - .klen = 20, - .iv = zeroed_string, - .input = "\x03\x88\xDA\xCE\x60\xB6\xA3\x92" - "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78" - "\x97\xFE\x4C\x23\x37\x42\x01\xE0" - "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B", - .ilen = 32, - .assoc = zeroed_string, - .alen = 16, - .result = zeroed_string, - .rlen = 16, - - },{ - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\x00\x00\x00\x00", - .klen = 20, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", - .input = "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18" - "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28" - "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D" - "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF", - .ilen = 32, - .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x01", - .alen = 16, - .result = zeroed_string, - .rlen = 16, - }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\x00\x00\x00\x00", +static const struct aead_testvec aes_gcm_rfc4543_tv_template[] = { + { /* From draft-mcgrew-gcm-test-01 */ + .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda" + "\x90\x6a\xc7\x3c\x36\x13\xa6\x34" + "\x22\x43\x3c\x64", .klen = 20, - .iv = zeroed_string, - .input = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" - "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" - "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C" - "\xB1\x68\xFD\x14\x52\x64\x61\xB2", - .ilen = 32, - .assoc = zeroed_string, - .alen = 16, - .result = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01", - .rlen = 16, - }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\x00\x00\x00\x00", + .iv = zeroed_string, + .assoc = "\x00\x00\x43\x21\x00\x00\x00\x07" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .alen = 16, + .ptext 
= "\x45\x00\x00\x30\xda\x3a\x00\x00" + "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" + "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" + "\x02\x00\x07\x00\x61\x62\x63\x64" + "\x65\x66\x67\x68\x69\x6a\x6b\x6c" + "\x6d\x6e\x6f\x70\x71\x72\x73\x74" + "\x01\x02\x02\x01", + .plen = 52, + .ctext = "\x45\x00\x00\x30\xda\x3a\x00\x00" + "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" + "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" + "\x02\x00\x07\x00\x61\x62\x63\x64" + "\x65\x66\x67\x68\x69\x6a\x6b\x6c" + "\x6d\x6e\x6f\x70\x71\x72\x73\x74" + "\x01\x02\x02\x01\xf2\xa9\xa8\x36" + "\xe1\x55\x10\x6a\xa8\xdc\xd6\x18" + "\xe4\x09\x9a\xaa", + .clen = 68, + }, { /* nearly same as previous, but should fail */ + .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda" + "\x90\x6a\xc7\x3c\x36\x13\xa6\x34" + "\x22\x43\x3c\x64", .klen = 20, - .iv = zeroed_string, - .input = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" - "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" - "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63" - "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5", - .ilen = 32, - .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" + .iv = zeroed_string, + .assoc = "\x00\x00\x43\x21\x00\x00\x00\x07" "\x00\x00\x00\x00\x00\x00\x00\x00", - .alen = 16, - .result = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01", - .rlen = 16, + .alen = 16, + .ptext = "\x45\x00\x00\x30\xda\x3a\x00\x00" + "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" + "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" + "\x02\x00\x07\x00\x61\x62\x63\x64" + "\x65\x66\x67\x68\x69\x6a\x6b\x6c" + "\x6d\x6e\x6f\x70\x71\x72\x73\x74" + "\x01\x02\x02\x01", + .plen = 52, + .novrfy = 1, + .ctext = "\x45\x00\x00\x30\xda\x3a\x00\x00" + "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" + "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" + "\x02\x00\x07\x00\x61\x62\x63\x64" + "\x65\x66\x67\x68\x69\x6a\x6b\x6c" + "\x6d\x6e\x6f\x70\x71\x72\x73\x74" + "\x01\x02\x02\x01\xf2\xa9\xa8\x36" + "\xe1\x55\x10\x6a\xa8\xdc\xd6\x18" + "\x00\x00\x00\x00", + .clen = 68, + }, +}; +static const struct aead_testvec aes_ccm_tv_template[] = { + { /* From RFC 3610 */ + .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", + .klen = 16, + .iv = "\x01\x00\x00\x00\x03\x02\x01\x00" + "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00", + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07", + .alen = 8, + .ptext = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e", + .plen = 23, + .ctext = "\x58\x8c\x97\x9a\x61\xc6\x63\xd2" + "\xf0\x66\xd0\xc2\xc0\xf9\x89\x80" + "\x6d\x5f\x6b\x61\xda\xc3\x84\x17" + "\xe8\xd1\x2c\xfd\xf9\x26\xe0", + .clen = 31, }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\x00\x00\x00\x00", - .klen = 20, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", - .input = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" - "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" - "\x64\x50\xF9\x32\x13\xFB\x74\x61" - "\xF4\xED\x52\xD3\xC5\x10\x55\x3C", - .ilen = 32, - .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x00\x00\x00\x00\x00\x00\x00\x01", - .alen = 16, - .result = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01", - .rlen = 16, + .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", + .klen = 16, + .iv = "\x01\x00\x00\x00\x07\x06\x05\x04" + "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00", + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b", + .alen = 12, + .ptext = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13" + "\x14\x15\x16\x17\x18\x19\x1a\x1b" + "\x1c\x1d\x1e\x1f", + .plen = 20, + .ctext = "\xdc\xf1\xfb\x7b\x5d\x9e\x23\xfb" + "\x9d\x4e\x13\x12\x53\x65\x8a\xd8" + "\x6e\xbd\xca\x3e\x51\xe8\x3f\x07" + "\x7d\x9c\x2d\x93", + 
.clen = 28, }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\x00\x00\x00\x00", - .klen = 20, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", - .input = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" - "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" - "\x98\x14\xA1\x42\x37\x80\xFD\x90" - "\x68\x12\x01\xA8\x91\x89\xB9\x83" - "\x5B\x11\x77\x12\x9B\xFF\x24\x89" - "\x94\x5F\x18\x12\xBA\x27\x09\x39" - "\x99\x96\x76\x42\x15\x1C\xCD\xCB" - "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD" - "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85" - "\xBD\xCF\x62\x98\x58\x14\xE5\xBD", - .ilen = 80, - .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x00\x00\x00\x00\x00\x00\x00\x01", - .alen = 16, - .result = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01", - .rlen = 64, + .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", + .klen = 16, + .iv = "\x01\x00\x00\x00\x0b\x0a\x09\x08" + "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00", + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07", + .alen = 8, + .ptext = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20", + .plen = 25, + .ctext = "\x82\x53\x1a\x60\xcc\x24\x94\x5a" + "\x4b\x82\x79\x18\x1a\xb5\xc8\x4d" + "\xf2\x1c\xe7\xf9\xb7\x3f\x42\xe1" + "\x97\xea\x9c\x07\xe5\x6b\x5e\xb1" + "\x7e\x5f\x4e", + .clen = 35, }, { - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x00\x00\x00\x00", - .klen = 20, - .iv = "\x00\x00\x45\x67\x89\xab\xcd\xef", - .input = "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE" - "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A" - "\x44\x6D\xC3\x88\x46\x2E\xC2\x01" - "\x5E\xF6\x0C\x39\xF0\xC4\xA5\x82" - "\xCD\xE8\x31\xCC\x0A\x4C\xE4\x44" - "\x41\xA9\x82\x6F\x22\xA1\x23\x1A" - "\xA8\xE3\x16\xFD\x31\x5C\x27\x31" - "\xF1\x7F\x01\x63\xA3\xAF\x70\xA1" - "\xCF\x07\x57\x41\x67\xD0\xC4\x42" - "\xDB\x18\xC6\x4C\x4C\xE0\x3D\x9F" - "\x05\x07\xFB\x13\x7D\x4A\xCA\x5B" - "\xF0\xBF\x64\x7E\x05\xB1\x72\xEE" - "\x7C\x3B\xD4\xCD\x14\x03\xB2\x2C" - "\xD3\xA9\xEE\xFA\x17\xFC\x9C\xDF" - "\xC7\x75\x40\xFF\xAE\xAD\x1E\x59" - "\x2F\x30\x24\xFB\xAD\x6B\x10\xFA" - "\x6C\x9F\x5B\xE7\x25\xD5\xD0\x25" - "\xAC\x4A\x4B\xDA\xFC\x7A\x85\x1B" - "\x7E\x13\x06\x82\x08\x17\xA4\x35" - "\xEC\xC5\x8D\x63\x96\x81\x0A\x8F" - "\xA3\x05\x38\x95\x20\x1A\x47\x04" - "\x6F\x6D\xDA\x8F\xEF\xC1\x76\x35" - "\x6B\xC7\x4D\x0F\x94\x12\xCA\x3E" - "\x2E\xD5\x03\x2E\x86\x7E\xAA\x3B" - "\x37\x08\x1C\xCF\xBA\x5D\x71\x46" - "\x80\x72\xB0\x4C\x82\x0D\x60\x3C", - .ilen = 208, - .assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" - "\xaa\xaa\xaa\xaa\x00\x00\x45\x67" - "\x89\xab\xcd\xef", - .alen = 20, - .result = "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - 
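/*
 * A minimal sketch, not from this patch, assuming the kernel's u8 type and
 * -EINVAL: the 16-byte CCM .iv fields above follow the RFC 3610 counter
 * layout - iv[0] holds L - 1 (the size of the length field minus one),
 * then a 15 - L byte nonce, then L bytes left zero for the message length.
 * With iv[0] = 1, L = 2 and the nonce is the 13 bytes
 * "\x00\x00\x00\x03\x02\x01\x00\xa0\xa1\xa2\xa3\xa4\xa5".
 */
static int ccm_nonce_len(const u8 *iv)
{
        unsigned int l = iv[0] + 1;     /* length-field size in bytes */

        if (l < 2 || l > 8)             /* bounds enforced by crypto/ccm.c */
                return -EINVAL;
        return 15 - l;                  /* nonce occupies iv[1..15-L] */
}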
"\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff", - .rlen = 192, + .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", + .klen = 16, + .iv = "\x01\x00\x00\x00\x0c\x0b\x0a\x09" + "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00", + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b", + .alen = 12, + .ptext = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13" + "\x14\x15\x16\x17\x18\x19\x1a\x1b" + "\x1c\x1d\x1e", + .plen = 19, + .ctext = "\x07\x34\x25\x94\x15\x77\x85\x15" + "\x2b\x07\x40\x98\x33\x0a\xbb\x14" + "\x1b\x94\x7b\x56\x6a\xa9\x40\x6b" + "\x4d\x99\x99\x88\xdd", + .clen = 29, }, { - .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" - "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" - "\x2E\x44\x3B\x68", - .klen = 20, - .iv = "\x49\x56\xED\x7E\x3B\x24\x4C\xFE", - .result = "\x45\x00\x00\x48\x69\x9A\x00\x00" - "\x80\x11\x4D\xB7\xC0\xA8\x01\x02" - "\xC0\xA8\x01\x01\x0A\x9B\xF1\x56" - "\x38\xD3\x01\x00\x00\x01\x00\x00" - "\x00\x00\x00\x00\x04\x5F\x73\x69" - "\x70\x04\x5F\x75\x64\x70\x03\x73" - "\x69\x70\x09\x63\x79\x62\x65\x72" - "\x63\x69\x74\x79\x02\x64\x6B\x00" - "\x00\x21\x00\x01\x01\x02\x02\x01", - .rlen = 72, - .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" - "\x00\x00\x00\x00\x49\x56\xED\x7E" - "\x3B\x24\x4C\xFE", - .alen = 20, - .input = "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07" - "\xDC\x30\xDF\x52\x8D\xD2\x2B\x76" - "\x8D\x1B\x98\x73\x66\x96\xA6\xFD" - "\x34\x85\x09\xFA\x13\xCE\xAC\x34" - "\xCF\xA2\x43\x6F\x14\xA3\xF3\xCF" - "\x65\x92\x5B\xF1\xF4\xA1\x3C\x5D" - "\x15\xB2\x1E\x18\x84\xF5\xFF\x62" - "\x47\xAE\xAB\xB7\x86\xB9\x3B\xCE" - "\x61\xBC\x17\xD7\x68\xFD\x97\x32" - "\x45\x90\x18\x14\x8F\x6C\xBE\x72" - "\x2F\xD0\x47\x96\x56\x2D\xFD\xB4", - .ilen = 88, + .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3" + "\x25\xa7\x62\x36\xdf\x93\xcc\x6b", + .klen = 16, + .iv = "\x01\x00\x33\x56\x8e\xf7\xb2\x63" + "\x3c\x96\x96\x76\x6c\xfa\x00\x00", + .assoc = "\x63\x01\x8f\x76\xdc\x8a\x1b\xcb", + .alen = 8, + .ptext = "\x90\x20\xea\x6f\x91\xbd\xd8\x5a" + "\xfa\x00\x39\xba\x4b\xaf\xf9\xbf" + "\xb7\x9c\x70\x28\x94\x9c\xd0\xec", + .plen = 24, + .ctext = "\x4c\xcb\x1e\x7c\xa9\x81\xbe\xfa" + "\xa0\x72\x6c\x55\xd3\x78\x06\x12" + "\x98\xc8\x5c\x92\x81\x4a\xbc\x33" + "\xc5\x2e\xe8\x1d\x7d\x77\xc0\x8a", + .clen = 32, }, { - .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" - "\x6D\x6A\x8F\x94\x67\x30\x83\x08" - "\xCA\xFE\xBA\xBE", - .klen = 20, - .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", - .result = "\x45\x00\x00\x3E\x69\x8F\x00\x00" - "\x80\x11\x4D\xCC\xC0\xA8\x01\x02" - "\xC0\xA8\x01\x01\x0A\x98\x00\x35" - "\x00\x2A\x23\x43\xB2\xD0\x01\x00" - "\x00\x01\x00\x00\x00\x00\x00\x00" - "\x03\x73\x69\x70\x09\x63\x79\x62" - "\x65\x72\x63\x69\x74\x79\x02\x64" - "\x6B\x00\x00\x01\x00\x01\x00\x01", - .rlen = 64, - .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" - "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", - .alen = 16, - .input = "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1" - "\x6E\x3A\x65\xBE\xEB\x8D\xF3\x04" - "\xA5\xA5\x89\x7D\x33\xAE\x53\x0F" - "\x1B\xA7\x6D\x5D\x11\x4D\x2A\x5C" - "\x3D\xE8\x18\x27\xC1\x0E\x9A\x4F" - "\x51\x33\x0D\x0E\xEC\x41\x66\x42" - "\xCF\xBB\x85\xA5\xB4\x7E\x48\xA4" - "\xEC\x3B\x9B\xA9\x5D\x91\x8B\xD1" - "\x83\xB7\x0D\x3A\xA8\xBC\x6E\xE4" - "\xC3\x09\xE9\xD8\x5A\x41\xAD\x4A", - .ilen = 80, + .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3" + "\x25\xa7\x62\x36\xdf\x93\xcc\x6b", + .klen = 16, + .iv = "\x01\x00\xd5\x60\x91\x2d\x3f\x70" + "\x3c\x96\x96\x76\x6c\xfa\x00\x00", + .assoc = "\xcd\x90\x44\xd2\xb7\x1f\xdb\x81" + 
"\x20\xea\x60\xc0", + .alen = 12, + .ptext = "\x64\x35\xac\xba\xfb\x11\xa8\x2e" + "\x2f\x07\x1d\x7c\xa4\xa5\xeb\xd9" + "\x3a\x80\x3b\xa8\x7f", + .plen = 21, + .ctext = "\x00\x97\x69\xec\xab\xdf\x48\x62" + "\x55\x94\xc5\x92\x51\xe6\x03\x57" + "\x22\x67\x5e\x04\xc8\x47\x09\x9e" + "\x5a\xe0\x70\x45\x51", + .clen = 29, }, { - .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\x11\x22\x33\x44", - .klen = 36, - .iv = "\x01\x02\x03\x04\x05\x06\x07\x08", - .result = "\x45\x00\x00\x30\x69\xA6\x40\x00" - "\x80\x06\x26\x90\xC0\xA8\x01\x02" - "\x93\x89\x15\x5E\x0A\x9E\x00\x8B" - "\x2D\xC5\x7E\xE0\x00\x00\x00\x00" - "\x70\x02\x40\x00\x20\xBF\x00\x00" - "\x02\x04\x05\xB4\x01\x01\x04\x02" - "\x01\x02\x02\x01", - .rlen = 52, - .assoc = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02" - "\x01\x02\x03\x04\x05\x06\x07\x08", - .alen = 16, - .input = "\xFF\x42\x5C\x9B\x72\x45\x99\xDF" - "\x7A\x3B\xCD\x51\x01\x94\xE0\x0D" - "\x6A\x78\x10\x7F\x1B\x0B\x1C\xBF" - "\x06\xEF\xAE\x9D\x65\xA5\xD7\x63" - "\x74\x8A\x63\x79\x85\x77\x1D\x34" - "\x7F\x05\x45\x65\x9F\x14\xE9\x9D" - "\xEF\x84\x2D\x8E\xB3\x35\xF4\xEE" - "\xCF\xDB\xF8\x31\x82\x4B\x4C\x49" - "\x15\x95\x6C\x96", - .ilen = 68, + .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3" + "\x25\xa7\x62\x36\xdf\x93\xcc\x6b", + .klen = 16, + .iv = "\x01\x00\x42\xff\xf8\xf1\x95\x1c" + "\x3c\x96\x96\x76\x6c\xfa\x00\x00", + .assoc = "\xd8\x5b\xc7\xe6\x9f\x94\x4f\xb8", + .alen = 8, + .ptext = "\x8a\x19\xb9\x50\xbc\xf7\x1a\x01" + "\x8e\x5e\x67\x01\xc9\x17\x87\x65" + "\x98\x09\xd6\x7d\xbe\xdd\x18", + .plen = 23, + .ctext = "\xbc\x21\x8d\xaa\x94\x74\x27\xb6" + "\xdb\x38\x6a\x99\xac\x1a\xef\x23" + "\xad\xe0\xb5\x29\x39\xcb\x6a\x63" + "\x7c\xf9\xbe\xc2\x40\x88\x97\xc6" + "\xba", + .clen = 33, }, { - .key = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00", - .klen = 20, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x00", - .result = "\x45\x00\x00\x3C\x99\xC5\x00\x00" - "\x80\x01\xCB\x7A\x40\x67\x93\x18" - "\x01\x01\x01\x01\x08\x00\x07\x5C" - "\x02\x00\x44\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6A\x6B\x6C" - "\x6D\x6E\x6F\x70\x71\x72\x73\x74" - "\x75\x76\x77\x61\x62\x63\x64\x65" - "\x66\x67\x68\x69\x01\x02\x02\x01", - .rlen = 64, - .assoc = "\x00\x00\x00\x00\x00\x00\x00\x01" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .alen = 16, - .input = "\x46\x88\xDA\xF2\xF9\x73\xA3\x92" - "\x73\x29\x09\xC3\x31\xD5\x6D\x60" - "\xF6\x94\xAB\xAA\x41\x4B\x5E\x7F" - "\xF5\xFD\xCD\xFF\xF5\xE9\xA2\x84" - "\x45\x64\x76\x49\x27\x19\xFF\xB6" - "\x4D\xE7\xD9\xDC\xA1\xE1\xD8\x94" - "\xBC\x3B\xD5\x78\x73\xED\x4D\x18" - "\x1D\x19\xD4\xD5\xC8\xC1\x8A\xF3" - "\xF8\x21\xD4\x96\xEE\xB0\x96\xE9" - "\x8A\xD2\xB6\x9E\x47\x99\xC7\x1D", - .ilen = 80, - }, { - .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" - "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" - "\x57\x69\x0E\x43", - .klen = 20, - .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .result = "\x45\x00\x00\x3C\x99\xC3\x00\x00" - "\x80\x01\xCB\x7C\x40\x67\x93\x18" - "\x01\x01\x01\x01\x08\x00\x08\x5C" - "\x02\x00\x43\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6A\x6B\x6C" - "\x6D\x6E\x6F\x70\x71\x72\x73\x74" - "\x75\x76\x77\x61\x62\x63\x64\x65" - "\x66\x67\x68\x69\x01\x02\x02\x01", - .rlen = 64, - .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" - "\x10\x10\x10\x10\x4E\x28\x00\x00" - "\xA2\xFC\xA1\xA3", - .alen = 20, - .input = "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0" - "\xF2\x2C\xB1\x0D\x86\xDD\x83\xB0" - "\xFE\xC7\x56\x91\xCF\x1A\x04\xB0" - 
"\x0D\x11\x38\xEC\x9C\x35\x79\x17" - "\x65\xAC\xBD\x87\x01\xAD\x79\x84" - "\x5B\xF9\xFE\x3F\xBA\x48\x7B\xC9" - "\x17\x55\xE6\x66\x2B\x4C\x8D\x0D" - "\x1F\x5E\x22\x73\x95\x30\x32\x0A" - "\xE0\xD7\x31\xCC\x97\x8E\xCA\xFA" - "\xEA\xE8\x8F\x00\xE8\x0D\x6E\x48", - .ilen = 80, + /* This is taken from FIPS CAVS. */ + .key = "\x83\xac\x54\x66\xc2\xeb\xe5\x05" + "\x2e\x01\xd1\xfc\x5d\x82\x66\x2e", + .klen = 16, + .iv = "\x03\x96\xac\x59\x30\x07\xa1\xe2\xa2\xc7\x55\x24\0\0\0\0", + .alen = 0, + .ptext = "\x19\xc8\x81\xf6\xe9\x86\xff\x93" + "\x0b\x78\x67\xe5\xbb\xb7\xfc\x6e" + "\x83\x77\xb3\xa6\x0c\x8c\x9f\x9c" + "\x35\x2e\xad\xe0\x62\xf9\x91\xa1", + .plen = 32, + .ctext = "\xab\x6f\xe1\x69\x1d\x19\x99\xa8" + "\x92\xa0\xc4\x6f\x7e\xe2\x8b\xb1" + "\x70\xbb\x8c\xa6\x4c\x6e\x97\x8a" + "\x57\x2b\xbe\x5d\x98\xa6\xb1\x32" + "\xda\x24\xea\xd9\xa1\x39\x98\xfd" + "\xa4\xbe\xd9\xf2\x1a\x6d\x22\xa8", + .clen = 48, }, { - .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" - "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" - "\x57\x69\x0E\x43", - .klen = 20, - .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .result = "\x45\x00\x00\x1C\x42\xA2\x00\x00" - "\x80\x01\x44\x1F\x40\x67\x93\xB6" - "\xE0\x00\x00\x02\x0A\x00\xF5\xFF" - "\x01\x02\x02\x01", - .rlen = 28, - .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" - "\x10\x10\x10\x10\x4E\x28\x00\x00" - "\xA2\xFC\xA1\xA3", - .alen = 20, - .input = "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0" - "\xF2\x2C\x3E\x6E\x86\xDD\x83\x1E" - "\x1F\xC6\x57\x92\xCD\x1A\xF9\x13" - "\x0E\x13\x79\xED\x36\x9F\x07\x1F" - "\x35\xE0\x34\xBE\x95\xF1\x12\xE4" - "\xE7\xD0\x5D\x35", - .ilen = 44, + .key = "\x1e\x2c\x7e\x01\x41\x9a\xef\xc0" + "\x0d\x58\x96\x6e\x5c\xa2\x4b\xd3", + .klen = 16, + .iv = "\x03\x4f\xa3\x19\xd3\x01\x5a\xd8" + "\x30\x60\x15\x56\x00\x00\x00\x00", + .assoc = "\xda\xe6\x28\x9c\x45\x2d\xfd\x63" + "\x5e\xda\x4c\xb6\xe6\xfc\xf9\xb7" + "\x0c\x56\xcb\xe4\xe0\x05\x7a\xe1" + "\x0a\x63\x09\x78\xbc\x2c\x55\xde", + .alen = 32, + .ptext = "\x87\xa3\x36\xfd\x96\xb3\x93\x78" + "\xa9\x28\x63\xba\x12\xa3\x14\x85" + "\x57\x1e\x06\xc9\x7b\x21\xef\x76" + "\x7f\x38\x7e\x8e\x29\xa4\x3e\x7e", + .plen = 32, + .ctext = "\x8a\x1e\x11\xf0\x02\x6b\xe2\x19" + "\xfc\x70\xc4\x6d\x8e\xb7\x99\xab" + "\xc5\x4b\xa2\xac\xd3\xf3\x48\xff" + "\x3b\xb5\xce\x53\xef\xde\xbb\x02" + "\xa9\x86\x15\x6c\x13\xfe\xda\x0a" + "\x22\xb8\x29\x3d\xd8\x39\x9a\x23", + .clen = 48, }, { - .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" - "\x6D\x6A\x8F\x94\x67\x30\x83\x08" - "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" - "\xCA\xFE\xBA\xBE", - .klen = 28, - .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", - .result = "\x45\x00\x00\x28\xA4\xAD\x40\x00" - "\x40\x06\x78\x80\x0A\x01\x03\x8F" - "\x0A\x01\x06\x12\x80\x23\x06\xB8" - "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E" - "\x50\x10\x16\xD0\x75\x68\x00\x01", - .rlen = 40, - .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" - "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", - .alen = 16, - .input = "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4" - "\x0E\x59\x8B\x81\x22\xDE\x02\x42" - "\x09\x38\xB3\xAB\x33\xF8\x28\xE6" - "\x87\xB8\x85\x8B\x5B\xFB\xDB\xD0" - "\x31\x5B\x27\x45\x21\x44\xCC\x77" - "\x95\x45\x7B\x96\x52\x03\x7F\x53" - "\x18\x02\x7B\x5B\x4C\xD7\xA6\x36", - .ilen = 56, + .key = "\xf4\x6b\xc2\x75\x62\xfe\xb4\xe1" + "\xa3\xf0\xff\xdd\x4e\x4b\x12\x75" + "\x53\x14\x73\x66\x8d\x88\xf6\x80", + .klen = 24, + .iv = "\x03\xa0\x20\x35\x26\xf2\x21\x8d" + "\x50\x20\xda\xe2\x00\x00\x00\x00", + .assoc = "\x5b\x9e\x13\x67\x02\x5e\xef\xc1" + "\x6c\xf9\xd7\x1e\x52\x8f\x7a\x47" + "\xe9\xd4\xcf\x20\x14\x6e\xf0\x2d" + "\xd8\x9e\x2b\x56\x10\x23\x56\xe7", + .alen = 32, + .ctext = 
"\x36\xea\x7a\x70\x08\xdc\x6a\xbc" + "\xad\x0c\x7a\x63\xf6\x61\xfd\x9b", + .clen = 16, }, { - .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\xDE\xCA\xF8\x88", - .klen = 20, - .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", - .result = "\x45\x00\x00\x49\x33\xBA\x00\x00" - "\x7F\x11\x91\x06\xC3\xFB\x1D\x10" - "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" - "\x00\x35\xDD\x7B\x80\x03\x02\xD5" - "\x00\x00\x4E\x20\x00\x1E\x8C\x18" - "\xD7\x5B\x81\xDC\x91\xBA\xA0\x47" - "\x6B\x91\xB9\x24\xB2\x80\x38\x9D" - "\x92\xC9\x63\xBA\xC0\x46\xEC\x95" - "\x9B\x62\x66\xC0\x47\x22\xB1\x49" - "\x23\x01\x01\x01", - .rlen = 76, - .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" - "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" - "\xCE\xFA\xCE\x74", - .alen = 20, - .input = "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A" - "\xB2\xA2\xEA\x90\x1F\x73\xD8\x14" - "\xE3\xE7\xF2\x43\xD9\x54\x12\xE1" - "\xC3\x49\xC1\xD2\xFB\xEC\x16\x8F" - "\x91\x90\xFE\xEB\xAF\x2C\xB0\x19" - "\x84\xE6\x58\x63\x96\x5D\x74\x72" - "\xB7\x9D\xA3\x45\xE0\xE7\x80\x19" - "\x1F\x0D\x2F\x0E\x0F\x49\x6C\x22" - "\x6F\x21\x27\xB2\x7D\xB3\x57\x24" - "\xE7\x84\x5D\x68\x65\x1F\x57\xE6" - "\x5F\x35\x4F\x75\xFF\x17\x01\x57" - "\x69\x62\x34\x36", - .ilen = 92, + .key = "\x56\xdf\x5c\x8f\x26\x3f\x0e\x42" + "\xef\x7a\xd3\xce\xfc\x84\x60\x62" + "\xca\xb4\x40\xaf\x5f\xc9\xc9\x01", + .klen = 24, + .iv = "\x03\xd6\x3c\x8c\x86\x84\xb6\xcd" + "\xef\x09\x2e\x94\x00\x00\x00\x00", + .assoc = "\x02\x65\x78\x3c\xe9\x21\x30\x91" + "\xb1\xb9\xda\x76\x9a\x78\x6d\x95" + "\xf2\x88\x32\xa3\xf2\x50\xcb\x4c" + "\xe3\x00\x73\x69\x84\x69\x87\x79", + .alen = 32, + .ptext = "\x9f\xd2\x02\x4b\x52\x49\x31\x3c" + "\x43\x69\x3a\x2d\x8e\x70\xad\x7e" + "\xe0\xe5\x46\x09\x80\x89\x13\xb2" + "\x8c\x8b\xd9\x3f\x86\xfb\xb5\x6b", + .plen = 32, + .ctext = "\x39\xdf\x7c\x3c\x5a\x29\xb9\x62" + "\x5d\x51\xc2\x16\xd8\xbd\x06\x9f" + "\x9b\x6a\x09\x70\xc1\x51\x83\xc2" + "\x66\x88\x1d\x4f\x9a\xda\xe0\x1e" + "\xc7\x79\x11\x58\xe5\x6b\x20\x40" + "\x7a\xea\x46\x42\x8b\xe4\x6f\xe1", + .clen = 48, }, { - .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\x73\x61\x6C\x74", - .klen = 36, - .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", - .result = "\x45\x08\x00\x28\x73\x2C\x00\x00" - "\x40\x06\xE9\xF9\x0A\x01\x06\x12" - "\x0A\x01\x03\x8F\x06\xB8\x80\x23" - "\xDD\x6B\xAF\xBE\xCB\x71\x26\x02" - "\x50\x10\x1F\x64\x6D\x54\x00\x01", - .rlen = 40, - .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" - "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" - "\x69\x76\x65\x63", - .alen = 20, - .input = "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B" - "\x8D\x5E\xF3\x8B\xAD\x4D\xA5\x8D" - "\x1F\x27\x8F\xDE\x98\xEF\x67\x54" - "\x9D\x52\x4A\x30\x18\xD9\xA5\x7F" - "\xF4\xD3\xA3\x1C\xE6\x73\x11\x9E" - "\x45\x16\x26\xC2\x41\x57\x71\xE3" - "\xB7\xEE\xBC\xA6\x14\xC8\x9B\x35", - .ilen = 56, + .key = "\xe0\x8d\x99\x71\x60\xd7\x97\x1a" + "\xbd\x01\x99\xd5\x8a\xdf\x71\x3a" + "\xd3\xdf\x24\x4b\x5e\x3d\x4b\x4e" + "\x30\x7a\xb9\xd8\x53\x0a\x5e\x2b", + .klen = 32, + .iv = "\x03\x1e\x29\x91\xad\x8e\xc1\x53" + "\x0a\xcf\x2d\xbe\x00\x00\x00\x00", + .assoc = "\x19\xb6\x1f\x57\xc4\xf3\xf0\x8b" + "\x78\x2b\x94\x02\x29\x0f\x42\x27" + "\x6b\x75\xcb\x98\x34\x08\x7e\x79" + "\xe4\x3e\x49\x0d\x84\x8b\x22\x87", + .alen = 32, + .ptext = "\xe1\xd9\xd8\x13\xeb\x3a\x75\x3f" + "\x9d\xbd\x5f\x66\xbe\xdc\xbb\x66" + "\xbf\x17\x99\x62\x4a\x39\x27\x1f" + "\x1d\xdc\x24\xae\x19\x2f\x98\x4c", + .plen = 32, + .ctext = "\x19\xb8\x61\x33\x45\x2b\x43\x96" + "\x6f\x51\xd0\x20\x30\x7d\x9b\xc6" + 
"\x26\x3d\xf8\xc9\x65\x16\xa8\x9f" + "\xf0\x62\x17\x34\xf2\x1e\x8d\x75" + "\x4e\x13\xcc\xc0\xc3\x2a\x54\x2d", + .clen = 40, }, { - .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" - "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" - "\x57\x69\x0E\x43", - .klen = 20, - .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .result = "\x45\x00\x00\x49\x33\x3E\x00\x00" - "\x7F\x11\x91\x82\xC3\xFB\x1D\x10" - "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" - "\x00\x35\xCB\x45\x80\x03\x02\x5B" - "\x00\x00\x01\xE0\x00\x1E\x8C\x18" - "\xD6\x57\x59\xD5\x22\x84\xA0\x35" - "\x2C\x71\x47\x5C\x88\x80\x39\x1C" - "\x76\x4D\x6E\x5E\xE0\x49\x6B\x32" - "\x5A\xE2\x70\xC0\x38\x99\x49\x39" - "\x15\x01\x01\x01", - .rlen = 76, - .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" - "\x10\x10\x10\x10\x4E\x28\x00\x00" - "\xA2\xFC\xA1\xA3", - .alen = 20, - .input = "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0" - "\x0D\x3C\xEB\xF3\x05\x41\x0D\xB8" - "\x3D\x77\x84\xB6\x07\x32\x3D\x22" - "\x0F\x24\xB0\xA9\x7D\x54\x18\x28" - "\x00\xCA\xDB\x0F\x68\xD9\x9E\xF0" - "\xE0\xC0\xC8\x9A\xE9\xBE\xA8\x88" - "\x4E\x52\xD6\x5B\xC1\xAF\xD0\x74" - "\x0F\x74\x24\x44\x74\x7B\x5B\x39" - "\xAB\x53\x31\x63\xAA\xD4\x55\x0E" - "\xE5\x16\x09\x75\xCD\xB6\x08\xC5" - "\x76\x91\x89\x60\x97\x63\xB8\xE1" - "\x8C\xAA\x81\xE2", - .ilen = 92, + .key = "\x7c\xc8\x18\x3b\x8d\x99\xe0\x7c" + "\x45\x41\xb8\xbd\x5c\xa7\xc2\x32" + "\x8a\xb8\x02\x59\xa4\xfe\xa9\x2c" + "\x09\x75\x9a\x9b\x3c\x9b\x27\x39", + .klen = 32, + .iv = "\x03\xf9\xd9\x4e\x63\xb5\x3d\x9d" + "\x43\xf6\x1e\x50\0\0\0\0", + .assoc = "\x57\xf5\x6b\x8b\x57\x5c\x3d\x3b" + "\x13\x02\x01\x0c\x83\x4c\x96\x35" + "\x8e\xd6\x39\xcf\x7d\x14\x9b\x94" + "\xb0\x39\x36\xe6\x8f\x57\xe0\x13", + .alen = 32, + .ptext = "\x3b\x6c\x29\x36\xb6\xef\x07\xa6" + "\x83\x72\x07\x4f\xcf\xfa\x66\x89" + "\x5f\xca\xb1\xba\xd5\x8f\x2c\x27" + "\x30\xdb\x75\x09\x93\xd4\x65\xe4", + .plen = 32, + .ctext = "\xb0\x88\x5a\x33\xaa\xe5\xc7\x1d" + "\x85\x23\xc7\xc6\x2f\xf4\x1e\x3d" + "\xcc\x63\x44\x25\x07\x78\x4f\x9e" + "\x96\xb8\x88\xeb\xbc\x48\x1f\x06" + "\x39\xaf\x39\xac\xd8\x4a\x80\x39" + "\x7b\x72\x8a\xf7", + .clen = 44, }, { - .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\x73\x61\x6C\x74", - .klen = 36, - .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", - .result = "\x63\x69\x73\x63\x6F\x01\x72\x75" - "\x6C\x65\x73\x01\x74\x68\x65\x01" - "\x6E\x65\x74\x77\x65\x01\x64\x65" - "\x66\x69\x6E\x65\x01\x74\x68\x65" - "\x74\x65\x63\x68\x6E\x6F\x6C\x6F" - "\x67\x69\x65\x73\x01\x74\x68\x61" - "\x74\x77\x69\x6C\x6C\x01\x64\x65" - "\x66\x69\x6E\x65\x74\x6F\x6D\x6F" - "\x72\x72\x6F\x77\x01\x02\x02\x01", - .rlen = 72, - .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" - "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" - "\x69\x76\x65\x63", - .alen = 20, - .input = "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E" - "\xA1\x3D\x69\x73\xD3\x24\xC6\x9E" - "\x7B\x43\xF8\x26\xFB\x56\x83\x12" - "\x26\x50\x8B\xEB\xD2\xDC\xEB\x18" - "\xD0\xA6\xDF\x10\xE5\x48\x7D\xF0" - "\x74\x11\x3E\x14\xC6\x41\x02\x4E" - "\x3E\x67\x73\xD9\x1A\x62\xEE\x42" - "\x9B\x04\x3A\x10\xE3\xEF\xE6\xB0" - "\x12\xA4\x93\x63\x41\x23\x64\xF8" - "\xC0\xCA\xC5\x87\xF2\x49\xE5\x6B" - "\x11\xE2\x4F\x30\xE4\x4C\xCC\x76", - .ilen = 88, - }, { - .key = "\x7D\x77\x3D\x00\xC1\x44\xC5\x25" - "\xAC\x61\x9D\x18\xC8\x4A\x3F\x47" - "\xD9\x66\x42\x67", - .klen = 20, - .iv = "\x43\x45\x7E\x91\x82\x44\x3B\xC6", - .result = "\x01\x02\x02\x01", - .rlen = 4, - .assoc = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF" - "\x43\x45\x7E\x91\x82\x44\x3B\xC6", - .alen = 16, - .input = 
"\x43\x7F\x86\x6B\xCB\x3F\x69\x9F" - "\xE9\xB0\x82\x2B\xAC\x96\x1C\x45" - "\x04\xBE\xF2\x70", - .ilen = 20, - }, { - .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\xDE\xCA\xF8\x88", - .klen = 20, - .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", - .result = "\x74\x6F\x01\x62\x65\x01\x6F\x72" - "\x01\x6E\x6F\x74\x01\x74\x6F\x01" - "\x62\x65\x00\x01", - .rlen = 20, - .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" - "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" - "\xCE\xFA\xCE\x74", - .alen = 20, - .input = "\x29\xC9\xFC\x69\xA1\x97\xD0\x38" - "\xCC\xDD\x14\xE2\xDD\xFC\xAA\x05" - "\x43\x33\x21\x64\x41\x25\x03\x52" - "\x43\x03\xED\x3C\x6C\x5F\x28\x38" - "\x43\xAF\x8C\x3E", - .ilen = 36, - }, { - .key = "\x6C\x65\x67\x61\x6C\x69\x7A\x65" - "\x6D\x61\x72\x69\x6A\x75\x61\x6E" - "\x61\x61\x6E\x64\x64\x6F\x69\x74" - "\x62\x65\x66\x6F\x72\x65\x69\x61" - "\x74\x75\x72\x6E", - .klen = 36, - .iv = "\x33\x30\x21\x69\x67\x65\x74\x6D", - .result = "\x45\x00\x00\x30\xDA\x3A\x00\x00" - "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" - "\xC0\xA8\x00\x01\x08\x00\xC6\xCD" - "\x02\x00\x07\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6A\x6B\x6C" - "\x6D\x6E\x6F\x70\x71\x72\x73\x74" - "\x01\x02\x02\x01", - .rlen = 52, - .assoc = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF" - "\xFF\xFF\xFF\xFF\x33\x30\x21\x69" - "\x67\x65\x74\x6D", - .alen = 20, - .input = "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC" - "\xE1\x76\x44\xAC\x8C\x78\xE2\x5D" - "\xD2\x4D\xED\xBB\x29\xEB\xF1\xB6" - "\x4A\x27\x4B\x39\xB4\x9C\x3A\x86" - "\x4C\xD3\xD7\x8C\xA4\xAE\x68\xA3" - "\x2B\x42\x45\x8F\xB5\x7D\xBE\x82" - "\x1D\xCC\x63\xB9\xD0\x93\x7B\xA2" - "\x94\x5F\x66\x93\x68\x66\x1A\x32" - "\x9F\xB4\xC0\x53", - .ilen = 68, - }, { - .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" - "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" - "\x57\x69\x0E\x43", - .klen = 20, - .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .result = "\x45\x00\x00\x30\xDA\x3A\x00\x00" - "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" - "\xC0\xA8\x00\x01\x08\x00\xC6\xCD" - "\x02\x00\x07\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6A\x6B\x6C" - "\x6D\x6E\x6F\x70\x71\x72\x73\x74" - "\x01\x02\x02\x01", - .rlen = 52, - .assoc = "\x3F\x7E\xF6\x42\x10\x10\x10\x10" - "\x10\x10\x10\x10\x4E\x28\x00\x00" - "\xA2\xFC\xA1\xA3", - .alen = 20, - .input = "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0" - "\xF2\x2C\xA5\x4A\x06\x12\x10\xAD" - "\x3F\x6E\x57\x91\xCF\x1A\xCA\x21" - "\x0D\x11\x7C\xEC\x9C\x35\x79\x17" - "\x65\xAC\xBD\x87\x01\xAD\x79\x84" - "\x5B\xF9\xFE\x3F\xBA\x48\x7B\xC9" - "\x63\x21\x93\x06\x84\xEE\xCA\xDB" - "\x56\x91\x25\x46\xE7\xA9\x5C\x97" - "\x40\xD7\xCB\x05", - .ilen = 68, - }, { - .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" - "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" - "\x22\x43\x3C\x64", - .klen = 20, - .iv = "\x48\x55\xEC\x7D\x3A\x23\x4B\xFD", - .result = "\x08\x00\xC6\xCD\x02\x00\x07\x00" - "\x61\x62\x63\x64\x65\x66\x67\x68" - "\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70" - "\x71\x72\x73\x74\x01\x02\x02\x01", - .rlen = 32, - .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" - "\x00\x00\x00\x07\x48\x55\xEC\x7D" - "\x3A\x23\x4B\xFD", - .alen = 20, - .input = "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C" - "\xD7\xC0\xF4\xAC\xC3\x6C\x4B\xFF" - "\x84\xB7\xD7\xB9\x8F\x0C\xA8\xB6" - "\xAC\xDA\x68\x94\xBC\x61\x90\x69" - "\xEF\x9C\xBC\x28\xFE\x1B\x56\xA7" - "\xC4\xE0\xD5\x8C\x86\xCD\x2B\xC0", - .ilen = 48, - } -}; - -static const struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = { - { /* From draft-mcgrew-gcm-test-01 */ - .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda" - "\x90\x6a\xc7\x3c\x36\x13\xa6\x34" - "\x22\x43\x3c\x64", - .klen = 20, - .iv = zeroed_string, - .assoc = 
"\x00\x00\x43\x21\x00\x00\x00\x07" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .alen = 16, - .input = "\x45\x00\x00\x30\xda\x3a\x00\x00" - "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" - "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" - "\x02\x00\x07\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6a\x6b\x6c" - "\x6d\x6e\x6f\x70\x71\x72\x73\x74" - "\x01\x02\x02\x01", - .ilen = 52, - .result = "\x45\x00\x00\x30\xda\x3a\x00\x00" - "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" - "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" - "\x02\x00\x07\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6a\x6b\x6c" - "\x6d\x6e\x6f\x70\x71\x72\x73\x74" - "\x01\x02\x02\x01\xf2\xa9\xa8\x36" - "\xe1\x55\x10\x6a\xa8\xdc\xd6\x18" - "\xe4\x09\x9a\xaa", - .rlen = 68, - } -}; - -static const struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = { - { /* From draft-mcgrew-gcm-test-01 */ - .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda" - "\x90\x6a\xc7\x3c\x36\x13\xa6\x34" - "\x22\x43\x3c\x64", - .klen = 20, - .iv = zeroed_string, - .assoc = "\x00\x00\x43\x21\x00\x00\x00\x07" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .alen = 16, - .input = "\x45\x00\x00\x30\xda\x3a\x00\x00" - "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" - "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" - "\x02\x00\x07\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6a\x6b\x6c" - "\x6d\x6e\x6f\x70\x71\x72\x73\x74" - "\x01\x02\x02\x01\xf2\xa9\xa8\x36" - "\xe1\x55\x10\x6a\xa8\xdc\xd6\x18" - "\xe4\x09\x9a\xaa", - .ilen = 68, - .result = "\x45\x00\x00\x30\xda\x3a\x00\x00" - "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" - "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" - "\x02\x00\x07\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6a\x6b\x6c" - "\x6d\x6e\x6f\x70\x71\x72\x73\x74" - "\x01\x02\x02\x01", - .rlen = 52, - }, { /* nearly same as previous, but should fail */ - .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda" - "\x90\x6a\xc7\x3c\x36\x13\xa6\x34" - "\x22\x43\x3c\x64", - .klen = 20, - .iv = zeroed_string, - .assoc = "\x00\x00\x43\x21\x00\x00\x00\x07" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .alen = 16, - .input = "\x45\x00\x00\x30\xda\x3a\x00\x00" - "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" - "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" - "\x02\x00\x07\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6a\x6b\x6c" - "\x6d\x6e\x6f\x70\x71\x72\x73\x74" - "\x01\x02\x02\x01\xf2\xa9\xa8\x36" - "\xe1\x55\x10\x6a\xa8\xdc\xd6\x18" - "\x00\x00\x00\x00", - .ilen = 68, - .novrfy = 1, - .result = "\x45\x00\x00\x30\xda\x3a\x00\x00" - "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" - "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" - "\x02\x00\x07\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6a\x6b\x6c" - "\x6d\x6e\x6f\x70\x71\x72\x73\x74" - "\x01\x02\x02\x01", - .rlen = 52, - }, -}; - -static const struct aead_testvec aes_ccm_enc_tv_template[] = { - { /* From RFC 3610 */ - .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", - .klen = 16, - .iv = "\x01\x00\x00\x00\x03\x02\x01\x00" - "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00", - .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07", - .alen = 8, - .input = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e", - .ilen = 23, - .result = "\x58\x8c\x97\x9a\x61\xc6\x63\xd2" - "\xf0\x66\xd0\xc2\xc0\xf9\x89\x80" - "\x6d\x5f\x6b\x61\xda\xc3\x84\x17" - "\xe8\xd1\x2c\xfd\xf9\x26\xe0", - .rlen = 31, - }, { - .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", - .klen = 16, - .iv = "\x01\x00\x00\x00\x07\x06\x05\x04" - "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00", - .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b", - .alen = 12, - .input = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13" - 
"\x14\x15\x16\x17\x18\x19\x1a\x1b" - "\x1c\x1d\x1e\x1f", - .ilen = 20, - .result = "\xdc\xf1\xfb\x7b\x5d\x9e\x23\xfb" - "\x9d\x4e\x13\x12\x53\x65\x8a\xd8" - "\x6e\xbd\xca\x3e\x51\xe8\x3f\x07" - "\x7d\x9c\x2d\x93", - .rlen = 28, - }, { - .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", - .klen = 16, - .iv = "\x01\x00\x00\