From e01966e643c97eb8916bbbbbeceb687afb11f345 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Tue, 26 Dec 2023 11:00:20 +0100 Subject: [PATCH 01/79] crypto: virtio - Less function calls in __virtio_crypto_akcipher_do_req() after error detection The kfree() function was called in up to two cases by the __virtio_crypto_akcipher_do_req() function during error handling even if the passed variable contained a null pointer. This issue was detected by using the Coccinelle software. * Adjust jump targets. * Delete two initialisations which became unnecessary with this refactoring. Signed-off-by: Markus Elfring Reviewed-by: Gonglei Reviewed-by: Justin Stitt Signed-off-by: Herbert Xu --- drivers/crypto/virtio/virtio_crypto_akcipher_algs.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c index 2621ff8a9376..057da5bd8d30 100644 --- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c @@ -224,11 +224,11 @@ static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request struct virtio_crypto *vcrypto = ctx->vcrypto; struct virtio_crypto_op_data_req *req_data = vc_req->req_data; struct scatterlist *sgs[4], outhdr_sg, inhdr_sg, srcdata_sg, dstdata_sg; - void *src_buf = NULL, *dst_buf = NULL; + void *src_buf, *dst_buf = NULL; unsigned int num_out = 0, num_in = 0; int node = dev_to_node(&vcrypto->vdev->dev); unsigned long flags; - int ret = -ENOMEM; + int ret; bool verify = vc_akcipher_req->opcode == VIRTIO_CRYPTO_AKCIPHER_VERIFY; unsigned int src_len = verify ? req->src_len + req->dst_len : req->src_len; @@ -239,7 +239,7 @@ static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request /* src data */ src_buf = kcalloc_node(src_len, 1, GFP_KERNEL, node); if (!src_buf) - goto err; + return -ENOMEM; if (verify) { /* for verify operation, both src and dst data work as OUT direction */ @@ -254,7 +254,7 @@ static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request /* dst data */ dst_buf = kcalloc_node(req->dst_len, 1, GFP_KERNEL, node); if (!dst_buf) - goto err; + goto free_src; sg_init_one(&dstdata_sg, dst_buf, req->dst_len); sgs[num_out + num_in++] = &dstdata_sg; @@ -277,9 +277,9 @@ static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request return 0; err: - kfree(src_buf); kfree(dst_buf); - +free_src: + kfree(src_buf); return -ENOMEM; } From 109303336a0cc8ed903d8b1b83114d79b841d8de Mon Sep 17 00:00:00 2001 From: Danny Tsen Date: Tue, 2 Jan 2024 15:58:56 -0500 Subject: [PATCH 02/79] crypto: vmx - Move to arch/powerpc/crypto Relocate all crypto files in vmx driver to arch/powerpc/crypto directory and remove vmx directory. 
drivers/crypto/vmx/aes.c rename to arch/powerpc/crypto/aes.c
drivers/crypto/vmx/aes_cbc.c rename to arch/powerpc/crypto/aes_cbc.c
drivers/crypto/vmx/aes_ctr.c rename to arch/powerpc/crypto/aes_ctr.c
drivers/crypto/vmx/aes_xts.c rename to arch/powerpc/crypto/aes_xts.c
drivers/crypto/vmx/aesp8-ppc.h rename to arch/powerpc/crypto/aesp8-ppc.h
drivers/crypto/vmx/aesp8-ppc.pl rename to arch/powerpc/crypto/aesp8-ppc.pl
drivers/crypto/vmx/ghash.c rename to arch/powerpc/crypto/ghash.c
drivers/crypto/vmx/ghashp8-ppc.pl rename to arch/powerpc/crypto/ghashp8-ppc.pl
drivers/crypto/vmx/vmx.c rename to arch/powerpc/crypto/vmx.c

deleted files:
drivers/crypto/vmx/Makefile
drivers/crypto/vmx/Kconfig
drivers/crypto/vmx/ppc-xlate.pl

This patch has been tested and has passed the selftest. The patch was
also tested with CONFIG_CRYPTO_MANAGER_EXTRA_TESTS enabled.

Signed-off-by: Danny Tsen
Signed-off-by: Herbert Xu
---
 arch/powerpc/crypto/Kconfig                   |  20 ++
 arch/powerpc/crypto/Makefile                  |  20 +-
 .../crypto/vmx => arch/powerpc/crypto}/aes.c  |   0
 .../vmx => arch/powerpc/crypto}/aes_cbc.c     |   0
 .../vmx => arch/powerpc/crypto}/aes_ctr.c     |   0
 .../vmx => arch/powerpc/crypto}/aes_xts.c     |   0
 .../vmx => arch/powerpc/crypto}/aesp8-ppc.h   |   0
 .../vmx => arch/powerpc/crypto}/aesp8-ppc.pl  |   0
 .../vmx => arch/powerpc/crypto}/ghash.c       |   0
 .../powerpc/crypto}/ghashp8-ppc.pl            |   0
 .../crypto/vmx => arch/powerpc/crypto}/vmx.c  |   0
 drivers/crypto/Kconfig                        |  14 +-
 drivers/crypto/Makefile                       |   2 +-
 drivers/crypto/vmx/.gitignore                 |   3 -
 drivers/crypto/vmx/Kconfig                    |  14 --
 drivers/crypto/vmx/Makefile                   |  23 --
 drivers/crypto/vmx/ppc-xlate.pl               | 231 ------------------
 17 files changed, 46 insertions(+), 281 deletions(-)
 rename {drivers/crypto/vmx => arch/powerpc/crypto}/aes.c (100%)
 rename {drivers/crypto/vmx => arch/powerpc/crypto}/aes_cbc.c (100%)
 rename {drivers/crypto/vmx => arch/powerpc/crypto}/aes_ctr.c (100%)
 rename {drivers/crypto/vmx => arch/powerpc/crypto}/aes_xts.c (100%)
 rename {drivers/crypto/vmx => arch/powerpc/crypto}/aesp8-ppc.h (100%)
 rename {drivers/crypto/vmx => arch/powerpc/crypto}/aesp8-ppc.pl (100%)
 rename {drivers/crypto/vmx => arch/powerpc/crypto}/ghash.c (100%)
 rename {drivers/crypto/vmx => arch/powerpc/crypto}/ghashp8-ppc.pl (100%)
 rename {drivers/crypto/vmx => arch/powerpc/crypto}/vmx.c (100%)
 delete mode 100644 drivers/crypto/vmx/.gitignore
 delete mode 100644 drivers/crypto/vmx/Kconfig
 delete mode 100644 drivers/crypto/vmx/Makefile
 delete mode 100644 drivers/crypto/vmx/ppc-xlate.pl

diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig
index 6fc2248ca561..1e201b7ae2fc 100644
--- a/arch/powerpc/crypto/Kconfig
+++ b/arch/powerpc/crypto/Kconfig
@@ -137,4 +137,24 @@ config CRYPTO_POLY1305_P10
 	  - Power10 or later
 	  - Little-endian

+config CRYPTO_DEV_VMX
+	bool "Support for VMX cryptographic acceleration instructions"
+	depends on PPC64 && VSX
+	help
+	  Support for VMX cryptographic acceleration instructions.
+
+config CRYPTO_DEV_VMX_ENCRYPT
+	tristate "Encryption acceleration support on P8 CPU"
+	depends on CRYPTO_DEV_VMX
+	select CRYPTO_AES
+	select CRYPTO_CBC
+	select CRYPTO_CTR
+	select CRYPTO_GHASH
+	select CRYPTO_XTS
+	default m
+	help
+	  Support for VMX cryptographic acceleration instructions on Power8 CPU.
+	  This module supports acceleration for AES and GHASH in hardware. If you
+	  choose 'M' here, this module will be called vmx-crypto.
+ endmenu diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile index ebdac1b9eb9a..fca0e9739869 100644 --- a/arch/powerpc/crypto/Makefile +++ b/arch/powerpc/crypto/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_CRYPTO_VPMSUM_TESTER) += crc-vpmsum_test.o obj-$(CONFIG_CRYPTO_AES_GCM_P10) += aes-gcm-p10-crypto.o obj-$(CONFIG_CRYPTO_CHACHA20_P10) += chacha-p10-crypto.o obj-$(CONFIG_CRYPTO_POLY1305_P10) += poly1305-p10-crypto.o +obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o md5-ppc-y := md5-asm.o md5-glue.o @@ -27,14 +28,29 @@ crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o chacha-p10-crypto-y := chacha-p10-glue.o chacha-p10le-8x.o poly1305-p10-crypto-y := poly1305-p10-glue.o poly1305-p10le_64.o +vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o + +ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) +override flavour := linux-ppc64le +else +ifdef CONFIG_PPC64_ELF_ABI_V2 +override flavour := linux-ppc64-elfv2 +else +override flavour := linux-ppc64 +endif +endif quiet_cmd_perl = PERL $@ - cmd_perl = $(PERL) $< $(if $(CONFIG_CPU_LITTLE_ENDIAN), linux-ppc64le, linux-ppc64) > $@ + cmd_perl = $(PERL) $< $(flavour) > $@ -targets += aesp10-ppc.S ghashp10-ppc.S +targets += aesp10-ppc.S ghashp10-ppc.S aesp8-ppc.S ghashp8-ppc.S $(obj)/aesp10-ppc.S $(obj)/ghashp10-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE $(call if_changed,perl) +$(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE + $(call if_changed,perl) + OBJECT_FILES_NON_STANDARD_aesp10-ppc.o := y OBJECT_FILES_NON_STANDARD_ghashp10-ppc.o := y +OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y diff --git a/drivers/crypto/vmx/aes.c b/arch/powerpc/crypto/aes.c similarity index 100% rename from drivers/crypto/vmx/aes.c rename to arch/powerpc/crypto/aes.c diff --git a/drivers/crypto/vmx/aes_cbc.c b/arch/powerpc/crypto/aes_cbc.c similarity index 100% rename from drivers/crypto/vmx/aes_cbc.c rename to arch/powerpc/crypto/aes_cbc.c diff --git a/drivers/crypto/vmx/aes_ctr.c b/arch/powerpc/crypto/aes_ctr.c similarity index 100% rename from drivers/crypto/vmx/aes_ctr.c rename to arch/powerpc/crypto/aes_ctr.c diff --git a/drivers/crypto/vmx/aes_xts.c b/arch/powerpc/crypto/aes_xts.c similarity index 100% rename from drivers/crypto/vmx/aes_xts.c rename to arch/powerpc/crypto/aes_xts.c diff --git a/drivers/crypto/vmx/aesp8-ppc.h b/arch/powerpc/crypto/aesp8-ppc.h similarity index 100% rename from drivers/crypto/vmx/aesp8-ppc.h rename to arch/powerpc/crypto/aesp8-ppc.h diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/arch/powerpc/crypto/aesp8-ppc.pl similarity index 100% rename from drivers/crypto/vmx/aesp8-ppc.pl rename to arch/powerpc/crypto/aesp8-ppc.pl diff --git a/drivers/crypto/vmx/ghash.c b/arch/powerpc/crypto/ghash.c similarity index 100% rename from drivers/crypto/vmx/ghash.c rename to arch/powerpc/crypto/ghash.c diff --git a/drivers/crypto/vmx/ghashp8-ppc.pl b/arch/powerpc/crypto/ghashp8-ppc.pl similarity index 100% rename from drivers/crypto/vmx/ghashp8-ppc.pl rename to arch/powerpc/crypto/ghashp8-ppc.pl diff --git a/drivers/crypto/vmx/vmx.c b/arch/powerpc/crypto/vmx.c similarity index 100% rename from drivers/crypto/vmx/vmx.c rename to arch/powerpc/crypto/vmx.c diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 0991f026cb07..3d02702456a5 100644 --- a/drivers/crypto/Kconfig +++ 
b/drivers/crypto/Kconfig @@ -611,13 +611,13 @@ config CRYPTO_DEV_QCOM_RNG To compile this driver as a module, choose M here. The module will be called qcom-rng. If unsure, say N. -config CRYPTO_DEV_VMX - bool "Support for VMX cryptographic acceleration instructions" - depends on PPC64 && VSX - help - Support for VMX cryptographic acceleration instructions. - -source "drivers/crypto/vmx/Kconfig" +#config CRYPTO_DEV_VMX +# bool "Support for VMX cryptographic acceleration instructions" +# depends on PPC64 && VSX +# help +# Support for VMX cryptographic acceleration instructions. +# +#source "drivers/crypto/vmx/Kconfig" config CRYPTO_DEV_IMGTEC_HASH tristate "Imagination Technologies hardware hash accelerator" diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index d859d6a5f3a4..95331bc6456b 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -42,7 +42,7 @@ obj-$(CONFIG_CRYPTO_DEV_SL3516) += gemini/ obj-y += stm32/ obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/ -obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ +#obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/ obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/ obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/ diff --git a/drivers/crypto/vmx/.gitignore b/drivers/crypto/vmx/.gitignore deleted file mode 100644 index 7aa71d83f739..000000000000 --- a/drivers/crypto/vmx/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -aesp8-ppc.S -ghashp8-ppc.S diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig deleted file mode 100644 index b2c28b87f14b..000000000000 --- a/drivers/crypto/vmx/Kconfig +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -config CRYPTO_DEV_VMX_ENCRYPT - tristate "Encryption acceleration support on P8 CPU" - depends on CRYPTO_DEV_VMX - select CRYPTO_AES - select CRYPTO_CBC - select CRYPTO_CTR - select CRYPTO_GHASH - select CRYPTO_XTS - default m - help - Support for VMX cryptographic acceleration instructions on Power8 CPU. - This module supports acceleration for AES and GHASH in hardware. If you - choose 'M' here, this module will be called vmx-crypto. diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile deleted file mode 100644 index 7257b8c44626..000000000000 --- a/drivers/crypto/vmx/Makefile +++ /dev/null @@ -1,23 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o -vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o - -ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) -override flavour := linux-ppc64le -else -ifdef CONFIG_PPC64_ELF_ABI_V2 -override flavour := linux-ppc64-elfv2 -else -override flavour := linux-ppc64 -endif -endif - -quiet_cmd_perl = PERL $@ - cmd_perl = $(PERL) $< $(flavour) > $@ - -targets += aesp8-ppc.S ghashp8-ppc.S - -$(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE - $(call if_changed,perl) - -OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y diff --git a/drivers/crypto/vmx/ppc-xlate.pl b/drivers/crypto/vmx/ppc-xlate.pl deleted file mode 100644 index b583898c11ae..000000000000 --- a/drivers/crypto/vmx/ppc-xlate.pl +++ /dev/null @@ -1,231 +0,0 @@ -#!/usr/bin/env perl -# SPDX-License-Identifier: GPL-2.0 - -# PowerPC assembler distiller by . 
- -my $flavour = shift; -my $output = shift; -open STDOUT,">$output" || die "can't open $output: $!"; - -my %GLOBALS; -my $dotinlocallabels=($flavour=~/linux/)?1:0; -my $elfv2abi=(($flavour =~ /linux-ppc64le/) or ($flavour =~ /linux-ppc64-elfv2/))?1:0; -my $dotfunctions=($elfv2abi=~1)?0:1; - -################################################################ -# directives which need special treatment on different platforms -################################################################ -my $globl = sub { - my $junk = shift; - my $name = shift; - my $global = \$GLOBALS{$name}; - my $ret; - - $name =~ s|^[\.\_]||; - - SWITCH: for ($flavour) { - /aix/ && do { $name = ".$name"; - last; - }; - /osx/ && do { $name = "_$name"; - last; - }; - /linux/ - && do { $ret = "_GLOBAL($name)"; - last; - }; - } - - $ret = ".globl $name\nalign 5\n$name:" if (!$ret); - $$global = $name; - $ret; -}; -my $text = sub { - my $ret = ($flavour =~ /aix/) ? ".csect\t.text[PR],7" : ".text"; - $ret = ".abiversion 2\n".$ret if ($elfv2abi); - $ret; -}; -my $machine = sub { - my $junk = shift; - my $arch = shift; - if ($flavour =~ /osx/) - { $arch =~ s/\"//g; - $arch = ($flavour=~/64/) ? "ppc970-64" : "ppc970" if ($arch eq "any"); - } - ".machine $arch"; -}; -my $size = sub { - if ($flavour =~ /linux/) - { shift; - my $name = shift; $name =~ s|^[\.\_]||; - my $ret = ".size $name,.-".($dotfunctions?".":"").$name; - $ret .= "\n.size .$name,.-.$name" if ($dotfunctions); - $ret; - } - else - { ""; } -}; -my $asciz = sub { - shift; - my $line = join(",",@_); - if ($line =~ /^"(.*)"$/) - { ".byte " . join(",",unpack("C*",$1),0) . "\n.align 2"; } - else - { ""; } -}; -my $quad = sub { - shift; - my @ret; - my ($hi,$lo); - for (@_) { - if (/^0x([0-9a-f]*?)([0-9a-f]{1,8})$/io) - { $hi=$1?"0x$1":"0"; $lo="0x$2"; } - elsif (/^([0-9]+)$/o) - { $hi=$1>>32; $lo=$1&0xffffffff; } # error-prone with 32-bit perl - else - { $hi=undef; $lo=$_; } - - if (defined($hi)) - { push(@ret,$flavour=~/le$/o?".long\t$lo,$hi":".long\t$hi,$lo"); } - else - { push(@ret,".quad $lo"); } - } - join("\n",@ret); -}; - -################################################################ -# simplified mnemonics not handled by at least one assembler -################################################################ -my $cmplw = sub { - my $f = shift; - my $cr = 0; $cr = shift if ($#_>1); - # Some out-of-date 32-bit GNU assembler just can't handle cmplw... - ($flavour =~ /linux.*32/) ? - " .long ".sprintf "0x%x",31<<26|$cr<<23|$_[0]<<16|$_[1]<<11|64 : - " cmplw ".join(',',$cr,@_); -}; -my $bdnz = sub { - my $f = shift; - my $bo = $f=~/[\+\-]/ ? 16+9 : 16; # optional "to be taken" hint - " bc $bo,0,".shift; -} if ($flavour!~/linux/); -my $bltlr = sub { - my $f = shift; - my $bo = $f=~/\-/ ? 12+2 : 12; # optional "not to be taken" hint - ($flavour =~ /linux/) ? # GNU as doesn't allow most recent hints - " .long ".sprintf "0x%x",19<<26|$bo<<21|16<<1 : - " bclr $bo,0"; -}; -my $bnelr = sub { - my $f = shift; - my $bo = $f=~/\-/ ? 4+2 : 4; # optional "not to be taken" hint - ($flavour =~ /linux/) ? # GNU as doesn't allow most recent hints - " .long ".sprintf "0x%x",19<<26|$bo<<21|2<<16|16<<1 : - " bclr $bo,2"; -}; -my $beqlr = sub { - my $f = shift; - my $bo = $f=~/-/ ? 12+2 : 12; # optional "not to be taken" hint - ($flavour =~ /linux/) ? 
# GNU as doesn't allow most recent hints - " .long ".sprintf "0x%X",19<<26|$bo<<21|2<<16|16<<1 : - " bclr $bo,2"; -}; -# GNU assembler can't handle extrdi rA,rS,16,48, or when sum of last two -# arguments is 64, with "operand out of range" error. -my $extrdi = sub { - my ($f,$ra,$rs,$n,$b) = @_; - $b = ($b+$n)&63; $n = 64-$n; - " rldicl $ra,$rs,$b,$n"; -}; -my $vmr = sub { - my ($f,$vx,$vy) = @_; - " vor $vx,$vy,$vy"; -}; - -# Some ABIs specify vrsave, special-purpose register #256, as reserved -# for system use. -my $no_vrsave = ($elfv2abi); -my $mtspr = sub { - my ($f,$idx,$ra) = @_; - if ($idx == 256 && $no_vrsave) { - " or $ra,$ra,$ra"; - } else { - " mtspr $idx,$ra"; - } -}; -my $mfspr = sub { - my ($f,$rd,$idx) = @_; - if ($idx == 256 && $no_vrsave) { - " li $rd,-1"; - } else { - " mfspr $rd,$idx"; - } -}; - -# PowerISA 2.06 stuff -sub vsxmem_op { - my ($f, $vrt, $ra, $rb, $op) = @_; - " .long ".sprintf "0x%X",(31<<26)|($vrt<<21)|($ra<<16)|($rb<<11)|($op*2+1); -} -# made-up unaligned memory reference AltiVec/VMX instructions -my $lvx_u = sub { vsxmem_op(@_, 844); }; # lxvd2x -my $stvx_u = sub { vsxmem_op(@_, 972); }; # stxvd2x -my $lvdx_u = sub { vsxmem_op(@_, 588); }; # lxsdx -my $stvdx_u = sub { vsxmem_op(@_, 716); }; # stxsdx -my $lvx_4w = sub { vsxmem_op(@_, 780); }; # lxvw4x -my $stvx_4w = sub { vsxmem_op(@_, 908); }; # stxvw4x - -# PowerISA 2.07 stuff -sub vcrypto_op { - my ($f, $vrt, $vra, $vrb, $op) = @_; - " .long ".sprintf "0x%X",(4<<26)|($vrt<<21)|($vra<<16)|($vrb<<11)|$op; -} -my $vcipher = sub { vcrypto_op(@_, 1288); }; -my $vcipherlast = sub { vcrypto_op(@_, 1289); }; -my $vncipher = sub { vcrypto_op(@_, 1352); }; -my $vncipherlast= sub { vcrypto_op(@_, 1353); }; -my $vsbox = sub { vcrypto_op(@_, 0, 1480); }; -my $vshasigmad = sub { my ($st,$six)=splice(@_,-2); vcrypto_op(@_, $st<<4|$six, 1730); }; -my $vshasigmaw = sub { my ($st,$six)=splice(@_,-2); vcrypto_op(@_, $st<<4|$six, 1666); }; -my $vpmsumb = sub { vcrypto_op(@_, 1032); }; -my $vpmsumd = sub { vcrypto_op(@_, 1224); }; -my $vpmsubh = sub { vcrypto_op(@_, 1096); }; -my $vpmsumw = sub { vcrypto_op(@_, 1160); }; -my $vaddudm = sub { vcrypto_op(@_, 192); }; -my $vadduqm = sub { vcrypto_op(@_, 256); }; - -my $mtsle = sub { - my ($f, $arg) = @_; - " .long ".sprintf "0x%X",(31<<26)|($arg<<21)|(147*2); -}; - -print "#include \n" if $flavour =~ /linux/; - -while($line=<>) { - - $line =~ s|[#!;].*$||; # get rid of asm-style comments... - $line =~ s|/\*.*\*/||; # ... and C-style comments... - $line =~ s|^\s+||; # ... and skip white spaces in beginning... - $line =~ s|\s+$||; # ... and at the end - - { - $line =~ s|\b\.L(\w+)|L$1|g; # common denominator for Locallabel - $line =~ s|\bL(\w+)|\.L$1|g if ($dotinlocallabels); - } - - { - $line =~ s|^\s*(\.?)(\w+)([\.\+\-]?)\s*||; - my $c = $1; $c = "\t" if ($c eq ""); - my $mnemonic = $2; - my $f = $3; - my $opcode = eval("\$$mnemonic"); - $line =~ s/\b(c?[rf]|v|vs)([0-9]+)\b/$2/g if ($c ne "." 
and $flavour !~ /osx/); - if (ref($opcode) eq 'CODE') { $line = &$opcode($f,split(',',$line)); } - elsif ($mnemonic) { $line = $c.$mnemonic.$f."\t".$line; } - } - - print $line if ($line); - print "\n"; -} - -close STDOUT; From 23a22e831ed4e6aa0831312e8cc8b7c60a657f60 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 3 Jan 2024 17:26:02 +0100 Subject: [PATCH 03/79] crypto: qat - avoid memcpy() overflow warning The use of array_size() leads gcc to assume the memcpy() can have a larger limit than actually possible, which triggers a string fortification warning: In file included from include/linux/string.h:296, from include/linux/bitmap.h:12, from include/linux/cpumask.h:12, from include/linux/sched.h:16, from include/linux/delay.h:23, from include/linux/iopoll.h:12, from drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c:3: In function 'fortify_memcpy_chk', inlined from 'adf_gen4_init_thd2arb_map' at drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c:401:3: include/linux/fortify-string.h:579:4: error: call to '__write_overflow_field' declared with attribute warning: detected write beyond size of field (1st parameter); maybe use struct_group()? [-Werror=attribute-warning] 579 | __write_overflow_field(p_size_field, size); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ include/linux/fortify-string.h:588:4: error: call to '__read_overflow2_field' declared with attribute warning: detected read beyond size of field (2nd parameter); maybe use struct_group()? [-Werror=attribute-warning] 588 | __read_overflow2_field(q_size_field, size); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Add an explicit range check to avoid this. Fixes: 5da6a2d5353e ("crypto: qat - generate dynamically arbiter mappings") Signed-off-by: Arnd Bergmann Acked-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index 9985683056d5..f752653ccb47 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -398,6 +398,9 @@ int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev) ADF_GEN4_ADMIN_ACCELENGINES; if (srv_id == SVC_DCC) { + if (ae_cnt > ICP_QAT_HW_AE_DELIMITER) + return -EINVAL; + memcpy(thd2arb_map, thrd_to_arb_map_dcc, array_size(sizeof(*thd2arb_map), ae_cnt)); return 0; From 5d7e1c411c0d703e51cc812a3dac22f1f0b11253 Mon Sep 17 00:00:00 2001 From: Luca Weiss Date: Fri, 5 Jan 2024 17:15:43 +0100 Subject: [PATCH 04/79] dt-bindings: qcom-qce: Add compatible for SM6350 Add a compatible for the crypto block found on the SM6350 SoC. 
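(As a hedged aside: a new compatible normally takes effect through a
driver's OF match table or through an existing fallback compatible in
the binding; this patch only touches the binding document, so the
match-table entry sketched below is purely hypothetical, for
illustration.)

static const struct of_device_id qce_of_match[] = {
	{ .compatible = "qcom,sm6350-qce" },	/* hypothetical entry */
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qce_of_match);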
Signed-off-by: Luca Weiss Acked-by: Krzysztof Kozlowski Signed-off-by: Herbert Xu --- Documentation/devicetree/bindings/crypto/qcom-qce.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/crypto/qcom-qce.yaml b/Documentation/devicetree/bindings/crypto/qcom-qce.yaml index a48bd381063a..e285e382d4ec 100644 --- a/Documentation/devicetree/bindings/crypto/qcom-qce.yaml +++ b/Documentation/devicetree/bindings/crypto/qcom-qce.yaml @@ -45,6 +45,7 @@ properties: - items: - enum: - qcom,sc7280-qce + - qcom,sm6350-qce - qcom,sm8250-qce - qcom,sm8350-qce - qcom,sm8450-qce From 4bb439e8562d36af715f2c22d23c5c3ad5563061 Mon Sep 17 00:00:00 2001 From: Clay Chang Date: Sun, 7 Jan 2024 21:28:42 +0800 Subject: [PATCH 05/79] KEYS: include header for EINVAL definition This patch includes linux/errno.h to address the issue of 'EINVAL' being undeclared. Signed-off-by: Clay Chang Signed-off-by: Herbert Xu --- include/crypto/public_key.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h index 462f8a34cdf8..b7f308977c84 100644 --- a/include/crypto/public_key.h +++ b/include/crypto/public_key.h @@ -10,6 +10,7 @@ #ifndef _LINUX_PUBLIC_KEY_H #define _LINUX_PUBLIC_KEY_H +#include #include #include From 3274819b3c81c18c01e3c0e0ea726870ec237ac0 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Mon, 8 Jan 2024 16:53:48 -0600 Subject: [PATCH 06/79] crypto: iaa - Remove header table code The header table and related code is currently unused - it was included and used for canned mode, but canned mode has been removed, so this code can be safely removed as well. This indirectly fixes a bug reported by Dan Carpenter. Reported-by: Dan Carpenter Closes: https://lore.kernel.org/linux-crypto/b2e0bd974981291e16882686a2b9b1db3986abe4.camel@linux.intel.com/T/#m4403253d6a4347a925fab4fc1cdb4ef7c095fb86 Signed-off-by: Tom Zanussi Reviewed-by: Dave Jiang Signed-off-by: Herbert Xu --- drivers/crypto/intel/iaa/iaa_crypto.h | 25 ---- .../crypto/intel/iaa/iaa_crypto_comp_fixed.c | 1 - drivers/crypto/intel/iaa/iaa_crypto_main.c | 108 +----------------- 3 files changed, 3 insertions(+), 131 deletions(-) diff --git a/drivers/crypto/intel/iaa/iaa_crypto.h b/drivers/crypto/intel/iaa/iaa_crypto.h index 014420f7beb0..2524091a5f70 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto.h +++ b/drivers/crypto/intel/iaa/iaa_crypto.h @@ -59,10 +59,8 @@ struct iaa_device_compression_mode { const char *name; struct aecs_comp_table_record *aecs_comp_table; - struct aecs_decomp_table_record *aecs_decomp_table; dma_addr_t aecs_comp_table_dma_addr; - dma_addr_t aecs_decomp_table_dma_addr; }; /* Representation of IAA device with wqs, populated by probe */ @@ -107,23 +105,6 @@ struct aecs_comp_table_record { u32 reserved_padding[2]; } __packed; -/* AECS for decompress */ -struct aecs_decomp_table_record { - u32 crc; - u32 xor_checksum; - u32 low_filter_param; - u32 high_filter_param; - u32 output_mod_idx; - u32 drop_init_decomp_out_bytes; - u32 reserved[36]; - u32 output_accum_data[2]; - u32 out_bits_valid; - u32 bit_off_indexing; - u32 input_accum_data[64]; - u8 size_qw[32]; - u32 decomp_state[1220]; -} __packed; - int iaa_aecs_init_fixed(void); void iaa_aecs_cleanup_fixed(void); @@ -136,9 +117,6 @@ struct iaa_compression_mode { int ll_table_size; u32 *d_table; int d_table_size; - u32 *header_table; - int header_table_size; - u16 gen_decomp_table_flags; iaa_dev_comp_init_fn_t init; iaa_dev_comp_free_fn_t free; }; @@ -148,9 +126,6 @@ int add_iaa_compression_mode(const char 
*name, int ll_table_size, const u32 *d_table, int d_table_size, - const u8 *header_table, - int header_table_size, - u16 gen_decomp_table_flags, iaa_dev_comp_init_fn_t init, iaa_dev_comp_free_fn_t free); diff --git a/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c b/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c index 45cf5d74f0fb..19d9a333ac49 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c @@ -78,7 +78,6 @@ int iaa_aecs_init_fixed(void) sizeof(fixed_ll_sym), fixed_d_sym, sizeof(fixed_d_sym), - NULL, 0, 0, init_fixed_mode, NULL); if (!ret) pr_debug("IAA fixed compression mode initialized\n"); diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index dfd3baf0a8d8..39a5fc905c4d 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -258,16 +258,14 @@ static void free_iaa_compression_mode(struct iaa_compression_mode *mode) kfree(mode->name); kfree(mode->ll_table); kfree(mode->d_table); - kfree(mode->header_table); kfree(mode); } /* - * IAA Compression modes are defined by an ll_table, a d_table, and an - * optional header_table. These tables are typically generated and - * captured using statistics collected from running actual - * compress/decompress workloads. + * IAA Compression modes are defined by an ll_table and a d_table. + * These tables are typically generated and captured using statistics + * collected from running actual compress/decompress workloads. * * A module or other kernel code can add and remove compression modes * with a given name using the exported @add_iaa_compression_mode() @@ -315,9 +313,6 @@ EXPORT_SYMBOL_GPL(remove_iaa_compression_mode); * @ll_table_size: The ll table size in bytes * @d_table: The d table * @d_table_size: The d table size in bytes - * @header_table: Optional header table - * @header_table_size: Optional header table size in bytes - * @gen_decomp_table_flags: Otional flags used to generate the decomp table * @init: Optional callback function to init the compression mode data * @free: Optional callback function to free the compression mode data * @@ -330,9 +325,6 @@ int add_iaa_compression_mode(const char *name, int ll_table_size, const u32 *d_table, int d_table_size, - const u8 *header_table, - int header_table_size, - u16 gen_decomp_table_flags, iaa_dev_comp_init_fn_t init, iaa_dev_comp_free_fn_t free) { @@ -370,16 +362,6 @@ int add_iaa_compression_mode(const char *name, mode->d_table_size = d_table_size; } - if (header_table) { - mode->header_table = kzalloc(header_table_size, GFP_KERNEL); - if (!mode->header_table) - goto free; - memcpy(mode->header_table, header_table, header_table_size); - mode->header_table_size = header_table_size; - } - - mode->gen_decomp_table_flags = gen_decomp_table_flags; - mode->init = init; mode->free = free; @@ -420,10 +402,6 @@ static void free_device_compression_mode(struct iaa_device *iaa_device, if (device_mode->aecs_comp_table) dma_free_coherent(dev, size, device_mode->aecs_comp_table, device_mode->aecs_comp_table_dma_addr); - if (device_mode->aecs_decomp_table) - dma_free_coherent(dev, size, device_mode->aecs_decomp_table, - device_mode->aecs_decomp_table_dma_addr); - kfree(device_mode); } @@ -440,73 +418,6 @@ static int check_completion(struct device *dev, bool compress, bool only_once); -static int decompress_header(struct iaa_device_compression_mode *device_mode, - struct iaa_compression_mode *mode, - struct idxd_wq *wq) -{ - dma_addr_t 
src_addr, src2_addr; - struct idxd_desc *idxd_desc; - struct iax_hw_desc *desc; - struct device *dev; - int ret = 0; - - idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); - if (IS_ERR(idxd_desc)) - return PTR_ERR(idxd_desc); - - desc = idxd_desc->iax_hw; - - dev = &wq->idxd->pdev->dev; - - src_addr = dma_map_single(dev, (void *)mode->header_table, - mode->header_table_size, DMA_TO_DEVICE); - dev_dbg(dev, "%s: mode->name %s, src_addr %llx, dev %p, src %p, slen %d\n", - __func__, mode->name, src_addr, dev, - mode->header_table, mode->header_table_size); - if (unlikely(dma_mapping_error(dev, src_addr))) { - dev_dbg(dev, "dma_map_single err, exiting\n"); - ret = -ENOMEM; - return ret; - } - - desc->flags = IAX_AECS_GEN_FLAG; - desc->opcode = IAX_OPCODE_DECOMPRESS; - - desc->src1_addr = (u64)src_addr; - desc->src1_size = mode->header_table_size; - - src2_addr = device_mode->aecs_decomp_table_dma_addr; - desc->src2_addr = (u64)src2_addr; - desc->src2_size = 1088; - dev_dbg(dev, "%s: mode->name %s, src2_addr %llx, dev %p, src2_size %d\n", - __func__, mode->name, desc->src2_addr, dev, desc->src2_size); - desc->max_dst_size = 0; // suppressed output - - desc->decompr_flags = mode->gen_decomp_table_flags; - - desc->priv = 0; - - desc->completion_addr = idxd_desc->compl_dma; - - ret = idxd_submit_desc(wq, idxd_desc); - if (ret) { - pr_err("%s: submit_desc failed ret=0x%x\n", __func__, ret); - goto out; - } - - ret = check_completion(dev, idxd_desc->iax_completion, false, false); - if (ret) - dev_dbg(dev, "%s: mode->name %s check_completion failed ret=%d\n", - __func__, mode->name, ret); - else - dev_dbg(dev, "%s: mode->name %s succeeded\n", __func__, - mode->name); -out: - dma_unmap_single(dev, src_addr, 1088, DMA_TO_DEVICE); - - return ret; -} - static int init_device_compression_mode(struct iaa_device *iaa_device, struct iaa_compression_mode *mode, int idx, struct idxd_wq *wq) @@ -529,24 +440,11 @@ static int init_device_compression_mode(struct iaa_device *iaa_device, if (!device_mode->aecs_comp_table) goto free; - device_mode->aecs_decomp_table = dma_alloc_coherent(dev, size, - &device_mode->aecs_decomp_table_dma_addr, GFP_KERNEL); - if (!device_mode->aecs_decomp_table) - goto free; - /* Add Huffman table to aecs */ memset(device_mode->aecs_comp_table, 0, sizeof(*device_mode->aecs_comp_table)); memcpy(device_mode->aecs_comp_table->ll_sym, mode->ll_table, mode->ll_table_size); memcpy(device_mode->aecs_comp_table->d_sym, mode->d_table, mode->d_table_size); - if (mode->header_table) { - ret = decompress_header(device_mode, mode, wq); - if (ret) { - pr_debug("iaa header decompression failed: ret=%d\n", ret); - goto free; - } - } - if (mode->init) { ret = mode->init(device_mode); if (ret) From cc342dba0d39f226f4a5e26194404c3785481470 Mon Sep 17 00:00:00 2001 From: Minjie Du Date: Tue, 9 Jan 2024 10:19:14 +0800 Subject: [PATCH 07/79] crypto: iaa - Remove unnecessary debugfs_create_dir() error check in iaa_crypto_debugfs_init() This patch removes the debugfs_create_dir() error checking in iaa_crypto_debugfs_init(). Because the debugfs_create_dir() is developed in a way that the caller can safely handle the errors that occur during the creation of DebugFS nodes. 
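Two properties of the debugfs API make this safe, sketched below with
hypothetical names: debugfs_create_dir() returns an ERR_PTR() on
failure rather than NULL (so a "!dir" test can never fire), and the
debugfs_create_*() helpers quietly cope with an error-valued parent.

#include <linux/debugfs.h>

static struct dentry *example_root;	/* hypothetical */
static u64 example_stat;

static void example_debugfs_init(void)
{
	/* An ERR_PTR() on failure, never NULL: no check needed. */
	example_root = debugfs_create_dir("example", NULL);

	/* Becomes a quiet no-op if example_root holds an error value. */
	debugfs_create_u64("stat", 0644, example_root, &example_stat);
}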
Signed-off-by: Minjie Du Acked-by: Tom Zanussi Signed-off-by: Herbert Xu --- drivers/crypto/intel/iaa/iaa_crypto_stats.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.c b/drivers/crypto/intel/iaa/iaa_crypto_stats.c index 2e3b7b73af20..cbf87d0effe3 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_stats.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.c @@ -275,8 +275,6 @@ int __init iaa_crypto_debugfs_init(void) return -ENODEV; iaa_crypto_debugfs_root = debugfs_create_dir("iaa_crypto", NULL); - if (!iaa_crypto_debugfs_root) - return -ENOMEM; debugfs_create_u64("max_comp_delay_ns", 0644, iaa_crypto_debugfs_root, &max_comp_delay_ns); From 8413fe3e7fdfb412baa9e31b3f1d2eaf6d21a373 Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Fri, 12 Jan 2024 18:25:45 +0800 Subject: [PATCH 08/79] crypto: hisilicon/qm - support get device state Support get device current state. The value 0 indicates that the device is busy, and the value 1 indicates that the device is idle. When the device is in suspended, 1 is returned. Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- Documentation/ABI/testing/debugfs-hisi-hpre | 7 +++++ Documentation/ABI/testing/debugfs-hisi-sec | 7 +++++ Documentation/ABI/testing/debugfs-hisi-zip | 7 +++++ drivers/crypto/hisilicon/debugfs.c | 29 +++++++++++++++++++++ 4 files changed, 50 insertions(+) diff --git a/Documentation/ABI/testing/debugfs-hisi-hpre b/Documentation/ABI/testing/debugfs-hisi-hpre index 8e8de49c5cc6..6ed9258605c7 100644 --- a/Documentation/ABI/testing/debugfs-hisi-hpre +++ b/Documentation/ABI/testing/debugfs-hisi-hpre @@ -111,6 +111,13 @@ Description: QM debug registers(regs) read hardware register value. This node is used to show the change of the qm register values. This node can be help users to check the change of register values. +What: /sys/kernel/debug/hisi_hpre//qm/qm_state +Date: Jan 2024 +Contact: linux-crypto@vger.kernel.org +Description: Dump the state of the device. + 0: busy, 1: idle. + Only available for PF, and take no other effect on HPRE. + What: /sys/kernel/debug/hisi_hpre//hpre_dfx/diff_regs Date: Mar 2022 Contact: linux-crypto@vger.kernel.org diff --git a/Documentation/ABI/testing/debugfs-hisi-sec b/Documentation/ABI/testing/debugfs-hisi-sec index deeefe2c735e..403f5de96318 100644 --- a/Documentation/ABI/testing/debugfs-hisi-sec +++ b/Documentation/ABI/testing/debugfs-hisi-sec @@ -91,6 +91,13 @@ Description: QM debug registers(regs) read hardware register value. This node is used to show the change of the qm register values. This node can be help users to check the change of register values. +What: /sys/kernel/debug/hisi_sec2//qm/qm_state +Date: Jan 2024 +Contact: linux-crypto@vger.kernel.org +Description: Dump the state of the device. + 0: busy, 1: idle. + Only available for PF, and take no other effect on SEC. + What: /sys/kernel/debug/hisi_sec2//sec_dfx/diff_regs Date: Mar 2022 Contact: linux-crypto@vger.kernel.org diff --git a/Documentation/ABI/testing/debugfs-hisi-zip b/Documentation/ABI/testing/debugfs-hisi-zip index 593714afaed2..2394e6a3cfe2 100644 --- a/Documentation/ABI/testing/debugfs-hisi-zip +++ b/Documentation/ABI/testing/debugfs-hisi-zip @@ -104,6 +104,13 @@ Description: QM debug registers(regs) read hardware register value. This node is used to show the change of the qm registers value. This node can be help users to check the change of register values. 
+What: /sys/kernel/debug/hisi_zip//qm/qm_state +Date: Jan 2024 +Contact: linux-crypto@vger.kernel.org +Description: Dump the state of the device. + 0: busy, 1: idle. + Only available for PF, and take no other effect on ZIP. + What: /sys/kernel/debug/hisi_zip//zip_dfx/diff_regs Date: Mar 2022 Contact: linux-crypto@vger.kernel.org diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c index 80ed4b2d209c..615c8e18d8b0 100644 --- a/drivers/crypto/hisilicon/debugfs.c +++ b/drivers/crypto/hisilicon/debugfs.c @@ -24,6 +24,8 @@ #define QM_DFX_QN_SHIFT 16 #define QM_DFX_CNT_CLR_CE 0x100118 #define QM_DBG_WRITE_LEN 1024 +#define QM_IN_IDLE_ST_REG 0x1040e4 +#define QM_IN_IDLE_STATE 0x1 static const char * const qm_debug_file_name[] = { [CURRENT_QM] = "current_qm", @@ -1001,6 +1003,30 @@ static int qm_diff_regs_show(struct seq_file *s, void *unused) } DEFINE_SHOW_ATTRIBUTE(qm_diff_regs); +static int qm_state_show(struct seq_file *s, void *unused) +{ + struct hisi_qm *qm = s->private; + u32 val; + int ret; + + /* If device is in suspended, directly return the idle state. */ + ret = hisi_qm_get_dfx_access(qm); + if (!ret) { + val = readl(qm->io_base + QM_IN_IDLE_ST_REG); + hisi_qm_put_dfx_access(qm); + } else if (ret == -EAGAIN) { + val = QM_IN_IDLE_STATE; + } else { + return ret; + } + + seq_printf(s, "%u\n", val); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(qm_state); + static ssize_t qm_status_read(struct file *filp, char __user *buffer, size_t count, loff_t *pos) { @@ -1072,6 +1098,9 @@ void hisi_qm_debug_init(struct hisi_qm *qm) /* only show this in PF */ if (qm->fun_type == QM_HW_PF) { + debugfs_create_file("qm_state", 0444, qm->debug.qm_d, + qm, &qm_state_fops); + qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM); for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++) qm_create_debugfs_file(qm, qm->debug.qm_d, i); From 8db78dd6cca25f558676b31c31a84771bfa5b51d Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Fri, 12 Jan 2024 18:25:46 +0800 Subject: [PATCH 09/79] crypto: hisilicon/qm - dump important registers values before resetting Read the values of some device registers before the device is reset, these values help analyze the cause of the device exception. 
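For illustration, a hedged sketch of how a debugfs_reg32 table like
this one is typically walked when taking such a snapshot (the function
and variable names below are invented, not taken from the driver):

#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/printk.h>

static void example_dump_regs(void __iomem *io_base,
			      const struct debugfs_reg32 *regs,
			      unsigned int nregs)
{
	unsigned int i;

	/* Read each register while its pre-reset value is still there. */
	for (i = 0; i < nregs; i++)
		pr_info("%s = 0x%08x\n", regs[i].name,
			readl(io_base + regs[i].offset));
}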
Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/debugfs.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c index 615c8e18d8b0..06e67eda409f 100644 --- a/drivers/crypto/hisilicon/debugfs.c +++ b/drivers/crypto/hisilicon/debugfs.c @@ -83,6 +83,30 @@ static const struct debugfs_reg32 qm_dfx_regs[] = { {"QM_DFX_FF_ST5 ", 0x1040dc}, {"QM_DFX_FF_ST6 ", 0x1040e0}, {"QM_IN_IDLE_ST ", 0x1040e4}, + {"QM_CACHE_CTL ", 0x100050}, + {"QM_TIMEOUT_CFG ", 0x100070}, + {"QM_DB_TIMEOUT_CFG ", 0x100074}, + {"QM_FLR_PENDING_TIME_CFG ", 0x100078}, + {"QM_ARUSR_MCFG1 ", 0x100088}, + {"QM_AWUSR_MCFG1 ", 0x100098}, + {"QM_AXI_M_CFG_ENABLE ", 0x1000B0}, + {"QM_RAS_CE_THRESHOLD ", 0x1000F8}, + {"QM_AXI_TIMEOUT_CTRL ", 0x100120}, + {"QM_AXI_TIMEOUT_STATUS ", 0x100124}, + {"QM_CQE_AGGR_TIMEOUT_CTRL ", 0x100144}, + {"ACC_RAS_MSI_INT_SEL ", 0x1040fc}, + {"QM_CQE_OUT ", 0x104100}, + {"QM_EQE_OUT ", 0x104104}, + {"QM_AEQE_OUT ", 0x104108}, + {"QM_DB_INFO0 ", 0x104180}, + {"QM_DB_INFO1 ", 0x104184}, + {"QM_AM_CTRL_GLOBAL ", 0x300000}, + {"QM_AM_CURR_PORT_STS ", 0x300100}, + {"QM_AM_CURR_TRANS_RETURN ", 0x300150}, + {"QM_AM_CURR_RD_MAX_TXID ", 0x300154}, + {"QM_AM_CURR_WR_MAX_TXID ", 0x300158}, + {"QM_AM_ALARM_RRESP ", 0x300180}, + {"QM_AM_ALARM_BRESP ", 0x300184}, }; static const struct debugfs_reg32 qm_vf_dfx_regs[] = { From 1bfde2c572b93e28a8f10212367aa0d5a8164c6a Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 16 Jan 2024 10:43:02 +0000 Subject: [PATCH 10/79] crypto: pcbc - remove redundant assignment to nbytes The assignment to nbytes is redundant, the while loop needs to just refer to the value in walk.nbytes and the value of nbytes is being re-assigned inside the loop on both paths of the following if-statement. Remove redundant assignment. Cleans up clang scan build warning: warning: Although the value stored to 'nbytes' is used in the enclosing expression, the value is never actually read from 'nbytes' [deadcode.DeadStores] Signed-off-by: Colin Ian King Signed-off-by: Herbert Xu --- crypto/pcbc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crypto/pcbc.c b/crypto/pcbc.c index 7030f59e46b6..ab469ba50c13 100644 --- a/crypto/pcbc.c +++ b/crypto/pcbc.c @@ -71,7 +71,7 @@ static int crypto_pcbc_encrypt(struct skcipher_request *req) err = skcipher_walk_virt(&walk, req, false); - while ((nbytes = walk.nbytes)) { + while (walk.nbytes) { if (walk.src.virt.addr == walk.dst.virt.addr) nbytes = crypto_pcbc_encrypt_inplace(req, &walk, cipher); @@ -138,7 +138,7 @@ static int crypto_pcbc_decrypt(struct skcipher_request *req) err = skcipher_walk_virt(&walk, req, false); - while ((nbytes = walk.nbytes)) { + while (walk.nbytes) { if (walk.src.virt.addr == walk.dst.virt.addr) nbytes = crypto_pcbc_decrypt_inplace(req, &walk, cipher); From f18483c987e16a96d9d04247da4050775d097132 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 18 Jan 2024 12:07:45 +0000 Subject: [PATCH 11/79] crypto: asymmetric_keys - remove redundant pointer secs The pointer secs is being assigned a value however secs is never read afterwards. The pointer secs is redundant and can be removed. 
Cleans up clang scan build warning: warning: Although the value stored to 'secs' is used in the enclosing expression, the value is never actually read from 'secs' [deadcode.DeadStores] Signed-off-by: Colin Ian King Reviewed-by: Jarkko Sakkinen Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/verify_pefile.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c index f440767bd727..2863984b6700 100644 --- a/crypto/asymmetric_keys/verify_pefile.c +++ b/crypto/asymmetric_keys/verify_pefile.c @@ -28,7 +28,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen, const struct pe32plus_opt_hdr *pe64; const struct data_directory *ddir; const struct data_dirent *dde; - const struct section_header *secs, *sec; + const struct section_header *sec; size_t cursor, datalen = pelen; kenter(""); @@ -110,7 +110,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen, ctx->n_sections = pe->sections; if (ctx->n_sections > (ctx->header_size - cursor) / sizeof(*sec)) return -ELIBBAD; - ctx->secs = secs = pebuf + cursor; + ctx->secs = pebuf + cursor; return 0; } From f722002441a1395d3e073f9fb5b6f1ab33ca9d66 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 18 Jan 2024 18:06:30 +0100 Subject: [PATCH 12/79] crypto: arm64/aes-ccm - Revert "Rewrite skcipher walker loop" This reverts commit 57ead1bf1c54, which updated the CCM code to only rely on walk.nbytes to check for failures returned from the skcipher walk API, mostly for the common good rather than to fix a particular problem in the code. This change introduces a problem of its own: the skcipher walk is started with the 'atomic' argument set to false, which means that the skcipher walk API is permitted to sleep. Subsequently, it invokes skcipher_walk_done() with preemption disabled on the final iteration of the loop. This appears to work by accident, but it is arguably a bad example, and providing a better example was the point of the original patch. Given that future changes to the CCM code will rely on the original behavior of entering the loop even for zero sized inputs, let's just revert this change entirely, and proceed from there. 
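To make the hazard concrete, here is a simplified sketch of the
pattern being reverted, condensed from the lines removed in the diff
below (not a complete function):

	/* the 'false' argument allows the walk to sleep */
	err = skcipher_walk_aead_encrypt(&walk, req, false);

	kernel_neon_begin();		/* at the time, disabled preemption */
	while (walk.nbytes) {
		u32 tail = walk.nbytes % AES_BLOCK_SIZE;
		bool final = walk.nbytes == walk.total;

		if (final)
			tail = 0;

		/* ... CTR-encrypt and MAC walk.nbytes - tail bytes ... */

		if (!final)
			kernel_neon_end();
		/* on the final pass, this sleep-capable call still runs
		 * inside the NEON section */
		err = skcipher_walk_done(&walk, tail);
		if (!final)
			kernel_neon_begin();
	}
	kernel_neon_end();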
Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/arm64/crypto/aes-ce-ccm-glue.c | 57 ++++++++++++++++------------- 1 file changed, 31 insertions(+), 26 deletions(-) diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c index 25cd3808ecbe..c4f14415f5f0 100644 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c @@ -161,39 +161,43 @@ static int ccm_encrypt(struct aead_request *req) memcpy(buf, req->iv, AES_BLOCK_SIZE); err = skcipher_walk_aead_encrypt(&walk, req, false); + if (unlikely(err)) + return err; kernel_neon_begin(); if (req->assoclen) ccm_calculate_auth_mac(req, mac); - while (walk.nbytes) { + do { u32 tail = walk.nbytes % AES_BLOCK_SIZE; - bool final = walk.nbytes == walk.total; - if (final) + if (walk.nbytes == walk.total) tail = 0; ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr, walk.nbytes - tail, ctx->key_enc, num_rounds(ctx), mac, walk.iv); - if (!final) - kernel_neon_end(); - err = skcipher_walk_done(&walk, tail); - if (!final) - kernel_neon_begin(); - } + if (walk.nbytes == walk.total) + ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx)); - ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx)); + kernel_neon_end(); - kernel_neon_end(); + if (walk.nbytes) { + err = skcipher_walk_done(&walk, tail); + if (unlikely(err)) + return err; + if (unlikely(walk.nbytes)) + kernel_neon_begin(); + } + } while (walk.nbytes); /* copy authtag to end of dst */ scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen, crypto_aead_authsize(aead), 1); - return err; + return 0; } static int ccm_decrypt(struct aead_request *req) @@ -215,36 +219,37 @@ static int ccm_decrypt(struct aead_request *req) memcpy(buf, req->iv, AES_BLOCK_SIZE); err = skcipher_walk_aead_decrypt(&walk, req, false); + if (unlikely(err)) + return err; kernel_neon_begin(); if (req->assoclen) ccm_calculate_auth_mac(req, mac); - while (walk.nbytes) { + do { u32 tail = walk.nbytes % AES_BLOCK_SIZE; - bool final = walk.nbytes == walk.total; - if (final) + if (walk.nbytes == walk.total) tail = 0; ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr, walk.nbytes - tail, ctx->key_enc, num_rounds(ctx), mac, walk.iv); - if (!final) - kernel_neon_end(); - err = skcipher_walk_done(&walk, tail); - if (!final) - kernel_neon_begin(); - } + if (walk.nbytes == walk.total) + ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx)); - ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx)); + kernel_neon_end(); - kernel_neon_end(); - - if (unlikely(err)) - return err; + if (walk.nbytes) { + err = skcipher_walk_done(&walk, tail); + if (unlikely(err)) + return err; + if (unlikely(walk.nbytes)) + kernel_neon_begin(); + } + } while (walk.nbytes); /* compare calculated auth tag with the stored one */ scatterwalk_map_and_copy(buf, req->src, From 88c6d50f649b2805bbdfe0b616892f93db47e4fa Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 18 Jan 2024 18:06:31 +0100 Subject: [PATCH 13/79] crypto: arm64/aes-ccm - Keep NEON enabled during skcipher walk Now that kernel mode NEON no longer disables preemption, we no longer have to take care to disable and re-enable use of the NEON when calling into the skcipher walk API. So just keep it enabled until done. 
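In hedged, simplified form, the loop then keeps a single begin/end
pair around the entire walk (cf. the diff below):

	err = skcipher_walk_aead_encrypt(&walk, req, false);
	if (unlikely(err))
		return err;

	kernel_neon_begin();
	do {
		u32 tail = walk.nbytes % AES_BLOCK_SIZE;

		/* ... process walk.nbytes - tail bytes ... */

		if (walk.nbytes)
			err = skcipher_walk_done(&walk, tail);
	} while (walk.nbytes);
	kernel_neon_end();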
Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/arm64/crypto/aes-ce-ccm-glue.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c index c4f14415f5f0..b177ebea7d09 100644 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c @@ -182,17 +182,16 @@ static int ccm_encrypt(struct aead_request *req) if (walk.nbytes == walk.total) ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx)); - kernel_neon_end(); - if (walk.nbytes) { err = skcipher_walk_done(&walk, tail); - if (unlikely(err)) - return err; - if (unlikely(walk.nbytes)) - kernel_neon_begin(); } } while (walk.nbytes); + kernel_neon_end(); + + if (unlikely(err)) + return err; + /* copy authtag to end of dst */ scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen, crypto_aead_authsize(aead), 1); @@ -240,17 +239,16 @@ static int ccm_decrypt(struct aead_request *req) if (walk.nbytes == walk.total) ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx)); - kernel_neon_end(); - if (walk.nbytes) { err = skcipher_walk_done(&walk, tail); - if (unlikely(err)) - return err; - if (unlikely(walk.nbytes)) - kernel_neon_begin(); } } while (walk.nbytes); + kernel_neon_end(); + + if (unlikely(err)) + return err; + /* compare calculated auth tag with the stored one */ scatterwalk_map_and_copy(buf, req->src, req->assoclen + req->cryptlen - authsize, From 97c4c10dafcd0d8c19bf01743df4b75dbdc484d7 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 18 Jan 2024 18:06:32 +0100 Subject: [PATCH 14/79] crypto: arm64/aes-ccm - Pass short inputs via stack buffer In preparation for optimizing the CCM core asm code using permutation vectors and overlapping loads and stores, ensure that inputs shorter than the size of a AES block are passed via a buffer on the stack, in a way that positions the data at the end of a 16 byte buffer. This removes the need for the asm code to reason about a rare corner case where the tail of the data cannot be read/written using a single NEON load/store instruction. While at it, tweak the copyright header and authorship to bring it up to date. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/arm64/crypto/aes-ce-ccm-glue.c | 57 ++++++++++++++++++++--------- 1 file changed, 40 insertions(+), 17 deletions(-) diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c index b177ebea7d09..4710e59075f5 100644 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c @@ -1,8 +1,11 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * aes-ccm-glue.c - AES-CCM transform for ARMv8 with Crypto Extensions + * aes-ce-ccm-glue.c - AES-CCM transform for ARMv8 with Crypto Extensions * - * Copyright (C) 2013 - 2017 Linaro Ltd + * Copyright (C) 2013 - 2017 Linaro Ltd. 
+ * Copyright (C) 2024 Google LLC + * + * Author: Ard Biesheuvel */ #include @@ -149,7 +152,7 @@ static int ccm_encrypt(struct aead_request *req) struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead); struct skcipher_walk walk; u8 __aligned(8) mac[AES_BLOCK_SIZE]; - u8 buf[AES_BLOCK_SIZE]; + u8 orig_iv[AES_BLOCK_SIZE]; u32 len = req->cryptlen; int err; @@ -158,7 +161,7 @@ static int ccm_encrypt(struct aead_request *req) return err; /* preserve the original iv for the final round */ - memcpy(buf, req->iv, AES_BLOCK_SIZE); + memcpy(orig_iv, req->iv, AES_BLOCK_SIZE); err = skcipher_walk_aead_encrypt(&walk, req, false); if (unlikely(err)) @@ -171,16 +174,26 @@ static int ccm_encrypt(struct aead_request *req) do { u32 tail = walk.nbytes % AES_BLOCK_SIZE; + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + u8 buf[AES_BLOCK_SIZE]; if (walk.nbytes == walk.total) tail = 0; - ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr, - walk.nbytes - tail, ctx->key_enc, - num_rounds(ctx), mac, walk.iv); + if (unlikely(walk.nbytes < AES_BLOCK_SIZE)) + src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes], + src, walk.nbytes); + + ce_aes_ccm_encrypt(dst, src, walk.nbytes - tail, + ctx->key_enc, num_rounds(ctx), + mac, walk.iv); + + if (unlikely(walk.nbytes < AES_BLOCK_SIZE)) + memcpy(walk.dst.virt.addr, dst, walk.nbytes); if (walk.nbytes == walk.total) - ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx)); + ce_aes_ccm_final(mac, orig_iv, ctx->key_enc, num_rounds(ctx)); if (walk.nbytes) { err = skcipher_walk_done(&walk, tail); @@ -206,7 +219,7 @@ static int ccm_decrypt(struct aead_request *req) unsigned int authsize = crypto_aead_authsize(aead); struct skcipher_walk walk; u8 __aligned(8) mac[AES_BLOCK_SIZE]; - u8 buf[AES_BLOCK_SIZE]; + u8 orig_iv[AES_BLOCK_SIZE]; u32 len = req->cryptlen - authsize; int err; @@ -215,7 +228,7 @@ static int ccm_decrypt(struct aead_request *req) return err; /* preserve the original iv for the final round */ - memcpy(buf, req->iv, AES_BLOCK_SIZE); + memcpy(orig_iv, req->iv, AES_BLOCK_SIZE); err = skcipher_walk_aead_decrypt(&walk, req, false); if (unlikely(err)) @@ -228,16 +241,26 @@ static int ccm_decrypt(struct aead_request *req) do { u32 tail = walk.nbytes % AES_BLOCK_SIZE; + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + u8 buf[AES_BLOCK_SIZE]; if (walk.nbytes == walk.total) tail = 0; - ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr, - walk.nbytes - tail, ctx->key_enc, - num_rounds(ctx), mac, walk.iv); + if (unlikely(walk.nbytes < AES_BLOCK_SIZE)) + src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes], + src, walk.nbytes); + + ce_aes_ccm_decrypt(dst, src, walk.nbytes - tail, + ctx->key_enc, num_rounds(ctx), + mac, walk.iv); + + if (unlikely(walk.nbytes < AES_BLOCK_SIZE)) + memcpy(walk.dst.virt.addr, dst, walk.nbytes); if (walk.nbytes == walk.total) - ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx)); + ce_aes_ccm_final(mac, orig_iv, ctx->key_enc, num_rounds(ctx)); if (walk.nbytes) { err = skcipher_walk_done(&walk, tail); @@ -250,11 +273,11 @@ static int ccm_decrypt(struct aead_request *req) return err; /* compare calculated auth tag with the stored one */ - scatterwalk_map_and_copy(buf, req->src, + scatterwalk_map_and_copy(orig_iv, req->src, req->assoclen + req->cryptlen - authsize, authsize, 0); - if (crypto_memneq(mac, buf, authsize)) + if (crypto_memneq(mac, orig_iv, authsize)) return -EBADMSG; return 0; } @@ -293,6 +316,6 @@ module_init(aes_mod_init); module_exit(aes_mod_exit); 
MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions"); -MODULE_AUTHOR("Ard Biesheuvel "); +MODULE_AUTHOR("Ard Biesheuvel "); MODULE_LICENSE("GPL v2"); MODULE_ALIAS_CRYPTO("ccm(aes)"); From c131098d6d9c5d0a580456f527dedefaf61acb7b Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 18 Jan 2024 18:06:33 +0100 Subject: [PATCH 15/79] crypto: arm64/aes-ccm - Replace bytewise tail handling with NEON permute Implement the CCM tail handling using a single sequence that uses permute vectors and overlapping loads and stores, rather than going over the tail byte by byte in a loop, and using scalar operations. This is more efficient, even though the measured speedup is only around 1-2% on the CPUs I have tried. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/arm64/crypto/aes-ce-ccm-core.S | 59 +++++++++++++++++++---------- 1 file changed, 38 insertions(+), 21 deletions(-) diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S index b03f7f71f893..b21a9b759ab2 100644 --- a/arch/arm64/crypto/aes-ce-ccm-core.S +++ b/arch/arm64/crypto/aes-ce-ccm-core.S @@ -1,8 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * aesce-ccm-core.S - AES-CCM transform for ARMv8 with Crypto Extensions + * aes-ce-ccm-core.S - AES-CCM transform for ARMv8 with Crypto Extensions * - * Copyright (C) 2013 - 2017 Linaro Ltd + * Copyright (C) 2013 - 2017 Linaro Ltd. + * Copyright (C) 2024 Google LLC + * + * Author: Ard Biesheuvel */ #include @@ -168,13 +171,13 @@ CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */ ld1 {v2.16b}, [x1], #16 /* load next input block */ .if \enc == 1 eor v2.16b, v2.16b, v5.16b /* final round enc+mac */ - eor v1.16b, v1.16b, v2.16b /* xor with crypted ctr */ + eor v6.16b, v1.16b, v2.16b /* xor with crypted ctr */ .else eor v2.16b, v2.16b, v1.16b /* xor with crypted ctr */ - eor v1.16b, v2.16b, v5.16b /* final round enc */ + eor v6.16b, v2.16b, v5.16b /* final round enc */ .endif eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */ - st1 {v1.16b}, [x0], #16 /* write output block */ + st1 {v6.16b}, [x0], #16 /* write output block */ bne 0b CPU_LE( rev x8, x8 ) st1 {v0.16b}, [x5] /* store mac */ @@ -183,25 +186,31 @@ CPU_LE( rev x8, x8 ) 6: eor v0.16b, v0.16b, v5.16b /* final round mac */ eor v1.16b, v1.16b, v5.16b /* final round enc */ - st1 {v0.16b}, [x5] /* store mac */ - add w2, w2, #16 /* process partial tail block */ -7: ldrb w9, [x1], #1 /* get 1 byte of input */ - umov w6, v1.b[0] /* get top crypted ctr byte */ - umov w7, v0.b[0] /* get top mac byte */ + + add x1, x1, w2, sxtw /* rewind the input pointer (w2 < 0) */ + add x0, x0, w2, sxtw /* rewind the output pointer */ + + adr_l x8, .Lpermute /* load permute vectors */ + add x9, x8, w2, sxtw + sub x8, x8, w2, sxtw + ld1 {v7.16b-v8.16b}, [x9] + ld1 {v9.16b}, [x8] + + ld1 {v2.16b}, [x1] /* load a full block of input */ + tbl v1.16b, {v1.16b}, v7.16b /* move keystream to end of register */ .if \enc == 1 - eor w7, w7, w9 - eor w9, w9, w6 + tbl v7.16b, {v2.16b}, v9.16b /* copy plaintext to start of v7 */ + eor v2.16b, v2.16b, v1.16b /* encrypt partial input block */ .else - eor w9, w9, w6 - eor w7, w7, w9 + eor v2.16b, v2.16b, v1.16b /* decrypt partial input block */ + tbl v7.16b, {v2.16b}, v9.16b /* copy plaintext to start of v7 */ .endif - strb w9, [x0], #1 /* store out byte */ - strb w7, [x5], #1 /* store mac byte */ - subs w2, w2, #1 - beq 5b - ext v0.16b, v0.16b, v0.16b, #1 /* shift out mac byte */ - ext v1.16b, v1.16b, v1.16b, #1 /* shift out ctr byte */ - b 
7b + eor v0.16b, v0.16b, v7.16b /* fold plaintext into mac */ + tbx v2.16b, {v6.16b}, v8.16b /* insert output from previous iteration */ + + st1 {v0.16b}, [x5] /* store mac */ + st1 {v2.16b}, [x0] /* store output block */ + ret .endm /* @@ -219,3 +228,11 @@ SYM_FUNC_END(ce_aes_ccm_encrypt) SYM_FUNC_START(ce_aes_ccm_decrypt) aes_ccm_do_crypt 0 SYM_FUNC_END(ce_aes_ccm_decrypt) + + .section ".rodata", "a" + .align 6 + .fill 15, 1, 0xff +.Lpermute: + .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 + .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf + .fill 15, 1, 0xff From 948ffc66e595e56c6ebf672db38d59c8a9efc108 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 18 Jan 2024 18:06:34 +0100 Subject: [PATCH 16/79] crypto: arm64/aes-ccm - Reuse existing MAC update for AAD input CCM combines the counter (CTR) encryption mode with a MAC based on the same block cipher. This MAC construction is a bit clunky: it invokes the block cipher in a way that cannot be parallelized, resulting in poor CPU pipeline efficiency. The arm64 CCM code mitigates this by interleaving the encryption and MAC at the AES round level, resulting in a substantial speedup. But this approach does not apply to the additional authenticated data (AAD) which is not encrypted. This means the special asm routine dealing with the AAD is not any better than the MAC update routine used by the arm64 AES block encryption driver, so let's reuse that, and drop the special AES-CCM version. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/arm64/crypto/Kconfig | 1 + arch/arm64/crypto/aes-ce-ccm-core.S | 71 ----------------------------- arch/arm64/crypto/aes-ce-ccm-glue.c | 49 ++++++++++++++++---- arch/arm64/crypto/aes-glue.c | 1 + 4 files changed, 43 insertions(+), 79 deletions(-) diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index eb7b423ba463..e7d9bd8e4709 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -268,6 +268,7 @@ config CRYPTO_AES_ARM64_CE_CCM depends on ARM64 && KERNEL_MODE_NEON select CRYPTO_ALGAPI select CRYPTO_AES_ARM64_CE + select CRYPTO_AES_ARM64_CE_BLK select CRYPTO_AEAD select CRYPTO_LIB_AES help diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S index b21a9b759ab2..0132872bd780 100644 --- a/arch/arm64/crypto/aes-ce-ccm-core.S +++ b/arch/arm64/crypto/aes-ce-ccm-core.S @@ -14,77 +14,6 @@ .text .arch armv8-a+crypto - /* - * u32 ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes, - * u32 macp, u8 const rk[], u32 rounds); - */ -SYM_FUNC_START(ce_aes_ccm_auth_data) - ld1 {v0.16b}, [x0] /* load mac */ - cbz w3, 1f - sub w3, w3, #16 - eor v1.16b, v1.16b, v1.16b -0: ldrb w7, [x1], #1 /* get 1 byte of input */ - subs w2, w2, #1 - add w3, w3, #1 - ins v1.b[0], w7 - ext v1.16b, v1.16b, v1.16b, #1 /* rotate in the input bytes */ - beq 8f /* out of input? */ - cbnz w3, 0b - eor v0.16b, v0.16b, v1.16b -1: ld1 {v3.4s}, [x4] /* load first round key */ - prfm pldl1strm, [x1] - cmp w5, #12 /* which key size? 
*/ - add x6, x4, #16 - sub w7, w5, #2 /* modified # of rounds */ - bmi 2f - bne 5f - mov v5.16b, v3.16b - b 4f -2: mov v4.16b, v3.16b - ld1 {v5.4s}, [x6], #16 /* load 2nd round key */ -3: aese v0.16b, v4.16b - aesmc v0.16b, v0.16b -4: ld1 {v3.4s}, [x6], #16 /* load next round key */ - aese v0.16b, v5.16b - aesmc v0.16b, v0.16b -5: ld1 {v4.4s}, [x6], #16 /* load next round key */ - subs w7, w7, #3 - aese v0.16b, v3.16b - aesmc v0.16b, v0.16b - ld1 {v5.4s}, [x6], #16 /* load next round key */ - bpl 3b - aese v0.16b, v4.16b - subs w2, w2, #16 /* last data? */ - eor v0.16b, v0.16b, v5.16b /* final round */ - bmi 6f - ld1 {v1.16b}, [x1], #16 /* load next input block */ - eor v0.16b, v0.16b, v1.16b /* xor with mac */ - bne 1b -6: st1 {v0.16b}, [x0] /* store mac */ - beq 10f - adds w2, w2, #16 - beq 10f - mov w3, w2 -7: ldrb w7, [x1], #1 - umov w6, v0.b[0] - eor w6, w6, w7 - strb w6, [x0], #1 - subs w2, w2, #1 - beq 10f - ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */ - b 7b -8: cbz w3, 91f - mov w7, w3 - add w3, w3, #16 -9: ext v1.16b, v1.16b, v1.16b, #1 - adds w7, w7, #1 - bne 9b -91: eor v0.16b, v0.16b, v1.16b - st1 {v0.16b}, [x0] -10: mov w0, w3 - ret -SYM_FUNC_END(ce_aes_ccm_auth_data) - /* * void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[], * u32 rounds); diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c index 4710e59075f5..ed3d79e05112 100644 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c @@ -18,6 +18,8 @@ #include "aes-ce-setkey.h" +MODULE_IMPORT_NS(CRYPTO_INTERNAL); + static int num_rounds(struct crypto_aes_ctx *ctx) { /* @@ -30,8 +32,9 @@ static int num_rounds(struct crypto_aes_ctx *ctx) return 6 + ctx->key_length / 4; } -asmlinkage u32 ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes, - u32 macp, u32 const rk[], u32 rounds); +asmlinkage u32 ce_aes_mac_update(u8 const in[], u32 const rk[], int rounds, + int blocks, u8 dg[], int enc_before, + int enc_after); asmlinkage void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes, u32 const rk[], u32 rounds, u8 mac[], @@ -97,6 +100,41 @@ static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen) return 0; } +static u32 ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes, + u32 macp, u32 const rk[], u32 rounds) +{ + int enc_after = (macp + abytes) % AES_BLOCK_SIZE; + + do { + u32 blocks = abytes / AES_BLOCK_SIZE; + + if (macp == AES_BLOCK_SIZE || (!macp && blocks > 0)) { + u32 rem = ce_aes_mac_update(in, rk, rounds, blocks, mac, + macp, enc_after); + u32 adv = (blocks - rem) * AES_BLOCK_SIZE; + + macp = enc_after ? 
0 : AES_BLOCK_SIZE; + in += adv; + abytes -= adv; + + if (unlikely(rem)) { + kernel_neon_end(); + kernel_neon_begin(); + macp = 0; + } + } else { + u32 l = min(AES_BLOCK_SIZE - macp, abytes); + + crypto_xor(&mac[macp], in, l); + in += l; + macp += l; + abytes -= l; + } + } while (abytes > 0); + + return macp; +} + static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) { struct crypto_aead *aead = crypto_aead_reqtfm(req); @@ -104,7 +142,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) struct __packed { __be16 l; __be32 h; u16 len; } ltag; struct scatter_walk walk; u32 len = req->assoclen; - u32 macp = 0; + u32 macp = AES_BLOCK_SIZE; /* prepend the AAD with a length tag */ if (len < 0xff00) { @@ -128,16 +166,11 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) scatterwalk_start(&walk, sg_next(walk.sg)); n = scatterwalk_clamp(&walk, len); } - n = min_t(u32, n, SZ_4K); /* yield NEON at least every 4k */ p = scatterwalk_map(&walk); macp = ce_aes_ccm_auth_data(mac, p, n, macp, ctx->key_enc, num_rounds(ctx)); - if (len / SZ_4K > (len - n) / SZ_4K) { - kernel_neon_end(); - kernel_neon_begin(); - } len -= n; scatterwalk_unmap(p); diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c index 162787c7aa86..a147e847a5a1 100644 --- a/arch/arm64/crypto/aes-glue.c +++ b/arch/arm64/crypto/aes-glue.c @@ -1048,6 +1048,7 @@ unregister_ciphers: #ifdef USE_V8_CRYPTO_EXTENSIONS module_cpu_feature_match(AES, aes_init); +EXPORT_SYMBOL_NS(ce_aes_mac_update, CRYPTO_INTERNAL); #else module_init(aes_init); EXPORT_SYMBOL(neon_aes_ecb_encrypt); From 565def1542ab6cbf8a03acb07e612036aa5b5a6b Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 18 Jan 2024 18:06:35 +0100 Subject: [PATCH 17/79] crypto: arm64/aes-ccm - Cache round keys and unroll AES loops The CCM code as originally written attempted to use as few NEON registers as possible, to avoid having to eagerly preserve/restore the entire NEON register file at every call to kernel_neon_begin/end. At that time, this API took a number of NEON registers as a parameter, and only preserved that many registers. Today, the NEON register file is restored lazily, and the old API is long gone. This means we can use as many NEON registers as we can make meaningful use of, which means in the AES case that we can keep all round keys in registers rather than reloading each of them for each AES block processed. On Cortex-A53, this results in a speedup of more than 50%. 
(From 4 cycles per byte to 2.6 cycles per byte) Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/arm64/crypto/aes-ce-ccm-core.S | 95 ++++++++++++----------------- 1 file changed, 38 insertions(+), 57 deletions(-) diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S index 0132872bd780..0ec59fc4ef3e 100644 --- a/arch/arm64/crypto/aes-ce-ccm-core.S +++ b/arch/arm64/crypto/aes-ce-ccm-core.S @@ -14,40 +14,46 @@ .text .arch armv8-a+crypto + .macro load_round_keys, rk, nr, tmp + sub w\tmp, \nr, #10 + add \tmp, \rk, w\tmp, sxtw #4 + ld1 {v10.4s-v13.4s}, [\rk] + ld1 {v14.4s-v17.4s}, [\tmp], #64 + ld1 {v18.4s-v21.4s}, [\tmp], #64 + ld1 {v3.4s-v5.4s}, [\tmp] + .endm + + .macro dround, va, vb, vk + aese \va\().16b, \vk\().16b + aesmc \va\().16b, \va\().16b + aese \vb\().16b, \vk\().16b + aesmc \vb\().16b, \vb\().16b + .endm + + .macro aes_encrypt, va, vb, nr + tbz \nr, #2, .L\@ + dround \va, \vb, v10 + dround \va, \vb, v11 + tbz \nr, #1, .L\@ + dround \va, \vb, v12 + dround \va, \vb, v13 +.L\@: .irp v, v14, v15, v16, v17, v18, v19, v20, v21, v3 + dround \va, \vb, \v + .endr + aese \va\().16b, v4.16b + aese \vb\().16b, v4.16b + .endm + /* * void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[], * u32 rounds); */ SYM_FUNC_START(ce_aes_ccm_final) - ld1 {v3.4s}, [x2], #16 /* load first round key */ ld1 {v0.16b}, [x0] /* load mac */ - cmp w3, #12 /* which key size? */ - sub w3, w3, #2 /* modified # of rounds */ ld1 {v1.16b}, [x1] /* load 1st ctriv */ - bmi 0f - bne 3f - mov v5.16b, v3.16b - b 2f -0: mov v4.16b, v3.16b -1: ld1 {v5.4s}, [x2], #16 /* load next round key */ - aese v0.16b, v4.16b - aesmc v0.16b, v0.16b - aese v1.16b, v4.16b - aesmc v1.16b, v1.16b -2: ld1 {v3.4s}, [x2], #16 /* load next round key */ - aese v0.16b, v5.16b - aesmc v0.16b, v0.16b - aese v1.16b, v5.16b - aesmc v1.16b, v1.16b -3: ld1 {v4.4s}, [x2], #16 /* load next round key */ - subs w3, w3, #3 - aese v0.16b, v3.16b - aesmc v0.16b, v0.16b - aese v1.16b, v3.16b - aesmc v1.16b, v1.16b - bpl 1b - aese v0.16b, v4.16b - aese v1.16b, v4.16b + + aes_encrypt v0, v1, w3 + /* final round key cancels out */ eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */ st1 {v0.16b}, [x0] /* store result */ @@ -55,6 +61,8 @@ SYM_FUNC_START(ce_aes_ccm_final) SYM_FUNC_END(ce_aes_ccm_final) .macro aes_ccm_do_crypt,enc + load_round_keys x3, w4, x10 + cbz x2, 5f ldr x8, [x6, #8] /* load lower ctr */ ld1 {v0.16b}, [x5] /* load mac */ @@ -64,37 +72,10 @@ CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */ prfm pldl1strm, [x1] add x8, x8, #1 rev x9, x8 - cmp w4, #12 /* which key size? */ - sub w7, w4, #2 /* get modified # of rounds */ ins v1.d[1], x9 /* no carry in lower ctr */ - ld1 {v3.4s}, [x3] /* load first round key */ - add x10, x3, #16 - bmi 1f - bne 4f - mov v5.16b, v3.16b - b 3f -1: mov v4.16b, v3.16b - ld1 {v5.4s}, [x10], #16 /* load 2nd round key */ -2: /* inner loop: 3 rounds, 2x interleaved */ - aese v0.16b, v4.16b - aesmc v0.16b, v0.16b - aese v1.16b, v4.16b - aesmc v1.16b, v1.16b -3: ld1 {v3.4s}, [x10], #16 /* load next round key */ - aese v0.16b, v5.16b - aesmc v0.16b, v0.16b - aese v1.16b, v5.16b - aesmc v1.16b, v1.16b -4: ld1 {v4.4s}, [x10], #16 /* load next round key */ - subs w7, w7, #3 - aese v0.16b, v3.16b - aesmc v0.16b, v0.16b - aese v1.16b, v3.16b - aesmc v1.16b, v1.16b - ld1 {v5.4s}, [x10], #16 /* load next round key */ - bpl 2b - aese v0.16b, v4.16b - aese v1.16b, v4.16b + + aes_encrypt v0, v1, w4 + subs w2, w2, #16 bmi 6f /* partial block? 
*/ ld1 {v2.16b}, [x1], #16 /* load next input block */ From 715052884929cc29e2d09dbf1d530c6ada306ce8 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 18 Jan 2024 18:06:36 +0100 Subject: [PATCH 18/79] crypto: arm64/aes-ccm - Merge encrypt and decrypt tail handling The encryption and decryption code paths are mostly identical, except for a small difference where the plaintext input into the MAC is taken from either the input or the output block. We can factor this in quite easily using a vector bit select, and a few additional XORs, without the need for branches. This way, we can use the same tail handling logic on the encrypt and decrypt code paths, allowing further consolidation of the asm helpers in a subsequent patch. (In the main loop, adding just a handful of ALU instructions results in a noticeable performance hit [around 5% on Apple M2], so those routines are kept separate) Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/arm64/crypto/aes-ce-ccm-core.S | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S index 0ec59fc4ef3e..bf3a888a5615 100644 --- a/arch/arm64/crypto/aes-ce-ccm-core.S +++ b/arch/arm64/crypto/aes-ce-ccm-core.S @@ -77,7 +77,7 @@ CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */ aes_encrypt v0, v1, w4 subs w2, w2, #16 - bmi 6f /* partial block? */ + bmi ce_aes_ccm_crypt_tail ld1 {v2.16b}, [x1], #16 /* load next input block */ .if \enc == 1 eor v2.16b, v2.16b, v5.16b /* final round enc+mac */ @@ -93,8 +93,10 @@ CPU_LE( rev x8, x8 ) st1 {v0.16b}, [x5] /* store mac */ str x8, [x6, #8] /* store lsb end of ctr (BE) */ 5: ret + .endm -6: eor v0.16b, v0.16b, v5.16b /* final round mac */ +SYM_FUNC_START_LOCAL(ce_aes_ccm_crypt_tail) + eor v0.16b, v0.16b, v5.16b /* final round mac */ eor v1.16b, v1.16b, v5.16b /* final round enc */ add x1, x1, w2, sxtw /* rewind the input pointer (w2 < 0) */ @@ -108,20 +110,16 @@ CPU_LE( rev x8, x8 ) ld1 {v2.16b}, [x1] /* load a full block of input */ tbl v1.16b, {v1.16b}, v7.16b /* move keystream to end of register */ - .if \enc == 1 - tbl v7.16b, {v2.16b}, v9.16b /* copy plaintext to start of v7 */ - eor v2.16b, v2.16b, v1.16b /* encrypt partial input block */ - .else - eor v2.16b, v2.16b, v1.16b /* decrypt partial input block */ - tbl v7.16b, {v2.16b}, v9.16b /* copy plaintext to start of v7 */ - .endif - eor v0.16b, v0.16b, v7.16b /* fold plaintext into mac */ - tbx v2.16b, {v6.16b}, v8.16b /* insert output from previous iteration */ + eor v7.16b, v2.16b, v1.16b /* encrypt partial input block */ + bif v2.16b, v7.16b, v22.16b /* select plaintext */ + tbx v7.16b, {v6.16b}, v8.16b /* insert output from previous iteration */ + tbl v2.16b, {v2.16b}, v9.16b /* copy plaintext to start of v2 */ + eor v0.16b, v0.16b, v2.16b /* fold plaintext into mac */ st1 {v0.16b}, [x5] /* store mac */ - st1 {v2.16b}, [x0] /* store output block */ + st1 {v7.16b}, [x0] /* store output block */ ret - .endm +SYM_FUNC_END(ce_aes_ccm_crypt_tail) /* * void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes, @@ -132,10 +130,12 @@ CPU_LE( rev x8, x8 ) * u8 ctr[]); */ SYM_FUNC_START(ce_aes_ccm_encrypt) + movi v22.16b, #255 aes_ccm_do_crypt 1 SYM_FUNC_END(ce_aes_ccm_encrypt) SYM_FUNC_START(ce_aes_ccm_decrypt) + movi v22.16b, #0 aes_ccm_do_crypt 0 SYM_FUNC_END(ce_aes_ccm_decrypt) From f691d444f9a9094a5a89bb618495f2bfa9309f32 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 18 Jan 2024 18:06:37 +0100 Subject: [PATCH 19/79] 
crypto: arm64/aes-ccm - Merge finalization into en/decrypt asm helpers The C glue code already infers whether or not the current iteration is the final one, by comparing walk.nbytes with walk.total. This means we can easily inform the asm helpers of this as well, by conditionally passing a pointer to the original IV, which is used in the finalization of the MAC. This removes the need for a separate call into the asm code to perform the finalization. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/arm64/crypto/aes-ce-ccm-core.S | 40 ++++++++++++----------------- arch/arm64/crypto/aes-ce-ccm-glue.c | 27 +++++++++---------- 2 files changed, 29 insertions(+), 38 deletions(-) diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S index bf3a888a5615..f2624238fd95 100644 --- a/arch/arm64/crypto/aes-ce-ccm-core.S +++ b/arch/arm64/crypto/aes-ce-ccm-core.S @@ -44,28 +44,12 @@ aese \vb\().16b, v4.16b .endm - /* - * void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[], - * u32 rounds); - */ -SYM_FUNC_START(ce_aes_ccm_final) - ld1 {v0.16b}, [x0] /* load mac */ - ld1 {v1.16b}, [x1] /* load 1st ctriv */ - - aes_encrypt v0, v1, w3 - - /* final round key cancels out */ - eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */ - st1 {v0.16b}, [x0] /* store result */ - ret -SYM_FUNC_END(ce_aes_ccm_final) - .macro aes_ccm_do_crypt,enc load_round_keys x3, w4, x10 - cbz x2, 5f - ldr x8, [x6, #8] /* load lower ctr */ ld1 {v0.16b}, [x5] /* load mac */ + cbz x2, ce_aes_ccm_final + ldr x8, [x6, #8] /* load lower ctr */ CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */ 0: /* outer loop */ ld1 {v1.8b}, [x6] /* load upper ctr */ @@ -90,9 +74,10 @@ CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */ st1 {v6.16b}, [x0], #16 /* write output block */ bne 0b CPU_LE( rev x8, x8 ) - st1 {v0.16b}, [x5] /* store mac */ str x8, [x6, #8] /* store lsb end of ctr (BE) */ -5: ret + cbnz x7, ce_aes_ccm_final + st1 {v0.16b}, [x5] /* store mac */ + ret .endm SYM_FUNC_START_LOCAL(ce_aes_ccm_crypt_tail) @@ -116,18 +101,27 @@ SYM_FUNC_START_LOCAL(ce_aes_ccm_crypt_tail) tbl v2.16b, {v2.16b}, v9.16b /* copy plaintext to start of v2 */ eor v0.16b, v0.16b, v2.16b /* fold plaintext into mac */ - st1 {v0.16b}, [x5] /* store mac */ st1 {v7.16b}, [x0] /* store output block */ + cbz x7, 0f + +SYM_INNER_LABEL(ce_aes_ccm_final, SYM_L_LOCAL) + ld1 {v1.16b}, [x7] /* load 1st ctriv */ + + aes_encrypt v0, v1, w4 + + /* final round key cancels out */ + eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */ +0: st1 {v0.16b}, [x5] /* store result */ ret SYM_FUNC_END(ce_aes_ccm_crypt_tail) /* * void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes, * u8 const rk[], u32 rounds, u8 mac[], - * u8 ctr[]); + * u8 ctr[], u8 const final_iv[]); * void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes, * u8 const rk[], u32 rounds, u8 mac[], - * u8 ctr[]); + * u8 ctr[], u8 const final_iv[]); */ SYM_FUNC_START(ce_aes_ccm_encrypt) movi v22.16b, #255 diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c index ed3d79e05112..ce9b28e3c7d6 100644 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c @@ -38,14 +38,11 @@ asmlinkage u32 ce_aes_mac_update(u8 const in[], u32 const rk[], int rounds, asmlinkage void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes, u32 const rk[], u32 rounds, u8 mac[], - u8 ctr[]); + u8 ctr[], u8 const final_iv[]); asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes, u32 const rk[], u32 rounds, u8 
mac[], - u8 ctr[]); - -asmlinkage void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u32 const rk[], - u32 rounds); + u8 ctr[], u8 const final_iv[]); static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key, unsigned int key_len) @@ -210,9 +207,12 @@ static int ccm_encrypt(struct aead_request *req) const u8 *src = walk.src.virt.addr; u8 *dst = walk.dst.virt.addr; u8 buf[AES_BLOCK_SIZE]; + u8 *final_iv = NULL; - if (walk.nbytes == walk.total) + if (walk.nbytes == walk.total) { tail = 0; + final_iv = orig_iv; + } if (unlikely(walk.nbytes < AES_BLOCK_SIZE)) src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes], @@ -220,14 +220,11 @@ static int ccm_encrypt(struct aead_request *req) ce_aes_ccm_encrypt(dst, src, walk.nbytes - tail, ctx->key_enc, num_rounds(ctx), - mac, walk.iv); + mac, walk.iv, final_iv); if (unlikely(walk.nbytes < AES_BLOCK_SIZE)) memcpy(walk.dst.virt.addr, dst, walk.nbytes); - if (walk.nbytes == walk.total) - ce_aes_ccm_final(mac, orig_iv, ctx->key_enc, num_rounds(ctx)); - if (walk.nbytes) { err = skcipher_walk_done(&walk, tail); } @@ -277,9 +274,12 @@ static int ccm_decrypt(struct aead_request *req) const u8 *src = walk.src.virt.addr; u8 *dst = walk.dst.virt.addr; u8 buf[AES_BLOCK_SIZE]; + u8 *final_iv = NULL; - if (walk.nbytes == walk.total) + if (walk.nbytes == walk.total) { tail = 0; + final_iv = orig_iv; + } if (unlikely(walk.nbytes < AES_BLOCK_SIZE)) src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes], @@ -287,14 +287,11 @@ static int ccm_decrypt(struct aead_request *req) ce_aes_ccm_decrypt(dst, src, walk.nbytes - tail, ctx->key_enc, num_rounds(ctx), - mac, walk.iv); + mac, walk.iv, final_iv); if (unlikely(walk.nbytes < AES_BLOCK_SIZE)) memcpy(walk.dst.virt.addr, dst, walk.nbytes); - if (walk.nbytes == walk.total) - ce_aes_ccm_final(mac, orig_iv, ctx->key_enc, num_rounds(ctx)); - if (walk.nbytes) { err = skcipher_walk_done(&walk, tail); } From 61909cc831750a81c5063ad4bee2ef29f039c987 Mon Sep 17 00:00:00 2001 From: Wenkai Lin Date: Fri, 19 Jan 2024 16:11:07 +0800 Subject: [PATCH 20/79] crypto: hisilicon - Fix smp_processor_id() warnings Switch to raw_smp_processor_id() to prevent a number of warnings from kernel debugging. We do not care about preemption here, as the CPU number is only used as a poor man's load balancing or device selection. If preemption happens during an encrypt/decrypt operation, a small performance hit will occur, but everything will continue to work, so just ignore it. This commit is similar to e7a9b05ca4 ("crypto: cavium - Fix smp_processor_id() warnings").
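To make the reasoning concrete, here is a minimal sketch of the pattern (the helper name is hypothetical, not code from this patch): the CPU number is only used to derive a NUMA node for queue allocation, so a stale value after migration costs at most some memory locality.

	#include <linux/smp.h>
	#include <linux/topology.h>

	/* Hypothetical helper: pick an allocation node near the caller. */
	static int qp_alloc_node(void)
	{
		/*
		 * smp_processor_id() would trigger the CONFIG_DEBUG_PREEMPT
		 * check here, since this runs in preemptible context and the
		 * task may migrate right after the read.
		 * raw_smp_processor_id() skips the check; the worst case is
		 * allocating from a slightly less local node.
		 */
		return cpu_to_node(raw_smp_processor_id());
	}

The warning addressed by this change is reproduced below: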
[ 7538.874350] BUG: using smp_processor_id() in preemptible [00000000] code: af_alg06/8438 [ 7538.874368] caller is debug_smp_processor_id+0x1c/0x28 [ 7538.874373] CPU: 50 PID: 8438 Comm: af_alg06 Kdump: loaded Not tainted 5.10.0.pc+ #18 [ 7538.874377] Call trace: [ 7538.874387] dump_backtrace+0x0/0x210 [ 7538.874389] show_stack+0x2c/0x38 [ 7538.874392] dump_stack+0x110/0x164 [ 7538.874394] check_preemption_disabled+0xf4/0x108 [ 7538.874396] debug_smp_processor_id+0x1c/0x28 [ 7538.874406] sec_create_qps+0x24/0xe8 [hisi_sec2] [ 7538.874408] sec_ctx_base_init+0x20/0x4d8 [hisi_sec2] [ 7538.874411] sec_aead_ctx_init+0x68/0x180 [hisi_sec2] [ 7538.874413] sec_aead_sha256_ctx_init+0x28/0x38 [hisi_sec2] [ 7538.874421] crypto_aead_init_tfm+0x54/0x68 [ 7538.874423] crypto_create_tfm_node+0x6c/0x110 [ 7538.874424] crypto_alloc_tfm_node+0x74/0x288 [ 7538.874426] crypto_alloc_aead+0x40/0x50 [ 7538.874431] aead_bind+0x50/0xd0 [ 7538.874433] alg_bind+0x94/0x148 [ 7538.874439] __sys_bind+0x98/0x118 [ 7538.874441] __arm64_sys_bind+0x28/0x38 [ 7538.874445] do_el0_svc+0x88/0x258 [ 7538.874447] el0_svc+0x1c/0x28 [ 7538.874449] el0_sync_handler+0x8c/0xb8 [ 7538.874452] el0_sync+0x148/0x180 Signed-off-by: Wenkai Lin Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 2 +- drivers/crypto/hisilicon/sec2/sec_main.c | 2 +- drivers/crypto/hisilicon/zip/zip_main.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 3255b2a070c7..d93aa6630a57 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -440,7 +440,7 @@ MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); struct hisi_qp *hpre_create_qp(u8 type) { - int node = cpu_to_node(smp_processor_id()); + int node = cpu_to_node(raw_smp_processor_id()); struct hisi_qp *qp = NULL; int ret; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 7bb99381bbdf..efa957ece23d 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -374,7 +374,7 @@ void sec_destroy_qps(struct hisi_qp **qps, int qp_num) struct hisi_qp **sec_create_qps(void) { - int node = cpu_to_node(smp_processor_id()); + int node = cpu_to_node(raw_smp_processor_id()); u32 ctx_num = ctx_q_num; struct hisi_qp **qps; int ret; diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 479ba8a1d6b5..c065fd867161 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -454,7 +454,7 @@ MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids); int zip_create_qps(struct hisi_qp **qps, int qp_num, int node) { if (node == NUMA_NO_NODE) - node = cpu_to_node(smp_processor_id()); + node = cpu_to_node(raw_smp_processor_id()); return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps); } From 1dc0c1389d6afe4ef3e1d4054fb837b023ebf472 Mon Sep 17 00:00:00 2001 From: Erick Archer Date: Sun, 21 Jan 2024 16:34:07 +0100 Subject: [PATCH 21/79] crypto: sun8i-ce - Use kcalloc() instead of kzalloc() As noted in the "Deprecated Interfaces, Language Features, Attributes, and Conventions" documentation [1], size calculations (especially multiplication) should not be performed in memory allocator (or similar) function arguments due to the risk of them overflowing. This could lead to values wrapping around and a smaller allocation being made than the caller was expecting. 
Using those allocations could lead to linear overflows of heap memory and other misbehaviors. So, use the purpose specific kcalloc() function instead of the argument size * count in the kzalloc() function. Link: https://www.kernel.org/doc/html/next/process/deprecated.html#open-coded-arithmetic-in-allocator-arguments [1] Link: https://github.com/KSPP/linux/issues/162 Signed-off-by: Erick Archer Reviewed-by: Gustavo A. R. Silva Acked-by: Jernej Skrabec Signed-off-by: Herbert Xu --- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c index d358334e5981..ee2a28c906ed 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c @@ -362,7 +362,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) digestsize = SHA512_DIGEST_SIZE; /* the padding could be up to two block. */ - buf = kzalloc(bs * 2, GFP_KERNEL | GFP_DMA); + buf = kcalloc(2, bs, GFP_KERNEL | GFP_DMA); if (!buf) { err = -ENOMEM; goto theend; From 4da3bc65d218605557696109e42cfeee666d601f Mon Sep 17 00:00:00 2001 From: Erick Archer Date: Sun, 21 Jan 2024 17:40:43 +0100 Subject: [PATCH 22/79] crypto: qat - use kcalloc_node() instead of kzalloc_node() As noted in the "Deprecated Interfaces, Language Features, Attributes, and Conventions" documentation [1], size calculations (especially multiplication) should not be performed in memory allocator (or similar) function arguments due to the risk of them overflowing. This could lead to values wrapping around and a smaller allocation being made than the caller was expecting. Using those allocations could lead to linear overflows of heap memory and other misbehaviors. So, use the purpose specific kcalloc_node() function instead of the argument count * size in the kzalloc_node() function. Link: https://www.kernel.org/doc/html/next/process/deprecated.html#open-coded-arithmetic-in-allocator-arguments [1] Link: https://github.com/KSPP/linux/issues/162 Signed-off-by: Erick Archer Reviewed-by: Gustavo A. R. Silva Acked-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/adf_isr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c index 3557a0d6dea2..a13d9885d60f 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_isr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c @@ -272,7 +272,7 @@ static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev) if (!accel_dev->pf.vf_info) msix_num_entries += hw_data->num_banks; - irqs = kzalloc_node(msix_num_entries * sizeof(*irqs), + irqs = kcalloc_node(msix_num_entries, sizeof(*irqs), GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); if (!irqs) return -ENOMEM; From 8203695ca50b421fe2650d2e9506533b59e13d4c Mon Sep 17 00:00:00 2001 From: Joachim Vandersmissen Date: Sun, 21 Jan 2024 13:45:26 -0600 Subject: [PATCH 23/79] crypto: testmgr - remove unused xts4096 and xts512 algorithms from testmgr.c Commit a93492cae30a ("crypto: ccree - remove data unit size support") removed support for the xts512 and xts4096 algorithms, but left them defined in testmgr.c. This patch removes those definitions. 
Signed-off-by: Joachim Vandersmissen Acked-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/testmgr.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index c26aeda85787..3dddd288ca02 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -5720,14 +5720,6 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { #endif - .alg = "xts4096(paes)", - .test = alg_test_null, - .fips_allowed = 1, - }, { - .alg = "xts512(paes)", - .test = alg_test_null, - .fips_allowed = 1, - }, { .alg = "xxhash64", .test = alg_test_hash, .fips_allowed = 1, From 4d314d27130b674a3687135fe94f44a40f107f76 Mon Sep 17 00:00:00 2001 From: David Wronek Date: Sun, 21 Jan 2024 17:57:41 +0100 Subject: [PATCH 24/79] dt-bindings: crypto: ice: Document SC7180 inline crypto engine Document the compatible used for the inline crypto engine found on SC7180. Acked-by: Rob Herring Signed-off-by: David Wronek Signed-off-by: Herbert Xu --- .../devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml b/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml index 09e43157cc71..e91bc7dc6ad3 100644 --- a/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml +++ b/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml @@ -14,6 +14,7 @@ properties: items: - enum: - qcom,sa8775p-inline-crypto-engine + - qcom,sc7180-inline-crypto-engine - qcom,sm8450-inline-crypto-engine - qcom,sm8550-inline-crypto-engine - qcom,sm8650-inline-crypto-engine From 68baa4289b8554998771799ed1f9695721e41a22 Mon Sep 17 00:00:00 2001 From: Qi Tao Date: Fri, 26 Jan 2024 17:38:25 +0800 Subject: [PATCH 25/79] crypto: hisilicon/sec2 - updates the sec DFX function register As the sec DFX function is enhanced, some RAS registers are added to the original DFX registers to enhance the DFX positioning function. Signed-off-by: Qi Tao Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_main.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index efa957ece23d..c290d8937b19 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -282,6 +282,11 @@ static const struct debugfs_reg32 sec_dfx_regs[] = { {"SEC_BD_SAA6 ", 0x301C38}, {"SEC_BD_SAA7 ", 0x301C3C}, {"SEC_BD_SAA8 ", 0x301C40}, + {"SEC_RAS_CE_ENABLE ", 0x301050}, + {"SEC_RAS_FE_ENABLE ", 0x301054}, + {"SEC_RAS_NFE_ENABLE ", 0x301058}, + {"SEC_REQ_TRNG_TIME_TH ", 0x30112C}, + {"SEC_CHANNEL_RNG_REQ_THLD ", 0x302110}, }; /* define the SEC's dfx regs region and region length */ From c4af422545474a0dbae1b17d306ec94e32ee0551 Mon Sep 17 00:00:00 2001 From: Qi Tao Date: Fri, 26 Jan 2024 17:38:26 +0800 Subject: [PATCH 26/79] crypto: hisilicon/sec2 - modify nested macro call Nested macros are integrated into a single macro, making the code simpler. 
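The shape of the change, as a reduced sketch with hypothetical names (the real macros carry many more fields): when a generic macro has a single user that always passes the same callbacks, the wrapper level can be folded away.

	/* Before: a generic macro plus a wrapper that fixes two arguments. */
	#define SKCIPHER_GEN_ALG(name, setkey, ctx_init, ctx_exit) \
		{ .setkey = (setkey), .init = (ctx_init), .exit = (ctx_exit) }
	#define SKCIPHER_ALG(name, setkey) \
		SKCIPHER_GEN_ALG(name, setkey, common_ctx_init, common_ctx_exit)

	/* After: SKCIPHER_ALG was the only user, so the callbacks are
	 * hard-coded and the extra level of indirection disappears. */
	#define SKCIPHER_ALG(name, setkey) \
		{ .setkey = (setkey), .init = common_ctx_init, .exit = common_ctx_exit }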
Signed-off-by: Qi Tao Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_crypto.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index f028dcfd0ead..692ba3213cc6 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -2145,8 +2145,8 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req) return sec_skcipher_crypto(sk_req, false); } -#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \ - sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\ +#define SEC_SKCIPHER_ALG(sec_cra_name, sec_set_key, \ + sec_min_key_size, sec_max_key_size, blk_size, iv_size)\ {\ .base = {\ .cra_name = sec_cra_name,\ @@ -2158,8 +2158,8 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req) .cra_ctxsize = sizeof(struct sec_ctx),\ .cra_module = THIS_MODULE,\ },\ - .init = ctx_init,\ - .exit = ctx_exit,\ + .init = sec_skcipher_ctx_init,\ + .exit = sec_skcipher_ctx_exit,\ .setkey = sec_set_key,\ .decrypt = sec_skcipher_decrypt,\ .encrypt = sec_skcipher_encrypt,\ @@ -2168,11 +2168,6 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req) .ivsize = iv_size,\ } -#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \ - max_key_size, blk_size, iv_size) \ - SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \ - sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size) - static struct sec_skcipher sec_skciphers[] = { { .alg_msk = BIT(0), From dd1a502cabcaa410c68ee9f74eae50d85e89fee3 Mon Sep 17 00:00:00 2001 From: Qi Tao Date: Fri, 26 Jan 2024 17:38:27 +0800 Subject: [PATCH 27/79] crypto: hisilicon/sec2 - fix some cleanup issues This patch fixes following cleanup issues: - The return value of the function is inconsistent with the actual return type. - After the pointer type is directly converted to the `__le64` type, the program may crash or produce unexpected results. Signed-off-by: Qi Tao Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_crypto.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 692ba3213cc6..f7e2ba0bf160 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -118,7 +118,7 @@ struct sec_aead { }; /* Get an en/de-cipher queue cyclically to balance load over queues of TFM */ -static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req) +static inline u32 sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req) { if (req->c_req.encrypt) return (u32)atomic_inc_return(&ctx->enc_qcyclic) % @@ -1371,7 +1371,7 @@ static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req) sec_sqe3->bd_param = cpu_to_le32(bd_param); sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len); - sec_sqe3->tag = cpu_to_le64(req); + sec_sqe3->tag = cpu_to_le64((unsigned long)req); return 0; } From 0c753f33428d824f476c1e76fac2f06b8dd0b3d5 Mon Sep 17 00:00:00 2001 From: Wenkai Lin Date: Fri, 26 Jan 2024 17:38:28 +0800 Subject: [PATCH 28/79] crypto: hisilicon/sec - remove unused parameter Unused parameter of static functions should be removed. 
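For instance, as a minimal sketch (hypothetical names): a parameter that no longer appears in the function body is simply dropped, which is safe to do mechanically because a static function has no callers outside the file.

	/* Before: 'qm' is accepted but never referenced in the body. */
	static int create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, int id);

	/* After: the parameter is removed and the in-file call sites updated. */
	static int create_qp_ctx(struct sec_ctx *ctx, int id);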
Signed-off-by: Wenkai Lin Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_crypto.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index f7e2ba0bf160..93a972fcbf63 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -485,8 +485,7 @@ static void sec_alg_resource_free(struct sec_ctx *ctx, sec_free_mac_resource(dev, qp_ctx->res); } -static int sec_alloc_qp_ctx_resource(struct hisi_qm *qm, struct sec_ctx *ctx, - struct sec_qp_ctx *qp_ctx) +static int sec_alloc_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { u16 q_depth = qp_ctx->qp->sq_depth; struct device *dev = ctx->dev; @@ -541,8 +540,7 @@ static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ kfree(qp_ctx->req_list); } -static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, - int qp_ctx_id, int alg_type) +static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id) { struct sec_qp_ctx *qp_ctx; struct hisi_qp *qp; @@ -561,7 +559,7 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, idr_init(&qp_ctx->req_idr); INIT_LIST_HEAD(&qp_ctx->backlog); - ret = sec_alloc_qp_ctx_resource(qm, ctx, qp_ctx); + ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx); if (ret) goto err_destroy_idr; @@ -614,7 +612,7 @@ static int sec_ctx_base_init(struct sec_ctx *ctx) } for (i = 0; i < sec->ctx_q_num; i++) { - ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0); + ret = sec_create_qp_ctx(ctx, i); if (ret) goto err_sec_release_qp_ctx; } @@ -750,9 +748,7 @@ static void sec_skcipher_uninit(struct crypto_skcipher *tfm) sec_ctx_base_uninit(ctx); } -static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key, - const u32 keylen, - const enum sec_cmode c_mode) +static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key, const u32 keylen) { struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; @@ -843,7 +839,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, switch (c_alg) { case SEC_CALG_3DES: - ret = sec_skcipher_3des_setkey(tfm, key, keylen, c_mode); + ret = sec_skcipher_3des_setkey(tfm, key, keylen); break; case SEC_CALG_AES: case SEC_CALG_SM4: From 9a14b311f2f786a7ac68f445bc550459b36d3190 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 26 Jan 2024 23:49:27 -0800 Subject: [PATCH 29/79] crypto: ahash - unexport crypto_hash_alg_has_setkey() Since crypto_hash_alg_has_setkey() is only called from ahash.c itself, make it a static function. 
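A reduced sketch of the unexport pattern (illustrative name, not the full change): the definition gains internal linkage, and the header prototype and EXPORT_SYMBOL_GPL() line are dropped, which shrinks the exported symbol surface and lets the compiler inline the helper.

	/* Before: declared in a shared header and exported. */
	bool helper_has_setkey(struct hash_alg_common *halg);
	EXPORT_SYMBOL_GPL(helper_has_setkey);

	/* After: visible only within the one file that calls it. */
	static bool helper_has_setkey(struct hash_alg_common *halg);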
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ahash.c | 21 ++++++++++----------- include/crypto/internal/hash.h | 2 -- 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index 80c3e5354711..0ac83f7f701d 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -618,6 +618,16 @@ int crypto_has_ahash(const char *alg_name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_has_ahash); +static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) +{ + struct crypto_alg *alg = &halg->base; + + if (alg->cra_type == &crypto_shash_type) + return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg)); + + return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey; +} + struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash) { struct hash_alg_common *halg = crypto_hash_alg_common(hash); @@ -760,16 +770,5 @@ int ahash_register_instance(struct crypto_template *tmpl, } EXPORT_SYMBOL_GPL(ahash_register_instance); -bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) -{ - struct crypto_alg *alg = &halg->base; - - if (alg->cra_type == &crypto_shash_type) - return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg)); - - return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey; -} -EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey); - MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 59c707e4dea4..58967593b6b4 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -87,8 +87,6 @@ static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg) !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY); } -bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg); - int crypto_grab_ahash(struct crypto_ahash_spawn *spawn, struct crypto_instance *inst, const char *name, u32 type, u32 mask); From a853450bf4c752e664abab0b2fad395b7ad7701c Mon Sep 17 00:00:00 2001 From: Quanyang Wang Date: Sun, 28 Jan 2024 12:29:06 +0800 Subject: [PATCH 30/79] crypto: xilinx - call finalize with bh disabled When calling crypto_finalize_request, BH should be disabled to avoid triggering the following calltrace: ------------[ cut here ]------------ WARNING: CPU: 2 PID: 74 at crypto/crypto_engine.c:58 crypto_finalize_request+0xa0/0x118 Modules linked in: cryptodev(O) CPU: 2 PID: 74 Comm: firmware:zynqmp Tainted: G O 6.8.0-rc1-yocto-standard #323 Hardware name: ZynqMP ZCU102 Rev1.0 (DT) pstate: 40000005 (nZcv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--) pc : crypto_finalize_request+0xa0/0x118 lr : crypto_finalize_request+0x104/0x118 sp : ffffffc085353ce0 x29: ffffffc085353ce0 x28: 0000000000000000 x27: ffffff8808ea8688 x26: ffffffc081715038 x25: 0000000000000000 x24: ffffff880100db00 x23: ffffff880100da80 x22: 0000000000000000 x21: 0000000000000000 x20: ffffff8805b14000 x19: ffffff880100da80 x18: 0000000000010450 x17: 0000000000000000 x16: 0000000000000000 x15: 0000000000000000 x14: 0000000000000003 x13: 0000000000000000 x12: ffffff880100dad0 x11: 0000000000000000 x10: ffffffc0832dcd08 x9 : ffffffc0812416d8 x8 : 00000000000001f4 x7 : ffffffc0830d2830 x6 : 0000000000000001 x5 : ffffffc082091000 x4 : ffffffc082091658 x3 : 0000000000000000 x2 : ffffffc7f9653000 x1 : 0000000000000000 x0 : ffffff8802d20000 Call trace: crypto_finalize_request+0xa0/0x118 crypto_finalize_aead_request+0x18/0x30 zynqmp_handle_aes_req+0xcc/0x388 crypto_pump_work+0x168/0x2d8 kthread_worker_fn+0xfc/0x3a0 kthread+0x118/0x138 ret_from_fork+0x10/0x20 irq 
event stamp: 40 hardirqs last enabled at (39): [] _raw_spin_unlock_irqrestore+0x70/0xb0 hardirqs last disabled at (40): [] el1_dbg+0x28/0x90 softirqs last enabled at (36): [] kernel_neon_begin+0x8c/0xf0 softirqs last disabled at (34): [] kernel_neon_begin+0x60/0xf0 ---[ end trace 0000000000000000 ]--- Fixes: 4d96f7d48131 ("crypto: xilinx - Add Xilinx AES driver") Signed-off-by: Quanyang Wang Signed-off-by: Herbert Xu --- drivers/crypto/xilinx/zynqmp-aes-gcm.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c index 3c205324b22b..e61405718840 100644 --- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c +++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c @@ -231,7 +231,10 @@ static int zynqmp_handle_aes_req(struct crypto_engine *engine, err = zynqmp_aes_aead_cipher(areq); } + local_bh_disable(); crypto_finalize_aead_request(engine, areq, err); + local_bh_enable(); + return 0; } From 633eeefab69ed4bdfa3f1d375c609e87feb1c55a Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Thu, 1 Feb 2024 14:17:16 +0800 Subject: [PATCH 31/79] crypto: virtio - remove duplicate check if queue is broken virtqueue_enable_cb() calls virtqueue_poll(), which already checks whether the queue is broken at the beginning, so remove the redundant virtqueue_is_broken() call. Reviewed-by: Stefan Hajnoczi Signed-off-by: Li RongQing Signed-off-by: Herbert Xu --- drivers/crypto/virtio/virtio_crypto_core.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c index b909c6a2bf1c..6a67d70e7f1c 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -42,8 +42,6 @@ static void virtcrypto_ctrlq_callback(struct virtqueue *vq) virtio_crypto_ctrlq_callback(vc_ctrl_req); spin_lock_irqsave(&vcrypto->ctrl_lock, flags); } - if (unlikely(virtqueue_is_broken(vq))) - break; } while (!virtqueue_enable_cb(vq)); spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags); } From e2b67859ab6efd4458bda1baaee20331a367d995 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 2 Feb 2024 18:53:16 +0800 Subject: [PATCH 32/79] crypto: qat - add heartbeat error simulator Add a mechanism that allows injecting a heartbeat error for testing purposes. A new attribute `inject_error` is added to debugfs for each QAT device. Upon a write on this attribute, the driver will inject an error on the device which can then be detected by the heartbeat feature. Errors break the device functionality, thus they require a device reset in order to be recovered. This functionality is not compiled in by default; to enable it, CRYPTO_DEV_QAT_ERROR_INJECTION must be set.
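Why the injected state reads as a failure can be seen from a simplified sketch of the liveness check (the function below is hypothetical; the counter pair matches the struct used by the heartbeat code): a worker thread counts as alive only if its request and response counters advance in step.

	struct hb_cnt_pair {
		__u16 resp_heartbeat_cnt;
		__u16 req_heartbeat_cnt;
	};

	/* Simplified per-thread liveness test. */
	static bool hb_thread_alive(const struct hb_cnt_pair *live,
				    const struct hb_cnt_pair *last)
	{
		return live->req_heartbeat_cnt == live->resp_heartbeat_cnt &&
		       live->resp_heartbeat_cnt != last->resp_heartbeat_cnt;
	}

	/*
	 * The injector bumps req_heartbeat_cnt while freezing the response
	 * counter (live.req != live.rsp and live.rsp == last.rsp), so every
	 * subsequent heartbeat poll reports the device as failed until it
	 * is reset.
	 */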
Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Reviewed-by: Lucas Segarra Fernandez Reviewed-by: Ahsan Atta Reviewed-by: Markas Rapoportas Signed-off-by: Mun Chun Yep Signed-off-by: Herbert Xu --- Documentation/ABI/testing/debugfs-driver-qat | 26 +++++++ drivers/crypto/intel/qat/Kconfig | 14 ++++ drivers/crypto/intel/qat/qat_common/Makefile | 2 + .../intel/qat/qat_common/adf_common_drv.h | 1 + .../intel/qat/qat_common/adf_heartbeat.c | 6 -- .../intel/qat/qat_common/adf_heartbeat.h | 18 +++++ .../qat/qat_common/adf_heartbeat_dbgfs.c | 52 +++++++++++++ .../qat/qat_common/adf_heartbeat_inject.c | 76 +++++++++++++++++++ .../intel/qat/qat_common/adf_hw_arbiter.c | 25 ++++++ 9 files changed, 214 insertions(+), 6 deletions(-) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c diff --git a/Documentation/ABI/testing/debugfs-driver-qat b/Documentation/ABI/testing/debugfs-driver-qat index b2db010d851e..bd6793760f29 100644 --- a/Documentation/ABI/testing/debugfs-driver-qat +++ b/Documentation/ABI/testing/debugfs-driver-qat @@ -81,3 +81,29 @@ Description: (RO) Read returns, for each Acceleration Engine (AE), the number : Number of Compress and Verify (CnV) errors and type of the last CnV error detected by Acceleration Engine N. + +What: /sys/kernel/debug/qat__/heartbeat/inject_error +Date: March 2024 +KernelVersion: 6.8 +Contact: qat-linux@intel.com +Description: (WO) Write to inject an error that simulates an heartbeat + failure. This is to be used for testing purposes. + + After writing this file, the driver stops arbitration on a + random engine and disables the fetching of heartbeat counters. + If a workload is running on the device, a job submitted to the + accelerator might not get a response and a read of the + `heartbeat/status` attribute might report -1, i.e. device + unresponsive. + The error is unrecoverable thus the device must be restarted to + restore its functionality. + + This attribute is available only when the kernel is built with + CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION=y. + + A write of 1 enables error injection. + + The following example shows how to enable error injection:: + + # cd /sys/kernel/debug/qat__ + # echo 1 > heartbeat/inject_error diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig index c120f6715a09..02fb8abe4e6e 100644 --- a/drivers/crypto/intel/qat/Kconfig +++ b/drivers/crypto/intel/qat/Kconfig @@ -106,3 +106,17 @@ config CRYPTO_DEV_QAT_C62XVF To compile this as a module, choose M here: the module will be called qat_c62xvf. + +config CRYPTO_DEV_QAT_ERROR_INJECTION + bool "Support for Intel(R) QAT Devices Heartbeat Error Injection" + depends on CRYPTO_DEV_QAT + depends on DEBUG_FS + help + Enables a mechanism that allows to inject a heartbeat error on + Intel(R) QuickAssist devices for testing purposes. + + This is intended for developer use only. + If unsure, say N. 
+ + This functionality is available via debugfs entry of the Intel(R) + QuickAssist device diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 6908727bff3b..5915cde8a7aa 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -53,3 +53,5 @@ intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \ adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \ adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \ adf_gen2_pfvf.o adf_gen4_pfvf.o + +intel_qat-$(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION) += adf_heartbeat_inject.o diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index f06188033a93..0baae42deb3a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -90,6 +90,7 @@ void adf_exit_aer(void); int adf_init_arb(struct adf_accel_dev *accel_dev); void adf_exit_arb(struct adf_accel_dev *accel_dev); void adf_update_ring_arb(struct adf_etr_ring_data *ring); +int adf_disable_arb_thd(struct adf_accel_dev *accel_dev, u32 ae, u32 thr); int adf_dev_get(struct adf_accel_dev *accel_dev); void adf_dev_put(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c index 13f48d2f6da8..f88b1bc6857e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c @@ -23,12 +23,6 @@ #define ADF_HB_EMPTY_SIG 0xA5A5A5A5 -/* Heartbeat counter pair */ -struct hb_cnt_pair { - __u16 resp_heartbeat_cnt; - __u16 req_heartbeat_cnt; -}; - static int adf_hb_check_polling_freq(struct adf_accel_dev *accel_dev) { u64 curr_time = adf_clock_get_current_time(); diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h index b22e3cb29798..24c3f4f24c86 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h @@ -19,6 +19,12 @@ enum adf_device_heartbeat_status { HB_DEV_UNSUPPORTED, }; +/* Heartbeat counter pair */ +struct hb_cnt_pair { + __u16 resp_heartbeat_cnt; + __u16 req_heartbeat_cnt; +}; + struct adf_heartbeat { unsigned int hb_sent_counter; unsigned int hb_failed_counter; @@ -35,6 +41,9 @@ struct adf_heartbeat { struct dentry *cfg; struct dentry *sent; struct dentry *failed; +#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION + struct dentry *inject_error; +#endif } dbgfs; }; @@ -51,6 +60,15 @@ void adf_heartbeat_status(struct adf_accel_dev *accel_dev, enum adf_device_heartbeat_status *hb_status); void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev); +#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION +int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev); +#else +static inline int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev) +{ + return -EPERM; +} +#endif + #else static inline int adf_heartbeat_init(struct adf_accel_dev *accel_dev) { diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c index 2661af6a2ef6..5cd6c2d6f90a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c @@ -155,6 +155,43 @@ static const struct file_operations adf_hb_cfg_fops = { .write = adf_hb_cfg_write, }; +static ssize_t adf_hb_error_inject_write(struct file 
*file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct adf_accel_dev *accel_dev = file->private_data; + size_t written_chars; + char buf[3]; + int ret; + + /* last byte left as string termination */ + if (count != 2) + return -EINVAL; + + written_chars = simple_write_to_buffer(buf, sizeof(buf) - 1, + ppos, user_buf, count); + if (buf[0] != '1') + return -EINVAL; + + ret = adf_heartbeat_inject_error(accel_dev); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Heartbeat error injection failed with status %d\n", + ret); + return ret; + } + + dev_info(&GET_DEV(accel_dev), "Heartbeat error injection enabled\n"); + + return written_chars; +} + +static const struct file_operations adf_hb_error_inject_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = adf_hb_error_inject_write, +}; + void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev) { struct adf_heartbeat *hb = accel_dev->heartbeat; @@ -171,6 +208,17 @@ void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev) &hb->hb_failed_counter, &adf_hb_stats_fops); hb->dbgfs.cfg = debugfs_create_file("config", 0600, hb->dbgfs.base_dir, accel_dev, &adf_hb_cfg_fops); + + if (IS_ENABLED(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION)) { + struct dentry *inject_error __maybe_unused; + + inject_error = debugfs_create_file("inject_error", 0200, + hb->dbgfs.base_dir, accel_dev, + &adf_hb_error_inject_fops); +#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION + hb->dbgfs.inject_error = inject_error; +#endif + } } EXPORT_SYMBOL_GPL(adf_heartbeat_dbgfs_add); @@ -189,6 +237,10 @@ void adf_heartbeat_dbgfs_rm(struct adf_accel_dev *accel_dev) hb->dbgfs.failed = NULL; debugfs_remove(hb->dbgfs.cfg); hb->dbgfs.cfg = NULL; +#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION + debugfs_remove(hb->dbgfs.inject_error); + hb->dbgfs.inject_error = NULL; +#endif debugfs_remove(hb->dbgfs.base_dir); hb->dbgfs.base_dir = NULL; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c new file mode 100644 index 000000000000..a3b474bdef6c --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include + +#include "adf_admin.h" +#include "adf_common_drv.h" +#include "adf_heartbeat.h" + +#define MAX_HB_TICKS 0xFFFFFFFF + +static int adf_hb_set_timer_to_max(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + + accel_dev->heartbeat->hb_timer = 0; + + if (hw_data->stop_timer) + hw_data->stop_timer(accel_dev); + + return adf_send_admin_hb_timer(accel_dev, MAX_HB_TICKS); +} + +static void adf_set_hb_counters_fail(struct adf_accel_dev *accel_dev, u32 ae, + u32 thr) +{ + struct hb_cnt_pair *stats = accel_dev->heartbeat->dma.virt_addr; + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + const size_t max_aes = hw_device->get_num_aes(hw_device); + const size_t hb_ctrs = hw_device->num_hb_ctrs; + size_t thr_id = ae * hb_ctrs + thr; + u16 num_rsp = stats[thr_id].resp_heartbeat_cnt; + + /* + * Inject live.req != live.rsp and live.rsp == last.rsp + * to trigger the heartbeat error detection + */ + stats[thr_id].req_heartbeat_cnt++; + stats += (max_aes * hb_ctrs); + stats[thr_id].resp_heartbeat_cnt = num_rsp; +} + +int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + const size_t max_aes = hw_device->get_num_aes(hw_device); + const 
size_t hb_ctrs = hw_device->num_hb_ctrs; + u32 rand, rand_ae, rand_thr; + unsigned long ae_mask; + int ret; + + ae_mask = hw_device->ae_mask; + + do { + /* Ensure we have a valid ae */ + get_random_bytes(&rand, sizeof(rand)); + rand_ae = rand % max_aes; + } while (!test_bit(rand_ae, &ae_mask)); + + get_random_bytes(&rand, sizeof(rand)); + rand_thr = rand % hb_ctrs; + + /* Increase the heartbeat timer to prevent FW updating HB counters */ + ret = adf_hb_set_timer_to_max(accel_dev); + if (ret) + return ret; + + /* Configure worker threads to stop processing any packet */ + ret = adf_disable_arb_thd(accel_dev, rand_ae, rand_thr); + if (ret) + return ret; + + /* Change HB counters memory to simulate a hang */ + adf_set_hb_counters_fail(accel_dev, rand_ae, rand_thr); + + return 0; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c index da6956699246..65bd26b25abc 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c +++ b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c @@ -103,3 +103,28 @@ void adf_exit_arb(struct adf_accel_dev *accel_dev) csr_ops->write_csr_ring_srv_arb_en(csr, i, 0); } EXPORT_SYMBOL_GPL(adf_exit_arb); + +int adf_disable_arb_thd(struct adf_accel_dev *accel_dev, u32 ae, u32 thr) +{ + void __iomem *csr = accel_dev->transport->banks[0].csr_addr; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + const u32 *thd_2_arb_cfg; + struct arb_info info; + u32 ae_thr_map; + + if (ADF_AE_STRAND0_THREAD == thr || ADF_AE_STRAND1_THREAD == thr) + thr = ADF_AE_ADMIN_THREAD; + + hw_data->get_arb_info(&info); + thd_2_arb_cfg = hw_data->get_arb_mapping(accel_dev); + if (!thd_2_arb_cfg) + return -EFAULT; + + /* Disable scheduling for this particular AE and thread */ + ae_thr_map = *(thd_2_arb_cfg + ae); + ae_thr_map &= ~(GENMASK(3, 0) << (thr * BIT(2))); + + WRITE_CSR_ARB_WT2SAM(csr, info.arb_offset, info.wt2sam_offset, ae, + ae_thr_map); + return 0; +} From ae508d7afb753f7576c435226e32b9535b7f8b10 Mon Sep 17 00:00:00 2001 From: Furong Zhou Date: Fri, 2 Feb 2024 18:53:17 +0800 Subject: [PATCH 33/79] crypto: qat - add fatal error notify method Add an error notify method to report a fatal error event to all the subsystems registered. In addition expose an API, adf_notify_fatal_error(), that allows triggering a fatal error notification asynchronously in the context of a workqueue. This will be invoked when a fatal error is detected by the ISR or through Heartbeat.
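A minimal sketch of the deferral pattern (the ISR-side caller below is hypothetical): interrupt context cannot sleep or block on the locks that the notified subsystems may take, so the detection site only allocates and queues a work item, and the fan-out to registered services runs later in process context.

	/* Hypothetical caller in an interrupt handler. */
	static irqreturn_t qat_fatal_isr(int irq, void *data)
	{
		struct adf_accel_dev *accel_dev = data;

		/* GFP_ATOMIC allocation plus queueing of the notification
		 * work; may fail under memory pressure, in which case no
		 * notification is sent. */
		if (adf_notify_fatal_error(accel_dev))
			dev_err(&GET_DEV(accel_dev),
				"Failed to queue fatal error work\n");

		return IRQ_HANDLED;
	}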
Signed-off-by: Furong Zhou Reviewed-by: Ahsan Atta Reviewed-by: Markas Rapoportas Reviewed-by: Giovanni Cabiddu Signed-off-by: Mun Chun Yep Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/adf_aer.c | 30 +++++++++++++++++++ .../intel/qat/qat_common/adf_common_drv.h | 3 ++ .../crypto/intel/qat/qat_common/adf_init.c | 12 ++++++++ 3 files changed, 45 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index a39e70bd4b21..22a43b4b8315 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -8,6 +8,11 @@ #include "adf_accel_devices.h" #include "adf_common_drv.h" +struct adf_fatal_error_data { + struct adf_accel_dev *accel_dev; + struct work_struct work; +}; + static struct workqueue_struct *device_reset_wq; static pci_ers_result_t adf_error_detected(struct pci_dev *pdev, @@ -171,6 +176,31 @@ const struct pci_error_handlers adf_err_handler = { }; EXPORT_SYMBOL_GPL(adf_err_handler); +static void adf_notify_fatal_error_worker(struct work_struct *work) +{ + struct adf_fatal_error_data *wq_data = + container_of(work, struct adf_fatal_error_data, work); + struct adf_accel_dev *accel_dev = wq_data->accel_dev; + + adf_error_notifier(accel_dev); + kfree(wq_data); +} + +int adf_notify_fatal_error(struct adf_accel_dev *accel_dev) +{ + struct adf_fatal_error_data *wq_data; + + wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC); + if (!wq_data) + return -ENOMEM; + + wq_data->accel_dev = accel_dev; + INIT_WORK(&wq_data->work, adf_notify_fatal_error_worker); + adf_misc_wq_queue_work(&wq_data->work); + + return 0; +} + int adf_init_aer(void) { device_reset_wq = alloc_workqueue("qat_device_reset_wq", diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 0baae42deb3a..8c062d5a8db2 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -40,6 +40,7 @@ enum adf_event { ADF_EVENT_SHUTDOWN, ADF_EVENT_RESTARTING, ADF_EVENT_RESTARTED, + ADF_EVENT_FATAL_ERROR, }; struct service_hndl { @@ -60,6 +61,8 @@ int adf_dev_restart(struct adf_accel_dev *accel_dev); void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data); void adf_clean_vf_map(bool); +int adf_notify_fatal_error(struct adf_accel_dev *accel_dev); +void adf_error_notifier(struct adf_accel_dev *accel_dev); int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, struct adf_accel_dev *pf); void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index f43ae9111553..74f0818c0703 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -433,6 +433,18 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev) return 0; } +void adf_error_notifier(struct adf_accel_dev *accel_dev) +{ + struct service_hndl *service; + + list_for_each_entry(service, &service_table, list) { + if (service->event_hld(accel_dev, ADF_EVENT_FATAL_ERROR)) + dev_err(&GET_DEV(accel_dev), + "Failed to send error event to %s.\n", + service->name); + } +} + static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev) { char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; From 758a0087db98fa23a3597289dbf3643ba9db2700 Mon Sep 17 00:00:00 2001 From: Furong Zhou Date: Fri, 2 Feb 2024 18:53:18 +0800 Subject: [PATCH 34/79] crypto: qat - 
disable arbitration before reset Disable arbitration to avoid new requests being processed before resetting a device. This is needed so that new requests are not fetched when an error is detected. Signed-off-by: Furong Zhou Reviewed-by: Ahsan Atta Reviewed-by: Markas Rapoportas Reviewed-by: Giovanni Cabiddu Signed-off-by: Mun Chun Yep Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/adf_aer.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index 22a43b4b8315..acbbd32bd815 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -181,8 +181,16 @@ static void adf_notify_fatal_error_worker(struct work_struct *work) struct adf_fatal_error_data *wq_data = container_of(work, struct adf_fatal_error_data, work); struct adf_accel_dev *accel_dev = wq_data->accel_dev; + struct adf_hw_device_data *hw_device = accel_dev->hw_device; adf_error_notifier(accel_dev); + + if (!accel_dev->is_vf) { + /* Disable arbitration to stop processing of new requests */ + if (hw_device->exit_arb) + hw_device->exit_arb(accel_dev); + } + kfree(wq_data); } From ec26f8e6c784ae391e69b19f4738d7196ed7794d Mon Sep 17 00:00:00 2001 From: Mun Chun Yep Date: Fri, 2 Feb 2024 18:53:19 +0800 Subject: [PATCH 35/79] crypto: qat - update PFVF protocol for recovery Update the PFVF logic to handle restart and recovery. This adds the following functions: * adf_pf2vf_notify_fatal_error(): allows the PF to notify VFs that the device detected a fatal error and requires a reset. This sends the `ADF_PF2VF_MSGTYPE_FATAL_ERROR` event to the VFs. * adf_pf2vf_wait_for_restarting_complete(): allows the PF to wait for `ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE` events from active VFs before proceeding with a reset. * adf_pf2vf_notify_restarted(): enables the PF to notify VFs with an `ADF_PF2VF_MSGTYPE_RESTARTED` event after recovery, indicating that the device is back to normal. This prompts VF drivers to switch back to using the accelerator for workload processing. These changes improve the communication and synchronization between PF and VF drivers during system restart and recovery processes.
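Taken together, the expected PF-side reset sequence is roughly the following (a sketch assembled from the function descriptions above; error handling and the reset itself are elided):

	adf_pf2vf_notify_restarting(accel_dev);
		/* marks VFs on compat >= ADF_PFVF_COMPAT_FALLBACK as
		 * 'restarting' until they acknowledge */
	adf_pf2vf_wait_for_restarting_complete(accel_dev);
		/* polls until every marked VF has sent
		 * ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE, or times out */

	/* ... reset the device ... */

	adf_pf2vf_notify_restarted(accel_dev);
		/* tells compatible VFs the device is usable again */

Older VFs never send the completion message, so the PF only waits on VFs that advertised compatibility version ADF_PFVF_COMPAT_FALLBACK or later.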
Signed-off-by: Mun Chun Yep
Reviewed-by: Ahsan Atta
Reviewed-by: Markas Rapoportas
Reviewed-by: Giovanni Cabiddu
Signed-off-by: Herbert Xu
---
 .../intel/qat/qat_common/adf_accel_devices.h  |  1 +
 drivers/crypto/intel/qat/qat_common/adf_aer.c |  3 +
 .../intel/qat/qat_common/adf_pfvf_msg.h       |  7 +-
 .../intel/qat/qat_common/adf_pfvf_pf_msg.c    | 64 ++++++++++++++++++-
 .../intel/qat/qat_common/adf_pfvf_pf_msg.h    | 21 ++++++
 .../intel/qat/qat_common/adf_pfvf_pf_proto.c  |  8 +++
 .../intel/qat/qat_common/adf_pfvf_vf_proto.c  |  6 ++
 .../crypto/intel/qat/qat_common/adf_sriov.c   |  1 +
 8 files changed, 109 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index a16c7e6edc65..4a3c36aaa7ca 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -332,6 +332,7 @@ struct adf_accel_vf_info {
     struct ratelimit_state vf2pf_ratelimit;
     u32 vf_nr;
     bool init;
+    bool restarting;
     u8 vf_compat_ver;
 };
 
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index acbbd32bd815..ecb114e1b59f 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -7,6 +7,7 @@
 #include
 #include "adf_accel_devices.h"
 #include "adf_common_drv.h"
+#include "adf_pfvf_pf_msg.h"
 
 struct adf_fatal_error_data {
     struct adf_accel_dev *accel_dev;
@@ -189,6 +190,8 @@ static void adf_notify_fatal_error_worker(struct work_struct *work)
         /* Disable arbitration to stop processing of new requests */
         if (hw_device->exit_arb)
             hw_device->exit_arb(accel_dev);
+        if (accel_dev->pf.vf_info)
+            adf_pf2vf_notify_fatal_error(accel_dev);
     }
 
     kfree(wq_data);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h
index 204a42438992..d1b3ef9cadac 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h
@@ -99,6 +99,8 @@ enum pf2vf_msgtype {
     ADF_PF2VF_MSGTYPE_RESTARTING = 0x01,
     ADF_PF2VF_MSGTYPE_VERSION_RESP = 0x02,
     ADF_PF2VF_MSGTYPE_BLKMSG_RESP = 0x03,
+    ADF_PF2VF_MSGTYPE_FATAL_ERROR = 0x04,
+    ADF_PF2VF_MSGTYPE_RESTARTED = 0x05,
     /* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. */
     ADF_PF2VF_MSGTYPE_RP_RESET_RESP = 0x10,
 };
@@ -112,6 +114,7 @@ enum vf2pf_msgtype {
     ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ = 0x07,
     ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ = 0x08,
     ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ = 0x09,
+    ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE = 0x0a,
     /* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. */
     ADF_VF2PF_MSGTYPE_RP_RESET = 0x10,
 };
 
@@ -124,8 +127,10 @@ enum pfvf_compatibility_version {
     ADF_PFVF_COMPAT_FAST_ACK = 0x03,
     /* Ring to service mapping support for non-standard mappings */
     ADF_PFVF_COMPAT_RING_TO_SVC_MAP = 0x04,
+    /* Fallback compat */
+    ADF_PFVF_COMPAT_FALLBACK = 0x05,
     /* Reference to the latest version */
-    ADF_PFVF_COMPAT_THIS_VERSION = 0x04,
+    ADF_PFVF_COMPAT_THIS_VERSION = 0x05,
 };
 
 /* PF->VF Version Response */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
index 14c069f0d71a..0e31f4b41844 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
@@ -1,21 +1,83 @@
 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
 /* Copyright(c) 2015 - 2021 Intel Corporation */
+#include
 #include
 #include "adf_accel_devices.h"
 #include "adf_pfvf_msg.h"
 #include "adf_pfvf_pf_msg.h"
 #include "adf_pfvf_pf_proto.h"
 
+#define ADF_PF_WAIT_RESTARTING_COMPLETE_DELAY 100
+#define ADF_VF_SHUTDOWN_RETRY 100
+
 void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
 {
     struct adf_accel_vf_info *vf;
     struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTING };
     int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
 
+    dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarting\n");
     for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
-        if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg))
+        vf->restarting = false;
+        if (!vf->init)
+            continue;
+        if (adf_send_pf2vf_msg(accel_dev, i, msg))
             dev_err(&GET_DEV(accel_dev),
                 "Failed to send restarting msg to VF%d\n", i);
+        else if (vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK)
+            vf->restarting = true;
+    }
+}
+
+void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev)
+{
+    int num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+    int i, retries = ADF_VF_SHUTDOWN_RETRY;
+    struct adf_accel_vf_info *vf;
+    bool vf_running;
+
+    dev_dbg(&GET_DEV(accel_dev), "pf2vf wait for restarting complete\n");
+    do {
+        vf_running = false;
+        for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++)
+            if (vf->restarting)
+                vf_running = true;
+        if (!vf_running)
+            break;
+        msleep(ADF_PF_WAIT_RESTARTING_COMPLETE_DELAY);
+    } while (--retries);
+
+    if (vf_running)
+        dev_warn(&GET_DEV(accel_dev), "Some VFs are still running\n");
+}
+
+void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev)
+{
+    struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTED };
+    int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+    struct adf_accel_vf_info *vf;
+
+    dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarted\n");
+    for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
+        if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK &&
+            adf_send_pf2vf_msg(accel_dev, i, msg))
+            dev_err(&GET_DEV(accel_dev),
+                "Failed to send restarted msg to VF%d\n", i);
+    }
+}
+
+void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev)
+{
+    struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_FATAL_ERROR };
+    int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+    struct adf_accel_vf_info *vf;
+
+    dev_dbg(&GET_DEV(accel_dev), "pf2vf notify fatal error\n");
+    for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
+        if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK &&
+            adf_send_pf2vf_msg(accel_dev, i, msg))
+            dev_err(&GET_DEV(accel_dev),
+                "Failed to send fatal error msg to VF%d\n", i);
     }
 }
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h
index e8982d1ac896..f203d88c919c 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h
@@ -5,7 +5,28 @@
 
 #include "adf_accel_devices.h"
 
+#if defined(CONFIG_PCI_IOV)
 void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
+void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev);
+void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev);
+void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev);
+#else
+static inline void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
 
 typedef int (*adf_pf2vf_blkmsg_provider)(struct adf_accel_dev *accel_dev,
                                          u8 *buffer, u8 compat);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
index 388e58bcbcaf..9ab93fbfefde 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
@@ -291,6 +291,14 @@ static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr,
             vf_info->init = false;
         }
         break;
+    case ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE:
+        {
+        dev_dbg(&GET_DEV(accel_dev),
+            "Restarting Complete received from VF%d\n", vf_nr);
+        vf_info->restarting = false;
+        vf_info->init = false;
+        }
+        break;
     case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ:
     case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ:
     case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ:
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c
index 1015155b6374..dc284a089c88 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c
@@ -308,6 +308,12 @@ static bool adf_handle_pf2vf_msg(struct adf_accel_dev *accel_dev,
         adf_pf2vf_handle_pf_restarting(accel_dev);
         return false;
+    case ADF_PF2VF_MSGTYPE_RESTARTED:
+        dev_dbg(&GET_DEV(accel_dev), "Restarted message received from PF\n");
+        return true;
+    case ADF_PF2VF_MSGTYPE_FATAL_ERROR:
+        dev_err(&GET_DEV(accel_dev), "Fatal error received from PF\n");
+        return true;
     case ADF_PF2VF_MSGTYPE_VERSION_RESP:
     case ADF_PF2VF_MSGTYPE_BLKMSG_RESP:
     case ADF_PF2VF_MSGTYPE_RP_RESET_RESP:
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
index f44025bb6f99..cb2a9830f192 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
@@ -103,6 +103,7 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
         return;
 
     adf_pf2vf_notify_restarting(accel_dev);
+    adf_pf2vf_wait_for_restarting_complete(accel_dev);
     pci_disable_sriov(accel_to_pci_dev(accel_dev));
 
     /* Disable VF to PF interrupts */

From 4469f9b2346834085fe4478ee1a851ee1de8ccb2 Mon Sep 17 00:00:00 2001
From: Mun Chun Yep
Date: Fri, 2 Feb 2024 18:53:20 +0800
Subject: [PATCH 36/79] crypto: qat - re-enable sriov after pf reset

When a Physical Function (PF) is reset, SR-IOV gets disabled, making the
associated Virtual Functions (VFs) unavailable.
Even after reset and using pci_restore_state, VFs remain uncreated
because numvfs is still 0. Therefore, it's necessary to reconfigure
SR-IOV to re-enable VFs.

This commit introduces the ADF_SRIOV_ENABLED configuration flag to cache
the SR-IOV enablement state. SR-IOV is only re-enabled if it was
previously configured.

This commit also introduces a dedicated workqueue without the
`WQ_MEM_RECLAIM` flag for enabling SR-IOV during Heartbeat and CPM error
resets, preventing a workqueue flushing warning.

This patch is based on earlier work done by Shashank Gupta.

Signed-off-by: Mun Chun Yep
Reviewed-by: Ahsan Atta
Reviewed-by: Markas Rapoportas
Reviewed-by: Giovanni Cabiddu
Signed-off-by: Herbert Xu
---
 drivers/crypto/intel/qat/qat_common/adf_aer.c | 40 ++++++++++++++++++-
 .../intel/qat/qat_common/adf_cfg_strings.h    |  1 +
 .../intel/qat/qat_common/adf_common_drv.h     |  5 +++
 .../crypto/intel/qat/qat_common/adf_sriov.c   | 37 +++++++++++++++--
 4 files changed, 79 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index ecb114e1b59f..cd273b31db0e 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -15,6 +15,7 @@ struct adf_fatal_error_data {
 };
 
 static struct workqueue_struct *device_reset_wq;
+static struct workqueue_struct *device_sriov_wq;
 
 static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
                                            pci_channel_state_t state)
@@ -43,6 +44,13 @@ struct adf_reset_dev_data {
     struct work_struct reset_work;
 };
 
+/* sriov dev data */
+struct adf_sriov_dev_data {
+    struct adf_accel_dev *accel_dev;
+    struct completion compl;
+    struct work_struct sriov_work;
+};
+
 void adf_reset_sbr(struct adf_accel_dev *accel_dev)
 {
     struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
@@ -88,11 +96,22 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev)
     }
 }
 
+static void adf_device_sriov_worker(struct work_struct *work)
+{
+    struct adf_sriov_dev_data *sriov_data =
+        container_of(work, struct adf_sriov_dev_data, sriov_work);
+
+    adf_reenable_sriov(sriov_data->accel_dev);
+    complete(&sriov_data->compl);
+}
+
 static void adf_device_reset_worker(struct work_struct *work)
 {
     struct adf_reset_dev_data *reset_data =
         container_of(work, struct adf_reset_dev_data, reset_work);
     struct adf_accel_dev *accel_dev = reset_data->accel_dev;
+    unsigned long wait_jiffies = msecs_to_jiffies(10000);
+    struct adf_sriov_dev_data sriov_data;
 
     adf_dev_restarting_notify(accel_dev);
     if (adf_dev_restart(accel_dev)) {
@@ -103,6 +122,14 @@ static void adf_device_reset_worker(struct work_struct *work)
         WARN(1, "QAT: device restart failed. Device is unusable\n");
         return;
     }
+
+    sriov_data.accel_dev = accel_dev;
+    init_completion(&sriov_data.compl);
+    INIT_WORK(&sriov_data.sriov_work, adf_device_sriov_worker);
+    queue_work(device_sriov_wq, &sriov_data.sriov_work);
+    if (wait_for_completion_timeout(&sriov_data.compl, wait_jiffies))
+        adf_pf2vf_notify_restarted(accel_dev);
+
     adf_dev_restarted_notify(accel_dev);
     clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
 
@@ -216,7 +243,14 @@ int adf_init_aer(void)
 {
     device_reset_wq = alloc_workqueue("qat_device_reset_wq",
                                       WQ_MEM_RECLAIM, 0);
-    return !device_reset_wq ? -EFAULT : 0;
+    if (!device_reset_wq)
+        return -EFAULT;
+
+    device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", 0, 0);
+    if (!device_sriov_wq)
+        return -EFAULT;
+
+    return 0;
 }
 
 void adf_exit_aer(void)
@@ -224,4 +258,8 @@ void adf_exit_aer(void)
 {
     if (device_reset_wq)
         destroy_workqueue(device_reset_wq);
     device_reset_wq = NULL;
+
+    if (device_sriov_wq)
+        destroy_workqueue(device_sriov_wq);
+    device_sriov_wq = NULL;
 }
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
index 322b76903a73..e015ad6cace2 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
@@ -49,5 +49,6 @@ ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY
 #define ADF_ACCEL_STR "Accelerator%d"
 #define ADF_HEARTBEAT_TIMER "HeartbeatTimer"
+#define ADF_SRIOV_ENABLED "SriovEnabled"
 
 #endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index 8c062d5a8db2..10891c9da6e7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -192,6 +192,7 @@ bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
 #if defined(CONFIG_PCI_IOV)
 int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
 void adf_disable_sriov(struct adf_accel_dev *accel_dev);
+void adf_reenable_sriov(struct adf_accel_dev *accel_dev);
 void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask);
 void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev);
 bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev);
@@ -212,6 +213,10 @@ static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
 {
 }
 
+static inline void adf_reenable_sriov(struct adf_accel_dev *accel_dev)
+{
+}
+
 static inline int adf_init_pf_wq(void)
 {
     return 0;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
index cb2a9830f192..87a70c00c41e 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
@@ -60,7 +60,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
         /* This ptr will be populated when VFs will be created */
         vf_info->accel_dev = accel_dev;
         vf_info->vf_nr = i;
-        vf_info->vf_compat_ver = 0;
 
         mutex_init(&vf_info->pf2vf_lock);
         ratelimit_state_init(&vf_info->vf2pf_ratelimit,
@@ -84,6 +83,32 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
     return pci_enable_sriov(pdev, totalvfs);
 }
 
+void adf_reenable_sriov(struct adf_accel_dev *accel_dev)
+{
+    struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+    char cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+    unsigned long val = 0;
+
+    if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+                                ADF_SRIOV_ENABLED, cfg))
+        return;
+
+    if (!accel_dev->pf.vf_info)
+        return;
+
+    if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+                                    &val, ADF_DEC))
+        return;
+
+    if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+                                    &val, ADF_DEC))
+        return;
+
+    set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+    dev_dbg(&pdev->dev, "Re-enabling SRIOV\n");
+    adf_enable_sriov(accel_dev);
+}
+
 /**
  * adf_disable_sriov() - Disable SRIOV for the device
  * @accel_dev: Pointer to accel device.
@@ -116,8 +141,10 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
     for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++)
         mutex_destroy(&vf->pf2vf_lock);
 
-    kfree(accel_dev->pf.vf_info);
-    accel_dev->pf.vf_info = NULL;
+    if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) {
+        kfree(accel_dev->pf.vf_info);
+        accel_dev->pf.vf_info = NULL;
+    }
 }
 EXPORT_SYMBOL_GPL(adf_disable_sriov);
 
@@ -195,6 +222,10 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
     if (ret)
         return ret;
 
+    val = 1;
+    adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED,
+                                &val, ADF_DEC);
+
     return numvfs;
 }
 EXPORT_SYMBOL_GPL(adf_sriov_configure);

From 2aaa1995a94a3187e52ddb9f127fa1307ee8ad00 Mon Sep 17 00:00:00 2001
From: Mun Chun Yep
Date: Fri, 2 Feb 2024 18:53:21 +0800
Subject: [PATCH 37/79] crypto: qat - add fatal error notification

Notify a fatal error condition and optionally reset the device in the
following cases:

* if the device reports an uncorrectable fatal error through an
  interrupt
* if the heartbeat feature detects that the device is not responding

This patch is based on earlier work done by Shashank Gupta.

Signed-off-by: Mun Chun Yep
Reviewed-by: Ahsan Atta
Reviewed-by: Markas Rapoportas
Reviewed-by: Giovanni Cabiddu
Signed-off-by: Herbert Xu
---
 drivers/crypto/intel/qat/qat_common/adf_heartbeat.c | 3 +++
 drivers/crypto/intel/qat/qat_common/adf_isr.c       | 7 ++++++-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
index f88b1bc6857e..fe8428d4ff39 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
@@ -229,6 +229,9 @@ void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
             "Heartbeat ERROR: QAT is not responding.\n");
         *hb_status = HB_DEV_UNRESPONSIVE;
         hb->hb_failed_counter++;
+        if (adf_notify_fatal_error(accel_dev))
+            dev_err(&GET_DEV(accel_dev),
+                "Failed to notify fatal error\n");
         return;
     }
 
diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c
index a13d9885d60f..020d213f4c99 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c
@@ -139,8 +139,13 @@ static bool adf_handle_ras_int(struct adf_accel_dev *accel_dev)
 
     if (ras_ops->handle_interrupt &&
         ras_ops->handle_interrupt(accel_dev, &reset_required)) {
-        if (reset_required)
+        if (reset_required) {
             dev_err(&GET_DEV(accel_dev), "Fatal error, reset required\n");
+            if (adf_notify_fatal_error(accel_dev))
+                dev_err(&GET_DEV(accel_dev),
+                    "Failed to notify fatal error\n");
+        }
+
         return true;
     }
 
From f5419a4239af8b3951f990c83d0d8c865a485475 Mon Sep 17 00:00:00 2001
From: Damian Muszynski
Date: Fri, 2 Feb 2024 18:53:22 +0800
Subject: [PATCH 38/79] crypto: qat - add auto reset on error

Expose the `auto_reset` sysfs attribute to configure the driver to reset
the device when a fatal error is detected.

When auto reset is enabled, the driver resets the device when it detects
either a heartbeat failure or a fatal error through an interrupt.

This patch is based on earlier work done by Shashank Gupta.
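A minimal userspace sketch of how this attribute could be toggled; the
device address below is a hypothetical placeholder, substitute the BDF
of the QAT PF on the target system:

#include <stdio.h>

/* Hypothetical BDF; replace with the actual device address. */
#define AUTO_RESET_ATTR "/sys/bus/pci/devices/0000:6b:00.0/qat/auto_reset"

int main(void)
{
    FILE *f = fopen(AUTO_RESET_ATTR, "w");

    if (!f) {
        perror("fopen");
        return 1;
    }
    /* "1", "y" or "on" enable the feature; "0", "n" or "off" disable it. */
    fputs("on", f);
    return fclose(f) ? 1 : 0;
}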
Signed-off-by: Damian Muszynski
Reviewed-by: Ahsan Atta
Reviewed-by: Markas Rapoportas
Reviewed-by: Giovanni Cabiddu
Signed-off-by: Mun Chun Yep
Signed-off-by: Herbert Xu
---
 Documentation/ABI/testing/sysfs-driver-qat    | 20 ++++++++++
 .../intel/qat/qat_common/adf_accel_devices.h  |  1 +
 drivers/crypto/intel/qat/qat_common/adf_aer.c | 11 +++++-
 .../intel/qat/qat_common/adf_common_drv.h     |  1 +
 .../crypto/intel/qat/qat_common/adf_sysfs.c   | 37 +++++++++++++++++++
 5 files changed, 69 insertions(+), 1 deletion(-)

diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat
index bbf329cf0d67..6778f1fea874 100644
--- a/Documentation/ABI/testing/sysfs-driver-qat
+++ b/Documentation/ABI/testing/sysfs-driver-qat
@@ -141,3 +141,23 @@ Description:
 		64
 
 	This attribute is only available for qat_4xxx devices.
+
+What:		/sys/bus/pci/devices//qat/auto_reset
+Date:		March 2024
+KernelVersion:	6.8
+Contact:	qat-linux@intel.com
+Description:	(RW) Reports the current state of the autoreset feature
+		for a QAT device
+
+		Write to the attribute to enable or disable device auto reset.
+
+		Device auto reset is disabled by default.
+
+		The values are::
+
+		* 1/Yy/on: auto reset enabled. If the device encounters an
+		  unrecoverable error, it will be reset automatically.
+		* 0/Nn/off: auto reset disabled. If the device encounters an
+		  unrecoverable error, it will not be reset.
+
+		This attribute is only available for qat_4xxx devices.
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index 4a3c36aaa7ca..0f26aa976c8c 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -402,6 +402,7 @@ struct adf_accel_dev {
     struct adf_error_counters ras_errors;
     struct mutex state_lock; /* protect state of the device */
     bool is_vf;
+    bool autoreset_on_error;
     u32 accel_id;
 };
 #endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index cd273b31db0e..b3d4b6b99c65 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -204,6 +204,14 @@ const struct pci_error_handlers adf_err_handler = {
 };
 EXPORT_SYMBOL_GPL(adf_err_handler);
 
+int adf_dev_autoreset(struct adf_accel_dev *accel_dev)
+{
+    if (accel_dev->autoreset_on_error)
+        return adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_ASYNC);
+
+    return 0;
+}
+
 static void adf_notify_fatal_error_worker(struct work_struct *work)
 {
     struct adf_fatal_error_data *wq_data =
@@ -215,10 +223,11 @@ static void adf_notify_fatal_error_worker(struct work_struct *work)
 
     if (!accel_dev->is_vf) {
         /* Disable arbitration to stop processing of new requests */
-        if (hw_device->exit_arb)
+        if (accel_dev->autoreset_on_error && hw_device->exit_arb)
             hw_device->exit_arb(accel_dev);
         if (accel_dev->pf.vf_info)
             adf_pf2vf_notify_fatal_error(accel_dev);
+        adf_dev_autoreset(accel_dev);
     }
 
     kfree(wq_data);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index 10891c9da6e7..57328249c89e 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -87,6 +87,7 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev);
 extern const struct pci_error_handlers adf_err_handler;
 void adf_reset_sbr(struct adf_accel_dev *accel_dev);
 void adf_reset_flr(struct adf_accel_dev *accel_dev);
+int adf_dev_autoreset(struct adf_accel_dev *accel_dev);
 void adf_dev_restore(struct adf_accel_dev *accel_dev);
 int adf_init_aer(void);
 void adf_exit_aer(void);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
index d450dad32c9e..4e7f70d4049d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
@@ -204,6 +204,42 @@ static ssize_t pm_idle_enabled_store(struct device *dev, struct device_attribute
 }
 static DEVICE_ATTR_RW(pm_idle_enabled);
 
+static ssize_t auto_reset_show(struct device *dev, struct device_attribute *attr,
+                               char *buf)
+{
+    char *auto_reset;
+    struct adf_accel_dev *accel_dev;
+
+    accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+    if (!accel_dev)
+        return -EINVAL;
+
+    auto_reset = accel_dev->autoreset_on_error ? "on" : "off";
+
+    return sysfs_emit(buf, "%s\n", auto_reset);
+}
+
+static ssize_t auto_reset_store(struct device *dev, struct device_attribute *attr,
+                                const char *buf, size_t count)
+{
+    struct adf_accel_dev *accel_dev;
+    bool enabled = false;
+    int ret;
+
+    ret = kstrtobool(buf, &enabled);
+    if (ret)
+        return ret;
+
+    accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+    if (!accel_dev)
+        return -EINVAL;
+
+    accel_dev->autoreset_on_error = enabled;
+
+    return count;
+}
+static DEVICE_ATTR_RW(auto_reset);
+
 static DEVICE_ATTR_RW(state);
 static DEVICE_ATTR_RW(cfg_services);
 
@@ -291,6 +327,7 @@ static struct attribute *qat_attrs[] = {
     &dev_attr_pm_idle_enabled.attr,
     &dev_attr_rp2srv.attr,
     &dev_attr_num_rps.attr,
+    &dev_attr_auto_reset.attr,
     NULL,
 };

From 750fa7c20e60926431ec50d63899771ffcd9fd5c Mon Sep 17 00:00:00 2001
From: Furong Zhou
Date: Fri, 2 Feb 2024 18:53:23 +0800
Subject: [PATCH 39/79] crypto: qat - limit heartbeat notifications

When the driver detects a heartbeat failure, it starts the recovery
flow. Set a limit so that the number of events is bounded if the
heartbeat status is read too frequently.
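The guard used here is a plain timestamp comparison. As a standalone C
sketch of the idea (the constant mirrors ADF_CFG_HB_RESET_MS from the
patch below; the function name and monotonic-clock source are
assumptions for illustration):

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for ADF_CFG_HB_RESET_MS (5000 ms). */
#define HB_RESET_INTERVAL_MS 5000

/* Only let a fatal-error notification through if at least
 * HB_RESET_INTERVAL_MS elapsed since the previous one; now_ms is
 * assumed to come from a monotonic clock. */
static bool hb_notify_allowed(uint64_t now_ms, uint64_t *last_notify_ms)
{
    if (now_ms - *last_notify_ms < HB_RESET_INTERVAL_MS)
        return false;

    *last_notify_ms = now_ms;
    return true;
}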
Signed-off-by: Furong Zhou
Reviewed-by: Ahsan Atta
Reviewed-by: Markas Rapoportas
Reviewed-by: Giovanni Cabiddu
Signed-off-by: Mun Chun Yep
Signed-off-by: Herbert Xu
---
 .../crypto/intel/qat/qat_common/adf_heartbeat.c | 17 ++++++++++++++---
 .../crypto/intel/qat/qat_common/adf_heartbeat.h |  3 +++
 2 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
index fe8428d4ff39..b19aa1ef8eee 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
@@ -205,6 +205,19 @@ static int adf_hb_get_status(struct adf_accel_dev *accel_dev)
     return ret;
 }
 
+static void adf_heartbeat_reset(struct adf_accel_dev *accel_dev)
+{
+    u64 curr_time = adf_clock_get_current_time();
+    u64 time_since_reset = curr_time - accel_dev->heartbeat->last_hb_reset_time;
+
+    if (time_since_reset < ADF_CFG_HB_RESET_MS)
+        return;
+
+    accel_dev->heartbeat->last_hb_reset_time = curr_time;
+    if (adf_notify_fatal_error(accel_dev))
+        dev_err(&GET_DEV(accel_dev), "Failed to notify fatal error\n");
+}
+
 void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
                           enum adf_device_heartbeat_status *hb_status)
 {
@@ -229,9 +242,7 @@ void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
             "Heartbeat ERROR: QAT is not responding.\n");
         *hb_status = HB_DEV_UNRESPONSIVE;
         hb->hb_failed_counter++;
-        if (adf_notify_fatal_error(accel_dev))
-            dev_err(&GET_DEV(accel_dev),
-                "Failed to notify fatal error\n");
+        adf_heartbeat_reset(accel_dev);
         return;
     }
 
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h
index 24c3f4f24c86..16fdfb48b196 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h
@@ -13,6 +13,8 @@ struct dentry;
 #define ADF_CFG_HB_TIMER_DEFAULT_MS 500
 #define ADF_CFG_HB_COUNT_THRESHOLD 3
 
+#define ADF_CFG_HB_RESET_MS 5000
+
 enum adf_device_heartbeat_status {
     HB_DEV_UNRESPONSIVE = 0,
     HB_DEV_ALIVE,
@@ -30,6 +32,7 @@ struct adf_heartbeat {
     unsigned int hb_failed_counter;
     unsigned int hb_timer;
     u64 last_hb_check_time;
+    u64 last_hb_reset_time;
     bool ctrs_cnt_checked;
     struct hb_dma_addr {
         dma_addr_t phy_addr;

From 9567d3dc760931afc38f7f1144c66dd8c4b8c680 Mon Sep 17 00:00:00 2001
From: Mun Chun Yep
Date: Fri, 2 Feb 2024 18:53:24 +0800
Subject: [PATCH 40/79] crypto: qat - improve aer error reset handling

Rework the AER reset and recovery flow to take into account root port
integrated devices that get reset between the error detected and the
slot reset callbacks.

In adf_error_detected() the device is gracefully shut down. The worker
threads are disabled, the error conditions are notified to listeners and
through PFVF comms, and finally the device is reset as part of
adf_dev_down().

In adf_slot_reset(), the device is brought up again. If SRIOV VFs were
enabled before the reset, these are re-enabled and the VFs are notified
of restarting through PFVF comms.
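This split follows the standard PCI AER handler contract. A minimal,
illustrative skeleton of that contract (placeholder bodies, not the qat
driver's actual implementation) looks like this:

#include <linux/pci.h>

/* error_detected() quiesces the device and asks the PCI core for a
 * reset; slot_reset() restores config space and brings it back up. */
static pci_ers_result_t drv_error_detected(struct pci_dev *pdev,
                                           pci_channel_state_t state)
{
    /* Stop workers, notify listeners and VFs, bring the device down
     * while it is still reachable. */
    return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t drv_slot_reset(struct pci_dev *pdev)
{
    /* The link was reset: restore config space, then re-init. */
    pci_restore_state(pdev);
    pci_save_state(pdev);
    return PCI_ERS_RESULT_RECOVERED;
}

static const struct pci_error_handlers drv_err_handler = {
    .error_detected = drv_error_detected,
    .slot_reset = drv_slot_reset,
};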
Signed-off-by: Mun Chun Yep
Reviewed-by: Ahsan Atta
Reviewed-by: Markas Rapoportas
Reviewed-by: Giovanni Cabiddu
Signed-off-by: Herbert Xu
---
 drivers/crypto/intel/qat/qat_common/adf_aer.c | 26 ++++++++++++++++++-
 1 file changed, 25 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index b3d4b6b99c65..3597e7605a14 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -33,6 +33,19 @@ static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
         return PCI_ERS_RESULT_DISCONNECT;
     }
 
+    set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+    if (accel_dev->hw_device->exit_arb) {
+        dev_dbg(&pdev->dev, "Disabling arbitration\n");
+        accel_dev->hw_device->exit_arb(accel_dev);
+    }
+    adf_error_notifier(accel_dev);
+    adf_pf2vf_notify_fatal_error(accel_dev);
+    adf_dev_restarting_notify(accel_dev);
+    adf_pf2vf_notify_restarting(accel_dev);
+    adf_pf2vf_wait_for_restarting_complete(accel_dev);
+    pci_clear_master(pdev);
+    adf_dev_down(accel_dev, false);
+
     return PCI_ERS_RESULT_NEED_RESET;
 }
 
@@ -180,14 +193,25 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
 static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
 {
     struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+    int res = 0;
 
     if (!accel_dev) {
         pr_err("QAT: Can't find acceleration device\n");
         return PCI_ERS_RESULT_DISCONNECT;
     }
-    if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
+
+    if (!pdev->is_busmaster)
+        pci_set_master(pdev);
+    pci_restore_state(pdev);
+    pci_save_state(pdev);
+    res = adf_dev_up(accel_dev, false);
+    if (res && res != -EALREADY)
         return PCI_ERS_RESULT_DISCONNECT;
 
+    adf_reenable_sriov(accel_dev);
+    adf_pf2vf_notify_restarted(accel_dev);
+    adf_dev_restarted_notify(accel_dev);
+    clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+
     return PCI_ERS_RESULT_RECOVERED;
 }

From e8829ef1f73fa4051a936ab9f0204195dae4ef2b Mon Sep 17 00:00:00 2001
From: Joachim Vandersmissen
Date: Sat, 3 Feb 2024 01:19:59 -0600
Subject: [PATCH 41/79] crypto: rsa - restrict plaintext/ciphertext values more
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

SP 800-56Br2, Section 7.1.1 [1] specifies that:

1. If m does not satisfy 1 < m < (n – 1), output an indication that m is
   out of range, and exit without further processing.

Similarly, Section 7.1.2 of the same standard specifies that:

1. If the ciphertext c does not satisfy 1 < c < (n – 1), output an
   indication that the ciphertext is out of range, and exit without
   further processing.

This range is slightly more conservative than RFC3447, as it also
excludes RSA fixed points 0, 1, and n - 1.

[1] https://doi.org/10.6028/NIST.SP.800-56Br2
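Why 0, 1 and n - 1 are fixed points is easy to demonstrate with toy
numbers: m^e mod n == m for those values whenever e is odd (and RSA
public exponents are always odd), so "encrypting" them leaks the
plaintext unchanged. A small self-contained C demonstration with an
assumed toy key n = 33, e = 7:

#include <stdio.h>

/* Square-and-multiply modular exponentiation for small operands. */
static unsigned int powmod(unsigned int b, unsigned int e, unsigned int n)
{
    unsigned int r = 1;

    b %= n;
    while (e) {
        if (e & 1)
            r = r * b % n;
        b = b * b % n;
        e >>= 1;
    }
    return r;
}

int main(void)
{
    const unsigned int n = 33, e = 7;
    const unsigned int fixed[] = { 0, 1, n - 1 };

    /* Prints c == m for all three inputs: they are RSA fixed points. */
    for (int i = 0; i < 3; i++)
        printf("m = %2u -> c = %2u\n", fixed[i], powmod(fixed[i], e, n));
    return 0;
}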
Signed-off-by: Joachim Vandersmissen
Signed-off-by: Herbert Xu
---
 crypto/rsa.c | 36 ++++++++++++++++++++++++++++++++----
 1 file changed, 32 insertions(+), 4 deletions(-)

diff --git a/crypto/rsa.c b/crypto/rsa.c
index b9cd11fb7d36..d9be9e86097e 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -24,14 +24,38 @@ struct rsa_mpi_key {
     MPI qinv;
 };
 
+static int rsa_check_payload(MPI x, MPI n)
+{
+    MPI n1;
+
+    if (mpi_cmp_ui(x, 1) <= 0)
+        return -EINVAL;
+
+    n1 = mpi_alloc(0);
+    if (!n1)
+        return -ENOMEM;
+
+    if (mpi_sub_ui(n1, n, 1) || mpi_cmp(x, n1) >= 0) {
+        mpi_free(n1);
+        return -EINVAL;
+    }
+
+    mpi_free(n1);
+    return 0;
+}
+
 /*
  * RSAEP function [RFC3447 sec 5.1.1]
  * c = m^e mod n;
  */
 static int _rsa_enc(const struct rsa_mpi_key *key, MPI c, MPI m)
 {
-    /* (1) Validate 0 <= m < n */
-    if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0)
+    /*
+     * Even though (1) in RFC3447 only requires 0 <= m <= n - 1, we are
+     * slightly more conservative and require 1 < m < n - 1. This is in line
+     * with SP 800-56Br2, Section 7.1.1.
+     */
+    if (rsa_check_payload(m, key->n))
         return -EINVAL;
 
     /* (2) c = m^e mod n */
@@ -50,8 +74,12 @@ static int _rsa_dec_crt(const struct rsa_mpi_key *key, MPI m_or_m1_or_h, MPI c)
     MPI m2, m12_or_qh;
     int ret = -ENOMEM;
 
-    /* (1) Validate 0 <= c < n */
-    if (mpi_cmp_ui(c, 0) < 0 || mpi_cmp(c, key->n) >= 0)
+    /*
+     * Even though (1) in RFC3447 only requires 0 <= c <= n - 1, we are
+     * slightly more conservative and require 1 < c < n - 1. This is in line
+     * with SP 800-56Br2, Section 7.1.2.
+     */
+    if (rsa_check_payload(c, key->n))
         return -EINVAL;
 
     m2 = mpi_alloc(0);

From 6e031ef2c201cea07bea3b286ed151378c4099f3 Mon Sep 17 00:00:00 2001
From: "Borislav Petkov (AMD)"
Date: Mon, 5 Feb 2024 16:46:01 +0100
Subject: [PATCH 42/79] crypto: ccp - State in dmesg that TSME is enabled

When only TSME is enabled, it is useful to state that fact too, so that
users are aware that memory encryption is still enabled even when the
corresponding software variant of memory encryption is not enabled.
Signed-off-by: Borislav Petkov (AMD)
Acked-by: Tom Lendacky
Signed-off-by: Herbert Xu
---
 drivers/crypto/ccp/psp-dev.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 124a2e0c8999..56bf832c2947 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -156,11 +156,14 @@ static unsigned int psp_get_capability(struct psp_device *psp)
     }
     psp->capability = val;
 
-    /* Detect if TSME and SME are both enabled */
+    /* Detect TSME and/or SME status */
     if (PSP_CAPABILITY(psp, PSP_SECURITY_REPORTING) &&
-        psp->capability & (PSP_SECURITY_TSME_STATUS << PSP_CAPABILITY_PSP_SECURITY_OFFSET) &&
-        cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
-        dev_notice(psp->dev, "psp: Both TSME and SME are active, SME is unnecessary when TSME is active.\n");
+        psp->capability & (PSP_SECURITY_TSME_STATUS << PSP_CAPABILITY_PSP_SECURITY_OFFSET)) {
+        if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
+            dev_notice(psp->dev, "psp: Both TSME and SME are active, SME is unnecessary when TSME is active.\n");
+        else
+            dev_notice(psp->dev, "psp: TSME enabled\n");
+    }
 
     return 0;
 }

From 12b8ae68f50de200c038246c2496822f38b18fe2 Mon Sep 17 00:00:00 2001
From: Weili Qian
Date: Wed, 7 Feb 2024 17:50:59 +0800
Subject: [PATCH 43/79] crypto: hisilicon/qm - add stop function by hardware

Hardware V3 is able to drain a function by sending a mailbox command to
the hardware, which triggers the tasks in the device to be flushed out.
When the function is reset, it can be stopped in this way.

Signed-off-by: Weili Qian
Signed-off-by: Herbert Xu
---
 drivers/crypto/hisilicon/qm.c | 40 ++++++++++++++++++++++++++++-------
 include/linux/hisi_acc_qm.h   |  2 ++
 2 files changed, 34 insertions(+), 8 deletions(-)

diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 4b20b94e6371..3b015482b4e6 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -312,6 +312,7 @@ static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
     {QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0},
     {QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1},
     {QM_SUPPORT_STOP_QP, 0x3100, 0, BIT(9), 0x0, 0x0, 0x1},
+    {QM_SUPPORT_STOP_FUNC, 0x3100, 0, BIT(10), 0x0, 0x0, 0x1},
     {QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
     {QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
 };
@@ -1674,6 +1675,11 @@ unlock:
     return ret;
 }
 
+static int qm_drain_qm(struct hisi_qm *qm)
+{
+    return hisi_qm_mb(qm, QM_MB_CMD_FLUSH_QM, 0, 0, 0);
+}
+
 static int qm_stop_qp(struct hisi_qp *qp)
 {
     return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
@@ -2088,7 +2094,8 @@ static int qm_drain_qp(struct hisi_qp *qp)
 
 static int qm_stop_qp_nolock(struct hisi_qp *qp)
 {
-    struct device *dev = &qp->qm->pdev->dev;
+    struct hisi_qm *qm = qp->qm;
+    struct device *dev = &qm->pdev->dev;
     int ret;
 
     /*
@@ -2104,11 +2111,14 @@ static int qm_stop_qp_nolock(struct hisi_qp *qp)
 
     atomic_set(&qp->qp_status.flags, QP_STOP);
 
-    ret = qm_drain_qp(qp);
-    if (ret)
-        dev_err(dev, "Failed to drain out data for stopping!\n");
+    /* V3 supports direct stop function when FLR prepare */
+    if (qm->ver < QM_HW_V3 || qm->status.stop_reason == QM_NORMAL) {
+        ret = qm_drain_qp(qp);
+        if (ret)
+            dev_err(dev, "Failed to drain out data for stopping qp(%u)!\n", qp->qp_id);
+    }
 
-    flush_workqueue(qp->qm->wq);
+    flush_workqueue(qm->wq);
     if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
         qp_stop_fail_cb(qp);
 
@@ -3112,16 +3122,29 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
     down_write(&qm->qps_lock);
 
-    qm->status.stop_reason = r;
     if (atomic_read(&qm->status.flags) == QM_STOP)
         goto err_unlock;
 
     /* Stop all the request sending at first. */
     atomic_set(&qm->status.flags, QM_STOP);
+    qm->status.stop_reason = r;
 
-    if (qm->status.stop_reason == QM_SOFT_RESET ||
-        qm->status.stop_reason == QM_DOWN) {
+    if (qm->status.stop_reason != QM_NORMAL) {
         hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
+        /*
+         * When performing soft reset, the hardware will no longer
+         * do tasks, and the tasks in the device will be flushed
+         * out directly since the master ooo is closed.
+         */
+        if (test_bit(QM_SUPPORT_STOP_FUNC, &qm->caps) &&
+            r != QM_SOFT_RESET) {
+            ret = qm_drain_qm(qm);
+            if (ret) {
+                dev_err(dev, "failed to drain qm!\n");
+                goto err_unlock;
+            }
+        }
+
         ret = qm_stop_started_qp(qm);
         if (ret < 0) {
             dev_err(dev, "Failed to stop started qp!\n");
@@ -3141,6 +3164,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
     }
 
     qm_clear_queues(qm);
+    qm->status.stop_reason = QM_NORMAL;
 
 err_unlock:
     up_write(&qm->qps_lock);
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index 5f4c74facf6a..720f10874a66 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -43,6 +43,7 @@
 #define QM_MB_CMD_CQC_BT 0x5
 #define QM_MB_CMD_SQC_VFT_V2 0x6
 #define QM_MB_CMD_STOP_QP 0x8
+#define QM_MB_CMD_FLUSH_QM 0x9
 #define QM_MB_CMD_SRC 0xc
 #define QM_MB_CMD_DST 0xd
 
@@ -151,6 +152,7 @@ enum qm_cap_bits {
     QM_SUPPORT_DB_ISOLATION = 0x0,
     QM_SUPPORT_FUNC_QOS,
     QM_SUPPORT_STOP_QP,
+    QM_SUPPORT_STOP_FUNC,
     QM_SUPPORT_MB_COMMAND,
     QM_SUPPORT_SVA_PREFETCH,
     QM_SUPPORT_RPM,

From 9066ac364d8659ab7c993b83c60a6182c3ec1ef9 Mon Sep 17 00:00:00 2001
From: Weili Qian
Date: Wed, 7 Feb 2024 17:51:00 +0800
Subject: [PATCH 44/79] crypto: hisilicon/qm - obtain stop queue status

The debugfs files 'dev_state' and 'dev_timeout' are added. Users can
query the current queue stop status through these two files, and set
the waiting timeout for when the queue is released.

dev_state: if dev_timeout is set, dev_state indicates the status of
stopping the queue. 0 indicates that the queue was stopped
successfully; other values indicate that the queue failed to stop. If
dev_timeout is not set, the value of dev_state is 0.

dev_timeout: if the queue fails to stop, the queue is released after
waiting dev_timeout * 20ms.

Signed-off-by: Weili Qian
Signed-off-by: Herbert Xu
---
 Documentation/ABI/testing/debugfs-hisi-hpre |  15 +++
 Documentation/ABI/testing/debugfs-hisi-sec  |  15 +++
 Documentation/ABI/testing/debugfs-hisi-zip  |  15 +++
 drivers/crypto/hisilicon/debugfs.c          |   5 +
 drivers/crypto/hisilicon/qm.c               | 108 +++++++++++++++-----
 include/linux/hisi_acc_qm.h                 |   6 ++
 6 files changed, 138 insertions(+), 26 deletions(-)

diff --git a/Documentation/ABI/testing/debugfs-hisi-hpre b/Documentation/ABI/testing/debugfs-hisi-hpre
index 6ed9258605c7..d4e16ef9ac9a 100644
--- a/Documentation/ABI/testing/debugfs-hisi-hpre
+++ b/Documentation/ABI/testing/debugfs-hisi-hpre
@@ -118,6 +118,21 @@ Description:	Dump the state of the device.
 		0: busy, 1: idle.
 		Only available for PF, and take no other effect on HPRE.
 
+What:		/sys/kernel/debug/hisi_hpre//qm/dev_timeout
+Date:		Feb 2024
+Contact:	linux-crypto@vger.kernel.org
+Description:	Set the wait time when stop queue fails. Available for both PF
+		and VF, and take no other effect on HPRE.
+		0: not wait(default), others value: wait dev_timeout * 20 microsecond.
+
+What:		/sys/kernel/debug/hisi_hpre//qm/dev_state
+Date:		Feb 2024
+Contact:	linux-crypto@vger.kernel.org
+Description:	Dump the stop queue status of the QM. The default value is 0,
+		if dev_timeout is set, when stop queue fails, the dev_state
+		will return non-zero value. Available for both PF and VF,
+		and take no other effect on HPRE.
+
 What:		/sys/kernel/debug/hisi_hpre//hpre_dfx/diff_regs
 Date:		Mar 2022
 Contact:	linux-crypto@vger.kernel.org
diff --git a/Documentation/ABI/testing/debugfs-hisi-sec b/Documentation/ABI/testing/debugfs-hisi-sec
index 403f5de96318..6c6c9a6e150a 100644
--- a/Documentation/ABI/testing/debugfs-hisi-sec
+++ b/Documentation/ABI/testing/debugfs-hisi-sec
@@ -98,6 +98,21 @@ Description:	Dump the state of the device.
 		0: busy, 1: idle.
 		Only available for PF, and take no other effect on SEC.
 
+What:		/sys/kernel/debug/hisi_sec2//qm/dev_timeout
+Date:		Feb 2024
+Contact:	linux-crypto@vger.kernel.org
+Description:	Set the wait time when stop queue fails. Available for both PF
+		and VF, and take no other effect on SEC.
+		0: not wait(default), others value: wait dev_timeout * 20 microsecond.
+
+What:		/sys/kernel/debug/hisi_sec2//qm/dev_state
+Date:		Feb 2024
+Contact:	linux-crypto@vger.kernel.org
+Description:	Dump the stop queue status of the QM. The default value is 0,
+		if dev_timeout is set, when stop queue fails, the dev_state
+		will return non-zero value. Available for both PF and VF,
+		and take no other effect on SEC.
+
 What:		/sys/kernel/debug/hisi_sec2//sec_dfx/diff_regs
 Date:		Mar 2022
 Contact:	linux-crypto@vger.kernel.org
diff --git a/Documentation/ABI/testing/debugfs-hisi-zip b/Documentation/ABI/testing/debugfs-hisi-zip
index 2394e6a3cfe2..a22dd6942219 100644
--- a/Documentation/ABI/testing/debugfs-hisi-zip
+++ b/Documentation/ABI/testing/debugfs-hisi-zip
@@ -111,6 +111,21 @@ Description:	Dump the state of the device.
 		0: busy, 1: idle.
 		Only available for PF, and take no other effect on ZIP.
 
+What:		/sys/kernel/debug/hisi_zip//qm/dev_timeout
+Date:		Feb 2024
+Contact:	linux-crypto@vger.kernel.org
+Description:	Set the wait time when stop queue fails. Available for both PF
+		and VF, and take no other effect on ZIP.
+		0: not wait(default), others value: wait dev_timeout * 20 microsecond.
+
+What:		/sys/kernel/debug/hisi_zip//qm/dev_state
+Date:		Feb 2024
+Contact:	linux-crypto@vger.kernel.org
+Description:	Dump the stop queue status of the QM. The default value is 0,
+		if dev_timeout is set, when stop queue fails, the dev_state
+		will return non-zero value. Available for both PF and VF,
+		and take no other effect on ZIP.
+
 What:		/sys/kernel/debug/hisi_zip//zip_dfx/diff_regs
 Date:		Mar 2022
 Contact:	linux-crypto@vger.kernel.org
diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
index 06e67eda409f..cd67fa348ca7 100644
--- a/drivers/crypto/hisilicon/debugfs.c
+++ b/drivers/crypto/hisilicon/debugfs.c
@@ -1112,6 +1112,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
 void hisi_qm_debug_init(struct hisi_qm *qm)
 {
     struct dfx_diff_registers *qm_regs = qm->debug.qm_diff_regs;
+    struct qm_dev_dfx *dev_dfx = &qm->debug.dev_dfx;
     struct qm_dfx *dfx = &qm->debug.dfx;
     struct dentry *qm_d;
     void *data;
@@ -1140,6 +1141,10 @@ void hisi_qm_debug_init(struct hisi_qm *qm)
     debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
                         &qm_status_fops);
+
+    debugfs_create_u32("dev_state", 0444, qm->debug.qm_d, &dev_dfx->dev_state);
+    debugfs_create_u32("dev_timeout", 0644, qm->debug.qm_d, &dev_dfx->dev_timeout);
+
     for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
         data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
         debugfs_create_file(qm_dfx_files[i].name,
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 3b015482b4e6..41dff28326f1 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -236,6 +236,12 @@
 
 #define QM_DEV_ALG_MAX_LEN 256
 
+ /* abnormal status value for stopping queue */
+#define QM_STOP_QUEUE_FAIL 1
+#define QM_DUMP_SQC_FAIL 3
+#define QM_DUMP_CQC_FAIL 4
+#define QM_FINISH_WAIT 5
+
 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
     (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
     ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
@@ -2037,43 +2043,25 @@ static void qp_stop_fail_cb(struct hisi_qp *qp)
     }
 }
 
-/**
- * qm_drain_qp() - Drain a qp.
- * @qp: The qp we want to drain.
- *
- * Determine whether the queue is cleared by judging the tail pointers of
- * sq and cq.
- */
-static int qm_drain_qp(struct hisi_qp *qp)
+static int qm_wait_qp_empty(struct hisi_qm *qm, u32 *state, u32 qp_id)
 {
-    struct hisi_qm *qm = qp->qm;
     struct device *dev = &qm->pdev->dev;
     struct qm_sqc sqc;
     struct qm_cqc cqc;
     int ret, i = 0;
 
-    /* No need to judge if master OOO is blocked. */
-    if (qm_check_dev_error(qm))
-        return 0;
-
-    /* Kunpeng930 supports drain qp by device */
-    if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) {
-        ret = qm_stop_qp(qp);
-        if (ret)
-            dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id);
-        return ret;
-    }
-
     while (++i) {
-        ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp->qp_id, 1);
+        ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1);
         if (ret) {
             dev_err_ratelimited(dev, "Failed to dump sqc!\n");
+            *state = QM_DUMP_SQC_FAIL;
             return ret;
         }
 
-        ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp->qp_id, 1);
+        ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1);
         if (ret) {
             dev_err_ratelimited(dev, "Failed to dump cqc!\n");
+            *state = QM_DUMP_CQC_FAIL;
             return ret;
         }
 
@@ -2082,8 +2070,9 @@ static int qm_drain_qp(struct hisi_qp *qp)
             break;
 
         if (i == MAX_WAIT_COUNTS) {
-            dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id);
-            return -EBUSY;
+            dev_err(dev, "Fail to empty queue %u!\n", qp_id);
+            *state = QM_STOP_QUEUE_FAIL;
+            return -ETIMEDOUT;
         }
 
         usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
@@ -2092,6 +2081,49 @@ static int qm_drain_qp(struct hisi_qp *qp)
     return 0;
 }
 
+/**
+ * qm_drain_qp() - Drain a qp.
+ * @qp: The qp we want to drain.
+ *
+ * If the device does not support stopping queue by sending mailbox,
+ * determine whether the queue is cleared by judging the tail pointers of
+ * sq and cq.
+ */
+static int qm_drain_qp(struct hisi_qp *qp)
+{
+    struct hisi_qm *qm = qp->qm;
+    struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
+    u32 state = 0;
+    int ret;
+
+    /* No need to judge if master OOO is blocked. */
+    if (qm_check_dev_error(pf_qm))
+        return 0;
+
+    /* HW V3 supports drain qp by device */
+    if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) {
+        ret = qm_stop_qp(qp);
+        if (ret) {
+            dev_err(&qm->pdev->dev, "Failed to stop qp!\n");
+            state = QM_STOP_QUEUE_FAIL;
+            goto set_dev_state;
+        }
+        return ret;
+    }
+
+    ret = qm_wait_qp_empty(qm, &state, qp->qp_id);
+    if (ret)
+        goto set_dev_state;
+
+    return 0;
+
+set_dev_state:
+    if (qm->debug.dev_dfx.dev_timeout)
+        qm->debug.dev_dfx.dev_state = state;
+
+    return ret;
+}
+
 static int qm_stop_qp_nolock(struct hisi_qp *qp)
 {
     struct hisi_qm *qm = qp->qm;
@@ -2319,7 +2351,31 @@ static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
 
 static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
 {
-    hisi_qm_stop_qp(q->priv);
+    struct hisi_qp *qp = q->priv;
+    struct hisi_qm *qm = qp->qm;
+    struct qm_dev_dfx *dev_dfx = &qm->debug.dev_dfx;
+    u32 i = 0;
+
+    hisi_qm_stop_qp(qp);
+
+    if (!dev_dfx->dev_timeout || !dev_dfx->dev_state)
+        return;
+
+    /*
+     * After the queue fails to be stopped,
+     * wait for a period of time before releasing the queue.
+     */
+    while (++i) {
+        msleep(WAIT_PERIOD);
+
+        /* Since dev_timeout maybe modified, check i >= dev_timeout */
+        if (i >= dev_dfx->dev_timeout) {
+            dev_err(&qm->pdev->dev, "Stop q %u timeout, state %u\n",
+                qp->qp_id, dev_dfx->dev_state);
+            dev_dfx->dev_state = QM_FINISH_WAIT;
+            break;
+        }
+    }
 }
 
 static int hisi_qm_is_q_updated(struct uacce_queue *q)
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index 720f10874a66..2d14742ad729 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -163,6 +163,11 @@ struct qm_dev_alg {
     const char *alg;
 };
 
+struct qm_dev_dfx {
+    u32 dev_state;
+    u32 dev_timeout;
+};
+
 struct dfx_diff_registers {
     u32 *regs;
     u32 reg_offset;
@@ -191,6 +196,7 @@ struct qm_debug {
     struct dentry *debug_root;
     struct dentry *qm_d;
     struct debugfs_file files[DEBUG_FILE_NUM];
+    struct qm_dev_dfx dev_dfx;
     unsigned int *qm_last_words;
     /* ACC engines recoreding last regs */
     unsigned int *last_words;

From 9066ac364d8659ab7c993b83c60a6182c3ec1ef9 Mon Sep 17 00:00:00 2001
From: Weili Qian
Date: Wed, 7 Feb 2024 17:51:01 +0800
Subject: [PATCH 45/79] crypto: hisilicon/qm - change function type to void

The function qm_stop_qp_nolock() always returns zero, so its return
type is changed to void.
Signed-off-by: Weili Qian
Signed-off-by: Herbert Xu
---
 drivers/crypto/hisilicon/qm.c | 38 ++++++++++-------------------------
 include/linux/hisi_acc_qm.h   |  2 +-
 2 files changed, 12 insertions(+), 28 deletions(-)

diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 41dff28326f1..92f0a1d9b4a6 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -2124,7 +2124,7 @@ set_dev_state:
     return ret;
 }
 
-static int qm_stop_qp_nolock(struct hisi_qp *qp)
+static void qm_stop_qp_nolock(struct hisi_qp *qp)
 {
     struct hisi_qm *qm = qp->qm;
     struct device *dev = &qm->pdev->dev;
@@ -2138,7 +2138,7 @@ static int qm_stop_qp_nolock(struct hisi_qp *qp)
      */
     if (atomic_read(&qp->qp_status.flags) != QP_START) {
         qp->is_resetting = false;
-        return 0;
+        return;
     }
 
     atomic_set(&qp->qp_status.flags, QP_STOP);
@@ -2155,25 +2155,19 @@ static int qm_stop_qp_nolock(struct hisi_qp *qp)
         qp_stop_fail_cb(qp);
 
     dev_dbg(dev, "stop queue %u!", qp->qp_id);
-
-    return 0;
 }
 
 /**
  * hisi_qm_stop_qp() - Stop a qp in qm.
  * @qp: The qp we want to stop.
  *
- * This function is reverse of hisi_qm_start_qp. Return 0 if successful.
+ * This function is reverse of hisi_qm_start_qp.
  */
-int hisi_qm_stop_qp(struct hisi_qp *qp)
+void hisi_qm_stop_qp(struct hisi_qp *qp)
 {
-    int ret;
-
     down_write(&qp->qm->qps_lock);
-    ret = qm_stop_qp_nolock(qp);
+    qm_stop_qp_nolock(qp);
     up_write(&qp->qm->qps_lock);
-
-    return ret;
 }
 EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
 
@@ -3120,25 +3114,18 @@ static int qm_restart(struct hisi_qm *qm)
 }
 
 /* Stop started qps in reset flow */
-static int qm_stop_started_qp(struct hisi_qm *qm)
+static void qm_stop_started_qp(struct hisi_qm *qm)
 {
-    struct device *dev = &qm->pdev->dev;
     struct hisi_qp *qp;
-    int i, ret;
+    int i;
 
     for (i = 0; i < qm->qp_num; i++) {
         qp = &qm->qp_array[i];
-        if (qp && atomic_read(&qp->qp_status.flags) == QP_START) {
+        if (atomic_read(&qp->qp_status.flags) == QP_START) {
             qp->is_resetting = true;
-            ret = qm_stop_qp_nolock(qp);
-            if (ret < 0) {
-                dev_err(dev, "Failed to stop qp%d!\n", i);
-                return ret;
-            }
+            qm_stop_qp_nolock(qp);
         }
     }
-
-    return 0;
 }
 
 /**
@@ -3201,11 +3188,8 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
             }
         }
 
-        ret = qm_stop_started_qp(qm);
-        if (ret < 0) {
-            dev_err(dev, "Failed to stop started qp!\n");
-            goto err_unlock;
-        }
+        qm_stop_started_qp(qm);
+
         hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
     }
 
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index 2d14742ad729..9d7754ad5e9b 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -531,7 +531,7 @@ void hisi_qm_uninit(struct hisi_qm *qm);
 int hisi_qm_start(struct hisi_qm *qm);
 int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
 int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
-int hisi_qm_stop_qp(struct hisi_qp *qp);
+void hisi_qm_stop_qp(struct hisi_qp *qp);
 int hisi_qp_send(struct hisi_qp *qp, const void *msg);
 void hisi_qm_debug_init(struct hisi_qm *qm);
 void hisi_qm_debug_regs_clear(struct hisi_qm *qm);

From 3ee2cee56c5e399424bb6d18de3b63be7126f28a Mon Sep 17 00:00:00 2001
From: Lukas Bulwahn
Date: Thu, 8 Feb 2024 10:33:27 +0100
Subject: [PATCH 46/79] MAINTAINERS: adjust file entries after crypto vmx file
 movement

Commit 109303336a0c ("crypto: vmx - Move to arch/powerpc/crypto") moves
the crypto vmx files to arch/powerpc, but fails to adjust the file
entries for IBM Power VMX Cryptographic instructions and LINUX FOR
POWERPC.
Hence, ./scripts/get_maintainer.pl --self-test=patterns complains about
broken references. Adjust these file entries accordingly. To keep the
matched files exact after the movement, spell out each file name in the
new directory.

Signed-off-by: Lukas Bulwahn
Signed-off-by: Herbert Xu
---
 MAINTAINERS | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index 8d1052fa6a69..5a4051996f1e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10294,12 +10294,17 @@ M:	Nayna Jain
 M:	Paulo Flabiano Smorigo
 L:	linux-crypto@vger.kernel.org
 S:	Supported
-F:	drivers/crypto/vmx/Kconfig
-F:	drivers/crypto/vmx/Makefile
-F:	drivers/crypto/vmx/aes*
-F:	drivers/crypto/vmx/ghash*
-F:	drivers/crypto/vmx/ppc-xlate.pl
-F:	drivers/crypto/vmx/vmx.c
+F:	arch/powerpc/crypto/Kconfig
+F:	arch/powerpc/crypto/Makefile
+F:	arch/powerpc/crypto/aes.c
+F:	arch/powerpc/crypto/aes_cbc.c
+F:	arch/powerpc/crypto/aes_ctr.c
+F:	arch/powerpc/crypto/aes_xts.c
+F:	arch/powerpc/crypto/aesp8-ppc.*
+F:	arch/powerpc/crypto/ghash.c
+F:	arch/powerpc/crypto/ghashp8-ppc.pl
+F:	arch/powerpc/crypto/ppc-xlate.pl
+F:	arch/powerpc/crypto/vmx.c
 
 IBM ServeRAID RAID DRIVER
 S:	Orphan
@@ -12382,7 +12387,6 @@ F:	drivers/*/*/*pasemi*
 F:	drivers/*/*pasemi*
 F:	drivers/char/tpm/tpm_ibmvtpm*
 F:	drivers/crypto/nx/
-F:	drivers/crypto/vmx/
 F:	drivers/i2c/busses/i2c-opal.c
 F:	drivers/net/ethernet/ibm/ibmveth.*
 F:	drivers/net/ethernet/ibm/ibmvnic.*

From c2304e1a0b8051a60d4eb9c99a1c509d90380ae5 Mon Sep 17 00:00:00 2001
From: Damian Muszynski
Date: Fri, 9 Feb 2024 13:42:07 +0100
Subject: [PATCH 47/79] crypto: qat - change SLAs cleanup flow at shutdown

The implementation of the Rate Limiting (RL) feature includes the
cleanup of all SLAs during device shutdown. For each SLA, the firmware
is notified of the removal through an admin message, the data structures
that take into account the budgets are updated and the memory is freed.

However, this explicit cleanup is not necessary as: (1) the device is
reset and the firmware state is lost, and (2) all RL data structures are
freed anyway.

In addition, if the device is unresponsive, for example after a PCI AER
error is detected, the admin interface might not be available. This
might slow down the shutdown sequence and cause a timeout in the
recovery flows, which in turn makes the driver believe that the device
is not recoverable.

Fix by replacing the explicit SLAs removal with just a free of the SLA
data structures.
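The difference between the two teardown strategies can be sketched in a
few lines of C; the array size and type names below are arbitrary
assumptions for illustration, not the driver's definitions:

#include <stdlib.h>

#define SLA_SLOTS 64 /* arbitrary capacity for the sketch */

struct sla { int id; };

/* Shutdown-path teardown as described above: no admin messages to the
 * firmware and no budget bookkeeping, only freeing host memory, since
 * the device reset wipes firmware state anyway. */
static void free_all_sla_sketch(struct sla *slas[SLA_SLOTS])
{
    for (int i = 0; i < SLA_SLOTS; i++) {
        free(slas[i]);  /* free(NULL) is a no-op */
        slas[i] = NULL;
    }
}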
Fixes: d9fb8408376e ("crypto: qat - add rate limiting feature to qat_4xxx")
Cc:
Signed-off-by: Damian Muszynski
Reviewed-by: Giovanni Cabiddu
Signed-off-by: Herbert Xu
---
 drivers/crypto/intel/qat/qat_common/adf_rl.c | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c
index de1b214dba1f..d4f2db3c53d8 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c
@@ -788,6 +788,24 @@ static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla)
     sla_type_arr[node_id] = NULL;
 }
 
+static void free_all_sla(struct adf_accel_dev *accel_dev)
+{
+    struct adf_rl *rl_data = accel_dev->rate_limiting;
+    int sla_id;
+
+    mutex_lock(&rl_data->rl_lock);
+
+    for (sla_id = 0; sla_id < RL_NODES_CNT_MAX; sla_id++) {
+        if (!rl_data->sla[sla_id])
+            continue;
+
+        kfree(rl_data->sla[sla_id]);
+        rl_data->sla[sla_id] = NULL;
+    }
+
+    mutex_unlock(&rl_data->rl_lock);
+}
+
 /**
  * add_update_sla() - handles the creation and the update of an SLA
  * @accel_dev: pointer to acceleration device structure
@@ -1155,7 +1173,7 @@ void adf_rl_stop(struct adf_accel_dev *accel_dev)
         return;
 
     adf_sysfs_rl_rm(accel_dev);
-    adf_rl_remove_sla_all(accel_dev, true);
+    free_all_sla(accel_dev);
 }
 
 void adf_rl_exit(struct adf_accel_dev *accel_dev)

From 7d42e097607c4d246d99225bf2b195b6167a210c Mon Sep 17 00:00:00 2001
From: Damian Muszynski
Date: Fri, 9 Feb 2024 13:43:42 +0100
Subject: [PATCH 48/79] crypto: qat - resolve race condition during AER
 recovery

During the PCI AER system's error recovery process, the kernel driver
may encounter a race condition with freeing the reset_data structure's
memory. If the device restart takes more than 10 seconds, the function
scheduling that restart will exit due to a timeout, and the reset_data
structure will be freed. However, this data structure is used for
completion notification after the restart is completed, which leads to
a UAF bug.

This results in a KFENCE bug notice.

  BUG: KFENCE: use-after-free read in adf_device_reset_worker+0x38/0xa0 [intel_qat]
  Use-after-free read at 0x00000000bc56fddf (in kfence-#142):
  adf_device_reset_worker+0x38/0xa0 [intel_qat]
  process_one_work+0x173/0x340

To resolve this race condition, the memory associated with the container
of the work_struct is freed on the worker if the timeout expired,
otherwise on the function that schedules the worker. The timeout is
detected by checking whether the caller is still waiting for the
completion, using the completion_done() function.
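The underlying ownership rule ("exactly one of the two parties frees the
shared object, and it must be the last one to touch it") can also be
expressed with a reference count. This is a different mechanism from
the patch's completion_done() check, shown here only as a sketch of the
principle, with assumed names and standard C11 atomics:

#include <stdatomic.h>
#include <stdlib.h>

struct reset_data {
    atomic_int refs; /* one reference for the waiter, one for the worker */
    /* ... completion object, device pointer ... */
};

/* Both the scheduling thread (after its timeout) and the worker call
 * this when done with the object; whichever drops the last reference
 * frees it, so a timed-out waiter can never free it while the worker
 * still dereferences it. */
static void reset_data_put(struct reset_data *rd)
{
    if (atomic_fetch_sub(&rd->refs, 1) == 1)
        free(rd);
}

The patch achieves the same single-owner guarantee without a refcount by
letting the worker test completion_done() to decide which side frees.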
Fixes: d8cba25d2c68 ("crypto: qat - Intel(R) QAT driver framework") Cc: Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/adf_aer.c | 22 ++++++++++++++----- 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index 3597e7605a14..9da2278bd5b7 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -130,7 +130,8 @@ static void adf_device_reset_worker(struct work_struct *work) if (adf_dev_restart(accel_dev)) { /* The device hanged and we can't restart it so stop here */ dev_err(&GET_DEV(accel_dev), "Restart device failed\n"); - if (reset_data->mode == ADF_DEV_RESET_ASYNC) + if (reset_data->mode == ADF_DEV_RESET_ASYNC || + completion_done(&reset_data->compl)) kfree(reset_data); WARN(1, "QAT: device restart failed. Device is unusable\n"); return; @@ -146,11 +147,19 @@ static void adf_device_reset_worker(struct work_struct *work) adf_dev_restarted_notify(accel_dev); clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); - /* The dev is back alive. Notify the caller if in sync mode */ - if (reset_data->mode == ADF_DEV_RESET_SYNC) - complete(&reset_data->compl); - else + /* + * The dev is back alive. Notify the caller if in sync mode + * + * If the device restart takes more time than expected, + * the schedule_reset() function can time out and exit. This can be + * detected by calling the completion_done() function. In this case + * the reset_data structure needs to be freed here. + */ + if (reset_data->mode == ADF_DEV_RESET_ASYNC || + completion_done(&reset_data->compl)) kfree(reset_data); + else + complete(&reset_data->compl); } static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, @@ -183,8 +192,9 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "Reset device timeout expired\n"); ret = -EFAULT; + } else { + kfree(reset_data); } - kfree(reset_data); return ret; } return 0; From 2ecd43413d7668d67b9b8a56f882aa1ea12b8a62 Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Mon, 12 Feb 2024 13:05:09 +0000 Subject: [PATCH 49/79] Documentation: qat: fix auto_reset section Remove the unneeded colon in the auto_reset section. This resolves the following errors when building the documentation: Documentation/ABI/testing/sysfs-driver-qat:146: ERROR: Unexpected indentation. Documentation/ABI/testing/sysfs-driver-qat:146: WARNING: Block quote ends without a blank line; unexpected unindent. Fixes: f5419a4239af ("crypto: qat - add auto reset on error") Reported-by: Stephen Rothwell Closes: https://lore.kernel.org/linux-kernel/20240212144830.70495d07@canb.auug.org.au/T/ Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- Documentation/ABI/testing/sysfs-driver-qat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat index 6778f1fea874..96020fb051c3 100644 --- a/Documentation/ABI/testing/sysfs-driver-qat +++ b/Documentation/ABI/testing/sysfs-driver-qat @@ -153,7 +153,7 @@ Description: (RW) Reports the current state of the autoreset feature Device auto reset is disabled by default. - The values are:: + The values are: * 1/Yy/on: auto reset enabled. If the device encounters an unrecoverable error, it will be reset automatically.
From 53cc9baeb9bc2a187eb9c9790d30995148852b12 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 13 Feb 2024 14:49:46 +0100 Subject: [PATCH 50/79] crypto: arm/sha - fix function cast warnings clang-16 warns about casting between incompatible function types: arch/arm/crypto/sha256_glue.c:37:5: error: cast from 'void (*)(u32 *, const void *, unsigned int)' (aka 'void (*)(unsigned int *, const void *, unsigned int)') to 'sha256_block_fn *' (aka 'void (*)(struct sha256_state *, const unsigned char *, int)') converts to incompatible function type [-Werror,-Wcast-function-type-strict] 37 | (sha256_block_fn *)sha256_block_data_order); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ arch/arm/crypto/sha512-glue.c:34:3: error: cast from 'void (*)(u64 *, const u8 *, int)' (aka 'void (*)(unsigned long long *, const unsigned char *, int)') to 'sha512_block_fn *' (aka 'void (*)(struct sha512_state *, const unsigned char *, int)') converts to incompatible function type [-Werror,-Wcast-function-type-strict] 34 | (sha512_block_fn *)sha512_block_data_order); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Fix the prototypes for the assembler functions to match the typedef. The code already relies on the digest being the first part of the state structure, so there is no change in behavior. Fixes: c80ae7ca3726 ("crypto: arm/sha512 - accelerated SHA-512 using ARM generic ASM and NEON") Fixes: b59e2ae3690c ("crypto: arm/sha256 - move SHA-224/256 ASM/NEON implementation to base layer") Signed-off-by: Arnd Bergmann Reviewed-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/arm/crypto/sha256_glue.c | 13 +++++-------- arch/arm/crypto/sha512-glue.c | 12 +++++------- 2 files changed, 10 insertions(+), 15 deletions(-) diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c index 433ee4ddce6c..f85933fdec75 100644 --- a/arch/arm/crypto/sha256_glue.c +++ b/arch/arm/crypto/sha256_glue.c @@ -24,8 +24,8 @@ #include "sha256_glue.h" -asmlinkage void sha256_block_data_order(u32 *digest, const void *data, - unsigned int num_blks); +asmlinkage void sha256_block_data_order(struct sha256_state *state, + const u8 *data, int num_blks); int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data, unsigned int len) @@ -33,23 +33,20 @@ int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data, /* make sure casting to sha256_block_fn() is safe */ BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0); - return sha256_base_do_update(desc, data, len, - (sha256_block_fn *)sha256_block_data_order); + return sha256_base_do_update(desc, data, len, sha256_block_data_order); } EXPORT_SYMBOL(crypto_sha256_arm_update); static int crypto_sha256_arm_final(struct shash_desc *desc, u8 *out) { - sha256_base_do_finalize(desc, - (sha256_block_fn *)sha256_block_data_order); + sha256_base_do_finalize(desc, sha256_block_data_order); return sha256_base_finish(desc, out); } int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - sha256_base_do_update(desc, data, len, - (sha256_block_fn *)sha256_block_data_order); + sha256_base_do_update(desc, data, len, sha256_block_data_order); return crypto_sha256_arm_final(desc, out); } EXPORT_SYMBOL(crypto_sha256_arm_finup); diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c index 0635a65aa488..1be5bd498af3 100644 --- a/arch/arm/crypto/sha512-glue.c +++ b/arch/arm/crypto/sha512-glue.c @@ -25,27 +25,25 @@ MODULE_ALIAS_CRYPTO("sha512"); MODULE_ALIAS_CRYPTO("sha384-arm"); MODULE_ALIAS_CRYPTO("sha512-arm"); 
-asmlinkage void sha512_block_data_order(u64 *state, u8 const *src, int blocks); +asmlinkage void sha512_block_data_order(struct sha512_state *state, + u8 const *src, int blocks); int sha512_arm_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - return sha512_base_do_update(desc, data, len, - (sha512_block_fn *)sha512_block_data_order); + return sha512_base_do_update(desc, data, len, sha512_block_data_order); } static int sha512_arm_final(struct shash_desc *desc, u8 *out) { - sha512_base_do_finalize(desc, - (sha512_block_fn *)sha512_block_data_order); + sha512_base_do_finalize(desc, sha512_block_data_order); return sha512_base_finish(desc, out); } int sha512_arm_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - sha512_base_do_update(desc, data, len, - (sha512_block_fn *)sha512_block_data_order); + sha512_base_do_update(desc, data, len, sha512_block_data_order); return sha512_arm_final(desc, out); } From 0e8fca2f12ceb77c3a6b6f210135031f264aa612 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Tue, 13 Feb 2024 11:34:28 -0600 Subject: [PATCH 51/79] crypto: ccp - Avoid discarding errors in psp_send_platform_access_msg() Errors can potentially occur in the "processing" of PSP commands or commands can be processed successfully but still return an error code in the header. This second case was being discarded because PSP communication worked but the command returned an error code in the payload header. Capture both cases and return them to the caller as -EIO for the caller to investigate. The caller can detect the latter by looking at `req->header->status`. Reported-and-tested-by: Tim Van Patten Fixes: 7ccc4f4e2e50 ("crypto: ccp - Add support for an interface for platform features") Signed-off-by: Mario Limonciello Signed-off-by: Herbert Xu --- drivers/crypto/ccp/platform-access.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/platform-access.c b/drivers/crypto/ccp/platform-access.c index 94367bc49e35..1b8ed3389733 100644 --- a/drivers/crypto/ccp/platform-access.c +++ b/drivers/crypto/ccp/platform-access.c @@ -118,9 +118,16 @@ int psp_send_platform_access_msg(enum psp_platform_access_msg msg, goto unlock; } - /* Store the status in request header for caller to investigate */ + /* + * Read status from PSP. If status is non-zero, it indicates an error + * occurred during "processing" of the command. + * If status is zero, it indicates the command was "processed" + * successfully, but the result of the command is in the payload. + * Return both cases to the caller as -EIO to investigate. + */ cmd_reg = ioread32(cmd); - req->header.status = FIELD_GET(PSP_CMDRESP_STS, cmd_reg); + if (FIELD_GET(PSP_CMDRESP_STS, cmd_reg)) + req->header.status = FIELD_GET(PSP_CMDRESP_STS, cmd_reg); if (req->header.status) { ret = -EIO; goto unlock; From 14af865be47ac16ba9f3c98d031dc1f30cb1a642 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Tue, 13 Feb 2024 11:34:29 -0600 Subject: [PATCH 52/79] crypto: ccp - Update return values for some unit tests Until authenticated the platform enforces a state machine. Adjust unit tests with this in mind. 
Correct the return codes for all the states the unit tests end up hitting: * Set Param / Get Param: DBC_ERROR_BAD_STATE * Set UID: DBC_ERROR_SIGNATURE_INVALID * Authenticated Nonce: DBC_ERROR_BAD_PARAMETERS Signed-off-by: Mario Limonciello Signed-off-by: Herbert Xu --- tools/crypto/ccp/test_dbc.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tools/crypto/ccp/test_dbc.py b/tools/crypto/ccp/test_dbc.py index 79de3638a01a..bb0e671be96d 100755 --- a/tools/crypto/ccp/test_dbc.py +++ b/tools/crypto/ccp/test_dbc.py @@ -138,12 +138,14 @@ class TestInvalidSignature(DynamicBoostControlTest): def test_authenticated_nonce(self) -> None: """fetch authenticated nonce""" + get_nonce(self.d, None) with self.assertRaises(OSError) as error: get_nonce(self.d, self.signature) - self.assertEqual(error.exception.errno, 1) + self.assertEqual(error.exception.errno, 22) def test_set_uid(self) -> None: """set uid""" + get_nonce(self.d, None) with self.assertRaises(OSError) as error: set_uid(self.d, self.uid, self.signature) self.assertEqual(error.exception.errno, 1) @@ -152,13 +154,13 @@ class TestInvalidSignature(DynamicBoostControlTest): """fetch a parameter""" with self.assertRaises(OSError) as error: process_param(self.d, PARAM_GET_SOC_PWR_CUR, self.signature) - self.assertEqual(error.exception.errno, 1) + self.assertEqual(error.exception.errno, 11) def test_set_param(self) -> None: """set a parameter""" with self.assertRaises(OSError) as error: process_param(self.d, PARAM_SET_PWR_CAP, self.signature, 1000) - self.assertEqual(error.exception.errno, 1) + self.assertEqual(error.exception.errno, 11) class TestUnFusedSystem(DynamicBoostControlTest): From bcc06e1b3dadc76140203753a08979374c965ada Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 13 Feb 2024 21:09:41 +0300 Subject: [PATCH 53/79] crypto: qat - uninitialized variable in adf_hb_error_inject_write() There are a few issues in this code. If *ppos is non-zero then the first part of the buffer is not initialized. We never initialize the last character of the buffer. The return value is not checked, so it's possible that none of the buffer is initialized. This is debugfs code, which is root-only, and the impact of these bugs is very small. However, it's still worth fixing. To fix this: 1) Check that *ppos is zero. 2) Use copy_from_user() instead of simple_write_to_buffer(). 3) Explicitly add a NUL terminator.
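Taken together, the fixed handler follows the usual pattern for small, fixed-size debugfs writes; a minimal sketch (buffer size, the accepted token '1' and the error codes mirror this file, while the function name is illustrative):

static ssize_t hb_error_inject_write_sketch(struct file *file,
					    const char __user *user_buf,
					    size_t count, loff_t *ppos)
{
	char buf[3];

	/* reject partial or oversized writes up front */
	if (*ppos != 0 || count != 2)
		return -EINVAL;
	if (copy_from_user(buf, user_buf, count))
		return -EFAULT;
	buf[count] = '\0';	/* explicit NUL terminator */
	if (buf[0] != '1')
		return -EINVAL;
	/* ... enable the error injection here ... */
	return count;
}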
Fixes: e2b67859ab6e ("crypto: qat - add heartbeat error simulator") Signed-off-by: Dan Carpenter Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- .../crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c index 5cd6c2d6f90a..cccdff24b48d 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c @@ -160,16 +160,17 @@ static ssize_t adf_hb_error_inject_write(struct file *file, size_t count, loff_t *ppos) { struct adf_accel_dev *accel_dev = file->private_data; - size_t written_chars; char buf[3]; int ret; /* last byte left as string termination */ - if (count != 2) + if (*ppos != 0 || count != 2) return -EINVAL; - written_chars = simple_write_to_buffer(buf, sizeof(buf) - 1, - ppos, user_buf, count); + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + buf[count] = '\0'; + if (buf[0] != '1') return -EINVAL; @@ -183,7 +184,7 @@ static ssize_t adf_hb_error_inject_write(struct file *file, dev_info(&GET_DEV(accel_dev), "Heartbeat error injection enabled\n"); - return written_chars; + return count; } static const struct file_operations adf_hb_error_inject_fops = { From dfff0e35fa5dd84ae75052ba129b0219d83e46dc Mon Sep 17 00:00:00 2001 From: Adam Guerin Date: Fri, 16 Feb 2024 15:19:55 +0000 Subject: [PATCH 54/79] crypto: qat - remove unused macros in qat_comp_alg.c As a result of the removal of qat_zlib_deflate, some defines were not removed. Remove them. This is to fix the following warnings when compiling the QAT driver using the clang compiler with CC=clang W=2: drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:21:9: warning: macro is not used [-Wunused-macros] 21 | #define QAT_RFC_1950_CM_OFFSET 4 | ^ drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:16:9: warning: macro is not used [-Wunused-macros] 16 | #define QAT_RFC_1950_HDR_SIZE 2 | ^ drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:17:9: warning: macro is not used [-Wunused-macros] 17 | #define QAT_RFC_1950_FOOTER_SIZE 4 | ^ drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:22:9: warning: macro is not used [-Wunused-macros] 22 | #define QAT_RFC_1950_DICT_MASK 0x20 | ^ drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:18:9: warning: macro is not used [-Wunused-macros] 18 | #define QAT_RFC_1950_CM_DEFLATE 8 | ^ drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:20:9: warning: macro is not used [-Wunused-macros] 20 | #define QAT_RFC_1950_CM_MASK 0x0f | ^ drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:23:9: warning: macro is not used [-Wunused-macros] 23 | #define QAT_RFC_1950_COMP_HDR 0x785e | ^ drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:19:9: warning: macro is not used [-Wunused-macros] 19 | #define QAT_RFC_1950_CM_DEFLATE_CINFO_32K 7 | ^ Fixes: e9dd20e0e5f6 ("crypto: qat - Remove zlib-deflate") Signed-off-by: Adam Guerin Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/qat_comp_algs.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c index bf8c0ee62917..2ba4aa22e092 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c +++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c @@ -13,15 +13,6 @@ #include "qat_compression.h" #include "qat_algs_send.h" -#define
QAT_RFC_1950_HDR_SIZE 2 -#define QAT_RFC_1950_FOOTER_SIZE 4 -#define QAT_RFC_1950_CM_DEFLATE 8 -#define QAT_RFC_1950_CM_DEFLATE_CINFO_32K 7 -#define QAT_RFC_1950_CM_MASK 0x0f -#define QAT_RFC_1950_CM_OFFSET 4 -#define QAT_RFC_1950_DICT_MASK 0x20 -#define QAT_RFC_1950_COMP_HDR 0x785e - static DEFINE_MUTEX(algs_lock); static unsigned int active_devs; From 9a5dcada14d5e027856a1bc38443e54111438da6 Mon Sep 17 00:00:00 2001 From: Adam Guerin Date: Fri, 16 Feb 2024 15:19:56 +0000 Subject: [PATCH 55/79] crypto: qat - removed unused macro in adf_cnv_dbgfs.c This macro was added but never used; remove it. This is to fix the following warning when compiling the QAT driver using the clang compiler with CC=clang W=2: drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c:19:9: warning: macro is not used [-Wunused-macros] 19 | #define CNV_SLICE_ERR_MASK GENMASK(7, 0) | ^ Fixes: d807f0240c71 ("crypto: qat - add cnv_errors debugfs file") Signed-off-by: Adam Guerin Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c index 07119c487da0..627953a72d47 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c @@ -16,7 +16,6 @@ #define CNV_ERR_INFO_MASK GENMASK(11, 0) #define CNV_ERR_TYPE_MASK GENMASK(15, 12) -#define CNV_SLICE_ERR_MASK GENMASK(7, 0) #define CNV_SLICE_ERR_SIGN_BIT_INDEX 7 #define CNV_DELTA_ERR_SIGN_BIT_INDEX 11 From f99fb7d660f7c818105803f1f1915396a14d18ad Mon Sep 17 00:00:00 2001 From: Adam Guerin Date: Fri, 16 Feb 2024 15:19:57 +0000 Subject: [PATCH 56/79] crypto: qat - avoid division by zero Return -EINVAL if delta_us is zero. delta_us is unlikely to be zero as there is a sleep between the reads of the two timestamps. This is to fix the following warning when compiling the QAT driver using clang scan-build: drivers/crypto/intel/qat/qat_common/adf_clock.c:87:9: warning: Division by zero [core.DivideZero] 87 | temp = DIV_ROUND_CLOSEST_ULL(temp, delta_us); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Fixes: e2980ba57e79 ("crypto: qat - add measure clock frequency") Signed-off-by: Adam Guerin Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/adf_clock.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_clock.c b/drivers/crypto/intel/qat/qat_common/adf_clock.c index 01e0a389e462..cf89f57de2a7 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_clock.c +++ b/drivers/crypto/intel/qat/qat_common/adf_clock.c @@ -83,6 +83,9 @@ static int measure_clock(struct adf_accel_dev *accel_dev, u32 *frequency) } delta_us = timespec_to_us(&ts3) - timespec_to_us(&ts1); + if (!delta_us) + return -EINVAL; + temp = (timestamp2 - timestamp1) * ME_CLK_DIVIDER * 10; temp = DIV_ROUND_CLOSEST_ULL(temp, delta_us); /* From a66cf93ab33853f17b8cc33a99263dd0a383a1a1 Mon Sep 17 00:00:00 2001 From: Adam Guerin Date: Fri, 16 Feb 2024 15:19:58 +0000 Subject: [PATCH 57/79] crypto: qat - remove double initialization of value Remove double initialization of the reg variable.
This is to fix the following warning when compiling the QAT driver using clang scan-build: drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c:1010:6: warning: Value stored to 'reg' during its initialization is never read [deadcode.DeadStores] 1010 | u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SSMCPPERR); | ^~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c:1109:6: warning: Value stored to 'reg' during its initialization is never read [deadcode.DeadStores] 1109 | u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SER_ERR_SSMSH); | ^~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Fixes: 99b1c9826e48 ("crypto: qat - count QAT GEN4 errors") Signed-off-by: Adam Guerin Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c index 048c24607939..2dd3772bf58a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -1007,8 +1007,7 @@ static bool adf_handle_spppar_err(struct adf_accel_dev *accel_dev, static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 iastatssm) { - u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SSMCPPERR); - u32 bits_num = BITS_PER_REG(reg); + u32 reg, bits_num = BITS_PER_REG(reg); bool reset_required = false; unsigned long errs_bits; u32 bit_iterator; @@ -1106,8 +1105,7 @@ static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev, static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 iastatssm) { - u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SER_ERR_SSMSH); - u32 bits_num = BITS_PER_REG(reg); + u32 reg, bits_num = BITS_PER_REG(reg); bool reset_required = false; unsigned long errs_bits; u32 bit_iterator; From ff391345141e727320ca906e6928c6a1f14e7e37 Mon Sep 17 00:00:00 2001 From: Adam Guerin Date: Fri, 16 Feb 2024 15:19:59 +0000 Subject: [PATCH 58/79] crypto: qat - remove unnecessary description from comment Remove extra description from comments as it is not required. This is to fix the following warning when compiling the QAT driver using the clang compiler with CC=clang W=2: drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c:65: warning: contents before sections drivers/crypto/intel/qat/qat_common/adf_isr.c:380: warning: contents before sections drivers/crypto/intel/qat/qat_common/adf_vf_isr.c:298: warning: contents before sections Signed-off-by: Adam Guerin Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c | 4 ++-- drivers/crypto/intel/qat/qat_common/adf_isr.c | 2 -- drivers/crypto/intel/qat/qat_common/adf_vf_isr.c | 2 -- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c index 86ee36feefad..f07b748795f7 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c @@ -60,10 +60,10 @@ static int adf_get_vf_real_id(u32 fake) /** * adf_clean_vf_map() - Cleans VF id mapings - * - * Function cleans internal ids for virtual functions. * @vf: flag indicating whether mappings is cleaned * for vfs only or for vfs and pfs + * + * Function cleans internal ids for virtual functions. 
*/ void adf_clean_vf_map(bool vf) { diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c index 020d213f4c99..cae1aee5479a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_isr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c @@ -380,8 +380,6 @@ EXPORT_SYMBOL_GPL(adf_isr_resource_alloc); /** * adf_init_misc_wq() - Init misc workqueue * - * Function init workqueue 'qat_misc_wq' for general purpose. - * * Return: 0 on success, error code otherwise. */ int __init adf_init_misc_wq(void) diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c index b05c3957a160..cdbb2d687b1b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c @@ -293,8 +293,6 @@ EXPORT_SYMBOL_GPL(adf_flush_vf_wq); /** * adf_init_vf_wq() - Init workqueue for VF * - * Function init workqueue 'adf_vf_stop_wq' for VF. - * * Return: 0 on success, error code otherwise. */ int __init adf_init_vf_wq(void) From bca79b9f5639b2fd4692904bce696291336e0246 Mon Sep 17 00:00:00 2001 From: Adam Guerin Date: Fri, 16 Feb 2024 15:20:00 +0000 Subject: [PATCH 59/79] crypto: qat - fix comment structure Move comment description to the same line as the function name. This is to fix the following warning when compiling the QAT driver using the clang compiler with CC=clang W=2: drivers/crypto/intel/qat/qat_common/qat_crypto.c:108: warning: missing initial short description on line: * qat_crypto_vf_dev_config() Signed-off-by: Adam Guerin Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/qat_crypto.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/qat_crypto.c b/drivers/crypto/intel/qat/qat_common/qat_crypto.c index 40c8e74d1cf9..101c6ea41673 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/intel/qat/qat_common/qat_crypto.c @@ -105,8 +105,8 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node) } /** - * qat_crypto_vf_dev_config() - * create dev config required to create crypto inst. + * qat_crypto_vf_dev_config() - create dev config required to create + * crypto inst. * * @accel_dev: Pointer to acceleration device. * From df018f82002a8b4dc407bc9a6f416b9241d14415 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 16 Feb 2024 18:21:54 +0100 Subject: [PATCH 60/79] crypto: qat - fix ring to service map for dcc in 4xxx If a device is configured for data compression chaining (dcc), half of the engines are loaded with the symmetric crypto image and the rest are loaded with the compression image. However, in such configuration all rings can handle compression requests. Fix the ring to service mapping so that when a device is configured for dcc, the ring to service mapping reports that all rings in a bank can be used for compression. 
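The gist of the fix is an early branch before the fw_config based lookup (a sketch; SVC_DCC, RP_GROUP_COUNT, COMP and rps[] are existing driver symbols):

/* In dcc mode, report the compression service for every ring pair. */
if (adf_get_service_enabled(accel_dev) == SVC_DCC) {
	for (i = 0; i < RP_GROUP_COUNT; i++)
		rps[i] = COMP;
	goto set_mask;	/* skip the per-group service lookup */
}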
Fixes: a238487f7965 ("crypto: qat - fix ring to service map for QAT GEN4") Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 94a0ebb03d8c..e171cddf6f02 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -331,6 +331,13 @@ static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) if (!fw_config) return 0; + /* If dcc, all rings handle compression requests */ + if (adf_get_service_enabled(accel_dev) == SVC_DCC) { + for (i = 0; i < RP_GROUP_COUNT; i++) + rps[i] = COMP; + goto set_mask; + } + for (i = 0; i < RP_GROUP_COUNT; i++) { switch (fw_config[i].ae_mask) { case ADF_AE_GROUP_0: @@ -359,6 +366,7 @@ static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) } } +set_mask: ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | From a20a6060e0dd57fecaf55487985aef28bd08c6bf Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 16 Feb 2024 18:21:55 +0100 Subject: [PATCH 61/79] crypto: qat - fix ring to service map for dcc in 420xx If a device is configured for data compression chaining (dcc), half of the engines are loaded with the symmetric crypto image and the rest are loaded with the compression image. However, in such configuration all rings can handle compression requests. Fix the ring to service mapping so that when a device is configured for dcc, the ring to service mapping reports that all rings in a bank can be used for compression. Fixes: fcf60f4bcf54 ("crypto: qat - add support for 420xx devices") Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index a87d29ae724f..7909b51e97c3 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -372,6 +372,13 @@ static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) if (!fw_config) return 0; + /* If dcc, all rings handle compression requests */ + if (adf_get_service_enabled(accel_dev) == SVC_DCC) { + for (i = 0; i < RP_GROUP_COUNT; i++) + rps[i] = COMP; + goto set_mask; + } + for (i = 0; i < RP_GROUP_COUNT; i++) { switch (fw_config[i].ae_mask) { case ADF_AE_GROUP_0: @@ -400,6 +407,7 @@ static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) } } +set_mask: ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | From ed3d95fe788dec7c23bb20b41f8af47cbce04715 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 16 Feb 2024 18:21:56 +0100 Subject: [PATCH 62/79] crypto: qat - make ring to service map common for QAT GEN4 The function get_ring_to_svc_map() is present in both 420xx and 4xxx drivers. Rework the logic to make it generic to GEN4 devices and move it to qat_common/adf_gen4_hw_data.c. 
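The key enabler is a small per-device callback, so the common GEN4 helper never has to touch the device-private fw_config tables; both drivers gain a hook of this shape (as added below):

static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	const struct adf_fw_config *fw_config;

	if (obj_num >= uof_get_num_objs(accel_dev))
		return -EINVAL;

	fw_config = get_fw_config(accel_dev);
	if (!fw_config)
		return -EINVAL;

	return fw_config[obj_num].obj;
}

With this in place, adf_gen4_get_ring_to_svc_map() only needs the existing uof_get_num_objs(), uof_get_ae_mask() and get_rp_group() callbacks plus the new one to rebuild the mapping generically.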
Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- .../intel/qat/qat_420xx/adf_420xx_hw_data.c | 72 +++++-------------- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 72 +++++-------------- .../intel/qat/qat_common/adf_accel_devices.h | 1 + .../intel/qat/qat_common/adf_gen4_hw_data.c | 56 +++++++++++++++ .../intel/qat/qat_common/adf_gen4_hw_data.h | 1 + 5 files changed, 90 insertions(+), 112 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index 7909b51e97c3..1102c47f8293 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -361,61 +361,6 @@ static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num) } } -static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) -{ - enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { }; - const struct adf_fw_config *fw_config; - u16 ring_to_svc_map; - int i, j; - - fw_config = get_fw_config(accel_dev); - if (!fw_config) - return 0; - - /* If dcc, all rings handle compression requests */ - if (adf_get_service_enabled(accel_dev) == SVC_DCC) { - for (i = 0; i < RP_GROUP_COUNT; i++) - rps[i] = COMP; - goto set_mask; - } - - for (i = 0; i < RP_GROUP_COUNT; i++) { - switch (fw_config[i].ae_mask) { - case ADF_AE_GROUP_0: - j = RP_GROUP_0; - break; - case ADF_AE_GROUP_1: - j = RP_GROUP_1; - break; - default: - return 0; - } - - switch (fw_config[i].obj) { - case ADF_FW_SYM_OBJ: - rps[j] = SYM; - break; - case ADF_FW_ASYM_OBJ: - rps[j] = ASYM; - break; - case ADF_FW_DC_OBJ: - rps[j] = COMP; - break; - default: - rps[j] = 0; - break; - } - } - -set_mask: - ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | - rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | - rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | - rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT; - - return ring_to_svc_map; -} - static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num, const char * const fw_objs[], int num_objs) { @@ -441,6 +386,20 @@ static const char *uof_get_name_420xx(struct adf_accel_dev *accel_dev, u32 obj_n return uof_get_name(accel_dev, obj_num, adf_420xx_fw_objs, num_fw_objs); } +static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + if (obj_num >= uof_get_num_objs(accel_dev)) + return -EINVAL; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return -EINVAL; + + return fw_config[obj_num].obj; +} + static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) { const struct adf_fw_config *fw_config; @@ -504,12 +463,13 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->fw_mmp_name = ADF_420XX_MMP; hw_data->uof_get_name = uof_get_name_420xx; hw_data->uof_get_num_objs = uof_get_num_objs; + hw_data->uof_get_obj_type = uof_get_obj_type; hw_data->uof_get_ae_mask = uof_get_ae_mask; hw_data->get_rp_group = get_rp_group; hw_data->get_ena_thd_mask = get_ena_thd_mask; hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable; hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; - hw_data->get_ring_to_svc_map = get_ring_to_svc_map; + hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map; hw_data->disable_iov = adf_disable_sriov; hw_data->ring_pair_reset = adf_gen4_ring_pair_reset; hw_data->enable_pm = adf_gen4_enable_pm; diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c 
b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index e171cddf6f02..927506cf271d 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -320,61 +320,6 @@ static u32 get_ena_thd_mask_401xx(struct adf_accel_dev *accel_dev, u32 obj_num) } } -static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) -{ - enum adf_cfg_service_type rps[RP_GROUP_COUNT]; - const struct adf_fw_config *fw_config; - u16 ring_to_svc_map; - int i, j; - - fw_config = get_fw_config(accel_dev); - if (!fw_config) - return 0; - - /* If dcc, all rings handle compression requests */ - if (adf_get_service_enabled(accel_dev) == SVC_DCC) { - for (i = 0; i < RP_GROUP_COUNT; i++) - rps[i] = COMP; - goto set_mask; - } - - for (i = 0; i < RP_GROUP_COUNT; i++) { - switch (fw_config[i].ae_mask) { - case ADF_AE_GROUP_0: - j = RP_GROUP_0; - break; - case ADF_AE_GROUP_1: - j = RP_GROUP_1; - break; - default: - return 0; - } - - switch (fw_config[i].obj) { - case ADF_FW_SYM_OBJ: - rps[j] = SYM; - break; - case ADF_FW_ASYM_OBJ: - rps[j] = ASYM; - break; - case ADF_FW_DC_OBJ: - rps[j] = COMP; - break; - default: - rps[j] = 0; - break; - } - } - -set_mask: - ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | - rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | - rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | - rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT; - - return ring_to_svc_map; -} - static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num, const char * const fw_objs[], int num_objs) { @@ -407,6 +352,20 @@ static const char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_n return uof_get_name(accel_dev, obj_num, adf_402xx_fw_objs, num_fw_objs); } +static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + if (obj_num >= uof_get_num_objs(accel_dev)) + return -EINVAL; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return -EINVAL; + + return fw_config[obj_num].obj; +} + static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) { const struct adf_fw_config *fw_config; @@ -487,11 +446,12 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) break; } hw_data->uof_get_num_objs = uof_get_num_objs; + hw_data->uof_get_obj_type = uof_get_obj_type; hw_data->uof_get_ae_mask = uof_get_ae_mask; hw_data->get_rp_group = get_rp_group; hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable; hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; - hw_data->get_ring_to_svc_map = get_ring_to_svc_map; + hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map; hw_data->disable_iov = adf_disable_sriov; hw_data->ring_pair_reset = adf_gen4_ring_pair_reset; hw_data->enable_pm = adf_gen4_enable_pm; diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 0f26aa976c8c..08658c3a01e9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -248,6 +248,7 @@ struct adf_hw_device_data { void (*set_msix_rttable)(struct adf_accel_dev *accel_dev); const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num); u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev); + int (*uof_get_obj_type)(struct adf_accel_dev *accel_dev, u32 obj_num); u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); int (*get_rp_group)(struct 
adf_accel_dev *accel_dev, u32 ae_mask); u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index f752653ccb47..d28e1921940a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -4,6 +4,7 @@ #include "adf_accel_devices.h" #include "adf_cfg_services.h" #include "adf_common_drv.h" +#include "adf_fw_config.h" #include "adf_gen4_hw_data.h" #include "adf_gen4_pm.h" @@ -433,3 +434,58 @@ int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev) return 0; } EXPORT_SYMBOL_GPL(adf_gen4_init_thd2arb_map); + +u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { }; + unsigned int ae_mask, start_id, worker_obj_cnt, i; + u16 ring_to_svc_map; + int rp_group; + + if (!hw_data->get_rp_group || !hw_data->uof_get_ae_mask || + !hw_data->uof_get_obj_type || !hw_data->uof_get_num_objs) + return 0; + + /* If dcc, all rings handle compression requests */ + if (adf_get_service_enabled(accel_dev) == SVC_DCC) { + for (i = 0; i < RP_GROUP_COUNT; i++) + rps[i] = COMP; + goto set_mask; + } + + worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) - + ADF_GEN4_ADMIN_ACCELENGINES; + start_id = worker_obj_cnt - RP_GROUP_COUNT; + + for (i = start_id; i < worker_obj_cnt; i++) { + ae_mask = hw_data->uof_get_ae_mask(accel_dev, i); + rp_group = hw_data->get_rp_group(accel_dev, ae_mask); + if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0) + return 0; + + switch (hw_data->uof_get_obj_type(accel_dev, i)) { + case ADF_FW_SYM_OBJ: + rps[rp_group] = SYM; + break; + case ADF_FW_ASYM_OBJ: + rps[rp_group] = ASYM; + break; + case ADF_FW_DC_OBJ: + rps[rp_group] = COMP; + break; + default: + rps[rp_group] = 0; + break; + } + } + +set_mask: + ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | + rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | + rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | + rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT; + + return ring_to_svc_map; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_ring_to_svc_map); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index 7d8a774cadc8..c6e80df5a85a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -235,5 +235,6 @@ int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number); void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev); void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev); +u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev); #endif From e63df1ec9a16dd9e13e9068243e64876de06f795 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 17 Feb 2024 08:55:13 -0800 Subject: [PATCH 63/79] crypto: jitter - fix CRYPTO_JITTERENTROPY help text Correct various small problems in the help text: a. change 2 spaces to ", " b. finish an incomplete sentence c. 
change non-working URL to working URL Fixes: a9a98d49da52 ("crypto: Kconfig - simplify compression/RNG entries") Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218458 Signed-off-by: Randy Dunlap Cc: Bagas Sanjaya Cc: Robert Elliott Cc: Christoph Biedl Cc: Herbert Xu Cc: "David S. Miller" Cc: linux-crypto@vger.kernel.org Acked-by: Bagas Sanjaya Signed-off-by: Herbert Xu --- crypto/Kconfig | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index 7d156c75f15f..44661c2e30ca 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1269,10 +1269,11 @@ config CRYPTO_JITTERENTROPY A non-physical non-deterministic ("true") RNG (e.g., an entropy source compliant with NIST SP800-90B) intended to provide a seed to a - deterministic RNG (e.g. per NIST SP800-90C). + deterministic RNG (e.g., per NIST SP800-90C). This RNG does not perform any cryptographic whitening of the generated + random numbers. - See https://www.chronox.de/jent.html + See https://www.chronox.de/jent/ if CRYPTO_JITTERENTROPY if CRYPTO_FIPS && EXPERT From a24e3b583ea2db3418f0c6ae1f12b07ca96531cc Mon Sep 17 00:00:00 2001 From: Kilian Zinnecker Date: Sun, 18 Feb 2024 23:16:58 +0100 Subject: [PATCH 64/79] crypto: rockchip - fix to check return value crypto_engine_alloc_init may fail, e.g., as a result of a failure in devm_kzalloc or kthread_create_worker. Other drivers (e.g., amlogic-gxl-core.c, aspeed-acry.c, aspeed-hace.c, jr.c, etc.) check crypto_engine_alloc_init's return value and return -ENOMEM in case a NULL pointer is returned. This patch inserts a corresponding return value check into rk3288_crypto.c. Signed-off-by: Kilian Zinnecker Signed-off-by: Herbert Xu --- drivers/crypto/rockchip/rk3288_crypto.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c index 70edf40bc523..f74b3c81ba6d 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.c +++ b/drivers/crypto/rockchip/rk3288_crypto.c @@ -371,6 +371,11 @@ static int rk_crypto_probe(struct platform_device *pdev) } crypto_info->engine = crypto_engine_alloc_init(&pdev->dev, true); + if (!crypto_info->engine) { + err = -ENOMEM; + goto err_crypto; + } + crypto_engine_start(crypto_info->engine); init_completion(&crypto_info->complete); From f66a211e8c5d9b1b0531364c1b16874b0499e0d2 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 21 Feb 2024 13:19:15 +0800 Subject: [PATCH 65/79] crypto: dh - Make public key test FIPS-only The function dh_is_pubkey_valid was added for FIPS but it was only partially conditional on fips_enabled. In particular, the first test in the function relies on the last test to work properly, but the last test is only run in FIPS mode. Fix this inconsistency by making the whole function conditional on fips_enabled. Signed-off-by: Herbert Xu --- crypto/dh.c | 63 +++++++++++++++++++++++++++-------------------------- 1 file changed, 32 insertions(+), 31 deletions(-) diff --git a/crypto/dh.c b/crypto/dh.c index 0fcad279e6fe..68d11d66c0b5 100644 --- a/crypto/dh.c +++ b/crypto/dh.c @@ -106,6 +106,12 @@ err_clear_ctx: */ static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y) { + MPI val, q; + int ret; + + if (!fips_enabled) + return 0; + if (unlikely(!ctx->p)) return -EINVAL; @@ -125,41 +131,36 @@ static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y) * * For the safe-prime groups q = (p - 1)/2.
*/ - if (fips_enabled) { - MPI val, q; - int ret; - - val = mpi_alloc(0); - if (!val) - return -ENOMEM; - - q = mpi_alloc(mpi_get_nlimbs(ctx->p)); - if (!q) { - mpi_free(val); - return -ENOMEM; - } - - /* - * ->p is odd, so no need to explicitly subtract one - * from it before shifting to the right. - */ - mpi_rshift(q, ctx->p, 1); - - ret = mpi_powm(val, y, q, ctx->p); - mpi_free(q); - if (ret) { - mpi_free(val); - return ret; - } - - ret = mpi_cmp_ui(val, 1); + val = mpi_alloc(0); + if (!val) + return -ENOMEM; + q = mpi_alloc(mpi_get_nlimbs(ctx->p)); + if (!q) { mpi_free(val); - - if (ret != 0) - return -EINVAL; + return -ENOMEM; } + /* + * ->p is odd, so no need to explicitly subtract one + * from it before shifting to the right. + */ + mpi_rshift(q, ctx->p, 1); + + ret = mpi_powm(val, y, q, ctx->p); + mpi_free(q); + if (ret) { + mpi_free(val); + return ret; + } + + ret = mpi_cmp_ui(val, 1); + + mpi_free(val); + + if (ret != 0) + return -EINVAL; + return 0; } From 2beb81fbf0c01a62515a1bcef326168494ee2bd0 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 23 Feb 2024 01:03:34 -0800 Subject: [PATCH 66/79] crypto: remove CONFIG_CRYPTO_STATS Remove support for the "Crypto usage statistics" feature (CONFIG_CRYPTO_STATS). This feature does not appear to have ever been used, and it is harmful because it significantly reduces performance and is a large maintenance burden. Covering each of these points in detail: 1. Feature is not being used Since these generic crypto statistics are only readable using netlink, it's fairly straightforward to look for programs that use them. I'm unable to find any evidence that any such programs exist. For example, Debian Code Search returns no hits except the kernel header and kernel code itself and translations of the kernel header: https://codesearch.debian.net/search?q=CRYPTOCFGA_STAT&literal=1&perpkg=1 The patch series that added this feature in 2018 (https://lore.kernel.org/linux-crypto/1537351855-16618-1-git-send-email-clabbe@baylibre.com/) said "The goal is to have an ifconfig for crypto device." This doesn't appear to have happened. It's not clear that there is real demand for crypto statistics. Just because the kernel provides other types of statistics such as I/O and networking statistics and some people find those useful does not mean that crypto statistics are useful too. Further evidence that programs are not using CONFIG_CRYPTO_STATS is that it was able to be disabled in RHEL and Fedora as a bug fix (https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/2947). Even further evidence comes from the fact that there are and have been bugs in how the stats work, but they were never reported. For example, before Linux v6.7 hash stats were double-counted in most cases. There has also never been any documentation for this feature, so it might be hard to use even if someone wanted to. 2. CONFIG_CRYPTO_STATS significantly reduces performance Enabling CONFIG_CRYPTO_STATS significantly reduces the performance of the crypto API, even if no program ever retrieves the statistics. This primarily affects systems with large number of CPUs. For example, https://bugs.launchpad.net/ubuntu/+source/linux/+bug/2039576 reported that Lustre client encryption performance improved from 21.7GB/s to 48.2GB/s by disabling CONFIG_CRYPTO_STATS. It can be argued that this means that CONFIG_CRYPTO_STATS should be optimized with per-cpu counters similar to many of the networking counters. But no one has done this in 5+ years. 
This is consistent with the fact that the feature appears to be unused, so there seems to be little interest in improving it as opposed to just disabling it. It can be argued that because CONFIG_CRYPTO_STATS is off by default, performance doesn't matter. But Linux distros tend to error on the side of enabling options. The option is enabled in Ubuntu and Arch Linux, and until recently was enabled in RHEL and Fedora (see above). So, even just having the option available is harmful to users. 3. CONFIG_CRYPTO_STATS is a large maintenance burden There are over 1000 lines of code associated with CONFIG_CRYPTO_STATS, spread among 32 files. It significantly complicates much of the implementation of the crypto API. After the initial submission, many fixes and refactorings have consumed effort of multiple people to keep this feature "working". We should be spending this effort elsewhere. Cc: Corentin Labbe Signed-off-by: Eric Biggers Acked-by: Ard Biesheuvel Acked-by: Corentin Labbe Signed-off-by: Herbert Xu --- arch/s390/configs/debug_defconfig | 1 - arch/s390/configs/defconfig | 1 - crypto/Kconfig | 20 --- crypto/Makefile | 2 - crypto/acompress.c | 47 +---- crypto/aead.c | 84 +-------- crypto/ahash.c | 63 +------ crypto/akcipher.c | 31 ---- crypto/compress.h | 5 - crypto/{crypto_user_base.c => crypto_user.c} | 10 +- crypto/crypto_user_stat.c | 176 ------------------- crypto/hash.h | 30 ---- crypto/kpp.c | 30 ---- crypto/lskcipher.c | 73 +------- crypto/rng.c | 44 +---- crypto/scompress.c | 8 +- crypto/shash.c | 75 +------- crypto/sig.c | 13 -- crypto/skcipher.c | 86 +-------- crypto/skcipher.h | 10 -- include/crypto/acompress.h | 90 +--------- include/crypto/aead.h | 21 --- include/crypto/akcipher.h | 78 +------- include/crypto/algapi.h | 3 - include/crypto/hash.h | 22 --- include/crypto/internal/acompress.h | 7 +- include/crypto/internal/cryptouser.h | 16 -- include/crypto/internal/scompress.h | 8 +- include/crypto/kpp.h | 58 +----- include/crypto/rng.h | 51 +----- include/crypto/skcipher.h | 25 --- include/uapi/linux/cryptouser.h | 28 +-- 32 files changed, 77 insertions(+), 1139 deletions(-) rename crypto/{crypto_user_base.c => crypto_user.c} (98%) delete mode 100644 crypto/crypto_user_stat.c delete mode 100644 include/crypto/internal/cryptouser.h diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index cae2dd34fbb4..063f0c11087d 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -766,7 +766,6 @@ CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m -CONFIG_CRYPTO_STATS=y CONFIG_CRYPTO_CRC32_S390=y CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_SHA1_S390=m diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 42b988873e54..ab608ce768b7 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -752,7 +752,6 @@ CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m -CONFIG_CRYPTO_STATS=y CONFIG_CRYPTO_CRC32_S390=y CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_SHA1_S390=m diff --git a/crypto/Kconfig b/crypto/Kconfig index 44661c2e30ca..f937142aa94d 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1456,26 +1456,6 @@ config CRYPTO_USER_API_ENABLE_OBSOLETE already been phased out from internal use by the kernel, and are only useful for userspace clients that still rely on them. 
-config CRYPTO_STATS - bool "Crypto usage statistics" - depends on CRYPTO_USER - help - Enable the gathering of crypto stats. - - Enabling this option reduces the performance of the crypto API. It - should only be enabled when there is actually a use case for it. - - This collects data sizes, numbers of requests, and numbers - of errors processed by: - - AEAD ciphers (encrypt, decrypt) - - asymmetric key ciphers (encrypt, decrypt, verify, sign) - - symmetric key ciphers (encrypt, decrypt) - - compression algorithms (compress, decompress) - - hash algorithms (hash) - - key-agreement protocol primitives (setsecret, generate - public key, compute shared secret) - - RNG (generate, seed) - endmenu config CRYPTO_HASH_INFO diff --git a/crypto/Makefile b/crypto/Makefile index 408f0a1f9ab9..de9a3312a2c8 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -69,8 +69,6 @@ cryptomgr-y := algboss.o testmgr.o obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o obj-$(CONFIG_CRYPTO_USER) += crypto_user.o -crypto_user-y := crypto_user_base.o -crypto_user-$(CONFIG_CRYPTO_STATS) += crypto_user_stat.o obj-$(CONFIG_CRYPTO_CMAC) += cmac.o obj-$(CONFIG_CRYPTO_HMAC) += hmac.o obj-$(CONFIG_CRYPTO_VMAC) += vmac.o diff --git a/crypto/acompress.c b/crypto/acompress.c index 1c682810a484..484a865b23cd 100644 --- a/crypto/acompress.c +++ b/crypto/acompress.c @@ -25,7 +25,7 @@ static const struct crypto_type crypto_acomp_type; static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg) { - return container_of(alg, struct acomp_alg, calg.base); + return container_of(alg, struct acomp_alg, base); } static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm) @@ -93,32 +93,6 @@ static unsigned int crypto_acomp_extsize(struct crypto_alg *alg) return extsize; } -static inline int __crypto_acomp_report_stat(struct sk_buff *skb, - struct crypto_alg *alg) -{ - struct comp_alg_common *calg = __crypto_comp_alg_common(alg); - struct crypto_istat_compress *istat = comp_get_stat(calg); - struct crypto_stat_compress racomp; - - memset(&racomp, 0, sizeof(racomp)); - - strscpy(racomp.type, "acomp", sizeof(racomp.type)); - racomp.stat_compress_cnt = atomic64_read(&istat->compress_cnt); - racomp.stat_compress_tlen = atomic64_read(&istat->compress_tlen); - racomp.stat_decompress_cnt = atomic64_read(&istat->decompress_cnt); - racomp.stat_decompress_tlen = atomic64_read(&istat->decompress_tlen); - racomp.stat_err_cnt = atomic64_read(&istat->err_cnt); - - return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp); -} - -#ifdef CONFIG_CRYPTO_STATS -int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg) -{ - return __crypto_acomp_report_stat(skb, alg); -} -#endif - static const struct crypto_type crypto_acomp_type = { .extsize = crypto_acomp_extsize, .init_tfm = crypto_acomp_init_tfm, @@ -127,9 +101,6 @@ static const struct crypto_type crypto_acomp_type = { #endif #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_acomp_report, -#endif -#ifdef CONFIG_CRYPTO_STATS - .report_stat = crypto_acomp_report_stat, #endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK, @@ -182,24 +153,12 @@ void acomp_request_free(struct acomp_req *req) } EXPORT_SYMBOL_GPL(acomp_request_free); -void comp_prepare_alg(struct comp_alg_common *alg) -{ - struct crypto_istat_compress *istat = comp_get_stat(alg); - struct crypto_alg *base = &alg->base; - - base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; - - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) - memset(istat, 0, sizeof(*istat)); -} - int 
crypto_register_acomp(struct acomp_alg *alg)
 {
-	struct crypto_alg *base = &alg->calg.base;
-
-	comp_prepare_alg(&alg->calg);
+	struct crypto_alg *base = &alg->base;
 
 	base->cra_type = &crypto_acomp_type;
+	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 	base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
 
 	return crypto_register_alg(base);
diff --git a/crypto/aead.c b/crypto/aead.c
index 54906633566a..0e75a69189df 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -20,15 +20,6 @@
 
 #include "internal.h"
 
-static inline struct crypto_istat_aead *aead_get_stat(struct aead_alg *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
-	return &alg->stat;
-#else
-	return NULL;
-#endif
-}
-
 static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
 			    unsigned int keylen)
 {
@@ -90,62 +81,28 @@ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
 }
 EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
 
-static inline int crypto_aead_errstat(struct crypto_istat_aead *istat, int err)
-{
-	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
-		return err;
-
-	if (err && err != -EINPROGRESS && err != -EBUSY)
-		atomic64_inc(&istat->err_cnt);
-
-	return err;
-}
-
 int crypto_aead_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct aead_alg *alg = crypto_aead_alg(aead);
-	struct crypto_istat_aead *istat;
-	int ret;
-
-	istat = aead_get_stat(alg);
-
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
-		atomic64_inc(&istat->encrypt_cnt);
-		atomic64_add(req->cryptlen, &istat->encrypt_tlen);
-	}
 
 	if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
-		ret = -ENOKEY;
-	else
-		ret = alg->encrypt(req);
+		return -ENOKEY;
 
-	return crypto_aead_errstat(istat, ret);
+	return crypto_aead_alg(aead)->encrypt(req);
 }
 EXPORT_SYMBOL_GPL(crypto_aead_encrypt);
 
 int crypto_aead_decrypt(struct aead_request *req)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct aead_alg *alg = crypto_aead_alg(aead);
-	struct crypto_istat_aead *istat;
-	int ret;
-
-	istat = aead_get_stat(alg);
-
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
-		atomic64_inc(&istat->encrypt_cnt);
-		atomic64_add(req->cryptlen, &istat->encrypt_tlen);
-	}
 
 	if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
-		ret = -ENOKEY;
-	else if (req->cryptlen < crypto_aead_authsize(aead))
-		ret = -EINVAL;
-	else
-		ret = alg->decrypt(req);
+		return -ENOKEY;
 
-	return crypto_aead_errstat(istat, ret);
+	if (req->cryptlen < crypto_aead_authsize(aead))
+		return -EINVAL;
+
+	return crypto_aead_alg(aead)->decrypt(req);
 }
 EXPORT_SYMBOL_GPL(crypto_aead_decrypt);
 
@@ -215,26 +172,6 @@ static void crypto_aead_free_instance(struct crypto_instance *inst)
 	aead->free(aead);
 }
 
-static int __maybe_unused crypto_aead_report_stat(
-	struct sk_buff *skb, struct crypto_alg *alg)
-{
-	struct aead_alg *aead = container_of(alg, struct aead_alg, base);
-	struct crypto_istat_aead *istat = aead_get_stat(aead);
-	struct crypto_stat_aead raead;
-
-	memset(&raead, 0, sizeof(raead));
-
-	strscpy(raead.type, "aead", sizeof(raead.type));
-
-	raead.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
-	raead.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
-	raead.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
-	raead.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
-	raead.stat_err_cnt = atomic64_read(&istat->err_cnt);
-
-	return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
-}
-
 static const struct crypto_type crypto_aead_type = {
 	.extsize = crypto_alg_extsize,
 	.init_tfm = crypto_aead_init_tfm,
@@ -244,9 +181,6 @@ static const struct crypto_type crypto_aead_type = {
 #endif
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
 	.report = crypto_aead_report,
-#endif
-#ifdef CONFIG_CRYPTO_STATS
-	.report_stat = crypto_aead_report_stat,
 #endif
 	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
 	.maskset = CRYPTO_ALG_TYPE_MASK,
@@ -277,7 +211,6 @@ EXPORT_SYMBOL_GPL(crypto_has_aead);
 
 static int aead_prepare_alg(struct aead_alg *alg)
 {
-	struct crypto_istat_aead *istat = aead_get_stat(alg);
 	struct crypto_alg *base = &alg->base;
 
 	if (max3(alg->maxauthsize, alg->ivsize, alg->chunksize) >
@@ -291,9 +224,6 @@ static int aead_prepare_alg(struct aead_alg *alg)
 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 	base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;
 
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		memset(istat, 0, sizeof(*istat));
-
 	return 0;
 }
 
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 0ac83f7f701d..bcd9de009a91 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -27,22 +27,6 @@
 
 #define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e
 
-static inline struct crypto_istat_hash *ahash_get_stat(struct ahash_alg *alg)
-{
-	return hash_get_stat(&alg->halg);
-}
-
-static inline int crypto_ahash_errstat(struct ahash_alg *alg, int err)
-{
-	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
-		return err;
-
-	if (err && err != -EINPROGRESS && err != -EBUSY)
-		atomic64_inc(&ahash_get_stat(alg)->err_cnt);
-
-	return err;
-}
-
 /*
  * For an ahash tfm that is using an shash algorithm (instead of an ahash
  * algorithm), this returns the underlying shash tfm.
@@ -344,75 +328,47 @@ static void ahash_restore_req(struct ahash_request *req, int err)
 
 int crypto_ahash_update(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct ahash_alg *alg;
 
 	if (likely(tfm->using_shash))
 		return shash_ahash_update(req, ahash_request_ctx(req));
 
-	alg = crypto_ahash_alg(tfm);
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		atomic64_add(req->nbytes, &ahash_get_stat(alg)->hash_tlen);
-	return crypto_ahash_errstat(alg, alg->update(req));
+	return crypto_ahash_alg(tfm)->update(req);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_update);
 
 int crypto_ahash_final(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct ahash_alg *alg;
 
 	if (likely(tfm->using_shash))
 		return crypto_shash_final(ahash_request_ctx(req), req->result);
 
-	alg = crypto_ahash_alg(tfm);
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		atomic64_inc(&ahash_get_stat(alg)->hash_cnt);
-	return crypto_ahash_errstat(alg, alg->final(req));
+	return crypto_ahash_alg(tfm)->final(req);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_final);
 
 int crypto_ahash_finup(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct ahash_alg *alg;
 
 	if (likely(tfm->using_shash))
 		return shash_ahash_finup(req, ahash_request_ctx(req));
 
-	alg = crypto_ahash_alg(tfm);
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
-		struct crypto_istat_hash *istat = ahash_get_stat(alg);
-
-		atomic64_inc(&istat->hash_cnt);
-		atomic64_add(req->nbytes, &istat->hash_tlen);
-	}
-	return crypto_ahash_errstat(alg, alg->finup(req));
+	return crypto_ahash_alg(tfm)->finup(req);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_finup);
 
 int crypto_ahash_digest(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct ahash_alg *alg;
-	int err;
 
 	if (likely(tfm->using_shash))
 		return shash_ahash_digest(req, prepare_shash_desc(req, tfm));
 
-	alg = crypto_ahash_alg(tfm);
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
-		struct crypto_istat_hash *istat = ahash_get_stat(alg);
-
-		atomic64_inc(&istat->hash_cnt);
-		atomic64_add(req->nbytes, &istat->hash_tlen);
-	}
-
 	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-		err = -ENOKEY;
-	else
-		err = alg->digest(req);
+		return -ENOKEY;
 
-	return crypto_ahash_errstat(alg, err);
+	return crypto_ahash_alg(tfm)->digest(req);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
 
@@ -571,12 +527,6 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
 			       __crypto_hash_alg_common(alg)->digestsize);
 }
 
-static int __maybe_unused crypto_ahash_report_stat(
-	struct sk_buff *skb, struct crypto_alg *alg)
-{
-	return crypto_hash_report_stat(skb, alg, "ahash");
-}
-
 static const struct crypto_type crypto_ahash_type = {
 	.extsize = crypto_ahash_extsize,
 	.init_tfm = crypto_ahash_init_tfm,
@@ -586,9 +536,6 @@ static const struct crypto_type crypto_ahash_type = {
 #endif
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
 	.report = crypto_ahash_report,
-#endif
-#ifdef CONFIG_CRYPTO_STATS
-	.report_stat = crypto_ahash_report_stat,
 #endif
 	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
 	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index 52813f0b19e4..e0ff5f4dda6d 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -70,30 +70,6 @@ static void crypto_akcipher_free_instance(struct crypto_instance *inst)
 	akcipher->free(akcipher);
 }
 
-static int __maybe_unused crypto_akcipher_report_stat(
-	struct sk_buff *skb, struct crypto_alg *alg)
-{
-	struct akcipher_alg *akcipher = __crypto_akcipher_alg(alg);
-	struct crypto_istat_akcipher *istat;
-	struct crypto_stat_akcipher rakcipher;
-
-	istat = akcipher_get_stat(akcipher);
-
-	memset(&rakcipher, 0, sizeof(rakcipher));
-
-	strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
-	rakcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
-	rakcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
-	rakcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
-	rakcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
-	rakcipher.stat_sign_cnt = atomic64_read(&istat->sign_cnt);
-	rakcipher.stat_verify_cnt = atomic64_read(&istat->verify_cnt);
-	rakcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
-
-	return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
-		       sizeof(rakcipher), &rakcipher);
-}
-
 static const struct crypto_type crypto_akcipher_type = {
 	.extsize = crypto_alg_extsize,
 	.init_tfm = crypto_akcipher_init_tfm,
@@ -103,9 +79,6 @@ static const struct crypto_type crypto_akcipher_type = {
 #endif
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
 	.report = crypto_akcipher_report,
-#endif
-#ifdef CONFIG_CRYPTO_STATS
-	.report_stat = crypto_akcipher_report_stat,
 #endif
 	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
 	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
@@ -131,15 +104,11 @@ EXPORT_SYMBOL_GPL(crypto_alloc_akcipher);
 
 static void akcipher_prepare_alg(struct akcipher_alg *alg)
 {
-	struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
 	struct crypto_alg *base = &alg->base;
 
 	base->cra_type = &crypto_akcipher_type;
 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 	base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
-
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		memset(istat, 0, sizeof(*istat));
 }
 
 static int akcipher_default_op(struct akcipher_request *req)
diff --git a/crypto/compress.h b/crypto/compress.h
index 19f65516d699..23ea43810810 100644
--- a/crypto/compress.h
+++ b/crypto/compress.h
@@ -12,15 +12,10 @@
 #include "internal.h"
 
 struct acomp_req;
-struct comp_alg_common;
 struct sk_buff;
 
 int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
 struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
 void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
 
-int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg);
-
-void comp_prepare_alg(struct comp_alg_common *alg);
-
 #endif	/* _LOCAL_CRYPTO_COMPRESS_H */
diff --git a/crypto/crypto_user_base.c b/crypto/crypto_user.c
similarity index 98%
rename from crypto/crypto_user_base.c
rename to crypto/crypto_user.c
index 3fa20f12989f..6c571834e86a 100644
--- a/crypto/crypto_user_base.c
+++ b/crypto/crypto_user.c
@@ -18,7 +18,6 @@
 #include
 #include
 #include
-#include
 
 #include "internal.h"
 
@@ -33,7 +32,7 @@ struct crypto_dump_info {
 	u16 nlmsg_flags;
 };
 
-struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
+static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
 {
 	struct crypto_alg *q, *alg = NULL;
 
@@ -387,6 +386,13 @@ static int crypto_del_rng(struct sk_buff *skb, struct nlmsghdr *nlh,
 	return crypto_del_default_rng();
 }
 
+static int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
+			     struct nlattr **attrs)
+{
+	/* No longer supported */
+	return -ENOTSUPP;
+}
+
 #define MSGSIZE(type) sizeof(struct type)
 
 static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c
deleted file mode 100644
index d4f3d39b5137..000000000000
--- a/crypto/crypto_user_stat.c
+++ /dev/null
@@ -1,176 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Crypto user configuration API.
- *
- * Copyright (C) 2017-2018 Corentin Labbe
- *
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
-
-struct crypto_dump_info {
-	struct sk_buff *in_skb;
-	struct sk_buff *out_skb;
-	u32 nlmsg_seq;
-	u16 nlmsg_flags;
-};
-
-static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
-{
-	struct crypto_stat_cipher rcipher;
-
-	memset(&rcipher, 0, sizeof(rcipher));
-
-	strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
-
-	return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
-}
-
-static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
-{
-	struct crypto_stat_compress rcomp;
-
-	memset(&rcomp, 0, sizeof(rcomp));
-
-	strscpy(rcomp.type, "compression", sizeof(rcomp.type));
-
-	return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp);
-}
-
-static int crypto_reportstat_one(struct crypto_alg *alg,
-				 struct crypto_user_alg *ualg,
-				 struct sk_buff *skb)
-{
-	memset(ualg, 0, sizeof(*ualg));
-
-	strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
-	strscpy(ualg->cru_driver_name, alg->cra_driver_name,
-		sizeof(ualg->cru_driver_name));
-	strscpy(ualg->cru_module_name, module_name(alg->cra_module),
-		sizeof(ualg->cru_module_name));
-
-	ualg->cru_type = 0;
-	ualg->cru_mask = 0;
-	ualg->cru_flags = alg->cra_flags;
-	ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);
-
-	if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
-		goto nla_put_failure;
-	if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
-		struct crypto_stat_larval rl;
-
-		memset(&rl, 0, sizeof(rl));
-		strscpy(rl.type, "larval", sizeof(rl.type));
-		if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL, sizeof(rl), &rl))
-			goto nla_put_failure;
-		goto out;
-	}
-
-	if (alg->cra_type && alg->cra_type->report_stat) {
-		if (alg->cra_type->report_stat(skb, alg))
-			goto nla_put_failure;
-		goto out;
-	}
-
-	switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
-	case CRYPTO_ALG_TYPE_CIPHER:
-		if (crypto_report_cipher(skb, alg))
-			goto nla_put_failure;
-		break;
-	case CRYPTO_ALG_TYPE_COMPRESS:
-		if (crypto_report_comp(skb, alg))
-			goto nla_put_failure;
-		break;
-	default:
-		pr_err("ERROR: Unhandled alg %d in %s\n",
-		       alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL),
-		       __func__);
-	}
-
-out:
-	return 0;
-
-nla_put_failure:
-	return -EMSGSIZE;
-}
-
-static int crypto_reportstat_alg(struct crypto_alg *alg,
-				 struct crypto_dump_info *info)
-{
-	struct sk_buff *in_skb = info->in_skb;
-	struct sk_buff *skb = info->out_skb;
-	struct nlmsghdr *nlh;
-	struct crypto_user_alg *ualg;
-	int err = 0;
-
-	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
-			CRYPTO_MSG_GETSTAT, sizeof(*ualg), info->nlmsg_flags);
-	if (!nlh) {
-		err = -EMSGSIZE;
-		goto out;
-	}
-
-	ualg = nlmsg_data(nlh);
-
-	err = crypto_reportstat_one(alg, ualg, skb);
-	if (err) {
-		nlmsg_cancel(skb, nlh);
-		goto out;
-	}
-
-	nlmsg_end(skb, nlh);
-
-out:
-	return err;
-}
-
-int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
-		      struct nlattr **attrs)
-{
-	struct net *net = sock_net(in_skb->sk);
-	struct crypto_user_alg *p = nlmsg_data(in_nlh);
-	struct crypto_alg *alg;
-	struct sk_buff *skb;
-	struct crypto_dump_info info;
-	int err;
-
-	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
-		return -EINVAL;
-
-	alg = crypto_alg_match(p, 0);
-	if (!alg)
-		return -ENOENT;
-
-	err = -ENOMEM;
-	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
-	if (!skb)
-		goto drop_alg;
-
-	info.in_skb = in_skb;
-	info.out_skb = skb;
-	info.nlmsg_seq = in_nlh->nlmsg_seq;
-	info.nlmsg_flags = 0;
-
-	err = crypto_reportstat_alg(alg, &info);
-
-drop_alg:
-	crypto_mod_put(alg);
-
-	if (err) {
-		kfree_skb(skb);
-		return err;
-	}
-
-	return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
-}
-
-MODULE_LICENSE("GPL");
diff --git a/crypto/hash.h b/crypto/hash.h
index 93f6ba0df263..cf9aee07f77d 100644
--- a/crypto/hash.h
+++ b/crypto/hash.h
@@ -8,39 +8,9 @@
 #define _LOCAL_CRYPTO_HASH_H
 
 #include
-#include
 
 #include "internal.h"
 
-static inline struct crypto_istat_hash *hash_get_stat(
-	struct hash_alg_common *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
-	return &alg->stat;
-#else
-	return NULL;
-#endif
-}
-
-static inline int crypto_hash_report_stat(struct sk_buff *skb,
-					  struct crypto_alg *alg,
-					  const char *type)
-{
-	struct hash_alg_common *halg = __crypto_hash_alg_common(alg);
-	struct crypto_istat_hash *istat = hash_get_stat(halg);
-	struct crypto_stat_hash rhash;
-
-	memset(&rhash, 0, sizeof(rhash));
-
-	strscpy(rhash.type, type, sizeof(rhash.type));
-
-	rhash.stat_hash_cnt = atomic64_read(&istat->hash_cnt);
-	rhash.stat_hash_tlen = atomic64_read(&istat->hash_tlen);
-	rhash.stat_err_cnt = atomic64_read(&istat->err_cnt);
-
-	return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
-}
-
 extern const struct crypto_type crypto_shash_type;
 
 int hash_prepare_alg(struct hash_alg_common *alg);
diff --git a/crypto/kpp.c b/crypto/kpp.c
index 33d44e59387f..ecc63a1a948d 100644
--- a/crypto/kpp.c
+++ b/crypto/kpp.c
@@ -66,29 +66,6 @@ static void crypto_kpp_free_instance(struct crypto_instance *inst)
 	kpp->free(kpp);
 }
 
-static int __maybe_unused crypto_kpp_report_stat(
-	struct sk_buff *skb, struct crypto_alg *alg)
-{
-	struct kpp_alg *kpp = __crypto_kpp_alg(alg);
-	struct crypto_istat_kpp *istat;
-	struct crypto_stat_kpp rkpp;
-
-	istat = kpp_get_stat(kpp);
-
-	memset(&rkpp, 0, sizeof(rkpp));
-
-	strscpy(rkpp.type, "kpp", sizeof(rkpp.type));
-
-	rkpp.stat_setsecret_cnt = atomic64_read(&istat->setsecret_cnt);
-	rkpp.stat_generate_public_key_cnt =
-		atomic64_read(&istat->generate_public_key_cnt);
-	rkpp.stat_compute_shared_secret_cnt =
-		atomic64_read(&istat->compute_shared_secret_cnt);
-	rkpp.stat_err_cnt = atomic64_read(&istat->err_cnt);
-
-	return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp);
-}
-
 static const struct crypto_type crypto_kpp_type = {
 	.extsize = crypto_alg_extsize,
 	.init_tfm = crypto_kpp_init_tfm,
@@ -98,9 +75,6 @@ static const struct crypto_type crypto_kpp_type = {
 #endif
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
 	.report = crypto_kpp_report,
-#endif
-#ifdef CONFIG_CRYPTO_STATS
-	.report_stat = crypto_kpp_report_stat,
 #endif
 	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
 	.maskset = CRYPTO_ALG_TYPE_MASK,
@@ -131,15 +105,11 @@ EXPORT_SYMBOL_GPL(crypto_has_kpp);
 
 static void kpp_prepare_alg(struct kpp_alg *alg)
 {
-	struct crypto_istat_kpp *istat = kpp_get_stat(alg);
 	struct crypto_alg *base = &alg->base;
 
 	base->cra_type = &crypto_kpp_type;
 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 	base->cra_flags |= CRYPTO_ALG_TYPE_KPP;
-
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		memset(istat, 0, sizeof(*istat));
 }
 
 int crypto_register_kpp(struct kpp_alg *alg)
diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c
index 0b6dd8aa21f2..0a800292ca4e 100644
--- a/crypto/lskcipher.c
+++ b/crypto/lskcipher.c
@@ -29,25 +29,6 @@ static inline struct lskcipher_alg *__crypto_lskcipher_alg(
 	return container_of(alg, struct lskcipher_alg, co.base);
 }
 
-static inline struct crypto_istat_cipher *lskcipher_get_stat(
-	struct lskcipher_alg *alg)
-{
-	return skcipher_get_stat_common(&alg->co);
-}
-
-static inline int crypto_lskcipher_errstat(struct lskcipher_alg *alg, int err)
-{
-	struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);
-
-	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
-		return err;
-
-	if (err)
-		atomic64_inc(&istat->err_cnt);
-
-	return err;
-}
-
 static int lskcipher_setkey_unaligned(struct crypto_lskcipher *tfm,
 				      const u8 *key, unsigned int keylen)
 {
@@ -147,20 +128,13 @@ static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src,
 				  u32 flags))
 {
 	unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
-	struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
-	int ret;
 
 	if (((unsigned long)src | (unsigned long)dst | (unsigned long)iv) &
-	    alignmask) {
-		ret = crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv,
-						       crypt);
-		goto out;
-	}
+	    alignmask)
+		return crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv,
+							crypt);
 
-	ret = crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL);
-
-out:
-	return crypto_lskcipher_errstat(alg, ret);
+	return crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL);
 }
 
 int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
@@ -168,13 +142,6 @@ int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
 {
 	struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
 
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
-		struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);
-
-		atomic64_inc(&istat->encrypt_cnt);
-		atomic64_add(len, &istat->encrypt_tlen);
-	}
-
 	return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->encrypt);
 }
 EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt);
@@ -184,13 +151,6 @@ int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
 {
 	struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
 
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
-		struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);
-
-		atomic64_inc(&istat->decrypt_cnt);
-		atomic64_add(len, &istat->decrypt_tlen);
-	}
-
 	return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->decrypt);
 }
 EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt);
@@ -322,28 +282,6 @@ static int __maybe_unused crypto_lskcipher_report(
 			sizeof(rblkcipher), &rblkcipher);
 }
 
-static int __maybe_unused crypto_lskcipher_report_stat(
-	struct sk_buff *skb, struct crypto_alg *alg)
-{
-	struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);
-	struct crypto_istat_cipher *istat;
-	struct crypto_stat_cipher rcipher;
-
-	istat = lskcipher_get_stat(skcipher);
-
-	memset(&rcipher, 0, sizeof(rcipher));
-
-	strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
-
-	rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
-	rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
-	rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
-	rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
-	rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
-
-	return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
-}
-
 static const struct crypto_type crypto_lskcipher_type = {
 	.extsize = crypto_alg_extsize,
 	.init_tfm = crypto_lskcipher_init_tfm,
@@ -353,9 +291,6 @@ static const struct crypto_type crypto_lskcipher_type = {
 #endif
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
 	.report = crypto_lskcipher_report,
-#endif
-#ifdef CONFIG_CRYPTO_STATS
-	.report_stat = crypto_lskcipher_report_stat,
 #endif
 	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
 	.maskset = CRYPTO_ALG_TYPE_MASK,
diff --git a/crypto/rng.c b/crypto/rng.c
index 279dffdebf59..9d8804e46422 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -30,30 +30,24 @@ static int crypto_default_rng_refcnt;
 
 int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
 {
-	struct rng_alg *alg = crypto_rng_alg(tfm);
 	u8 *buf = NULL;
 	int err;
 
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		atomic64_inc(&rng_get_stat(alg)->seed_cnt);
-
 	if (!seed && slen) {
 		buf = kmalloc(slen, GFP_KERNEL);
-		err = -ENOMEM;
 		if (!buf)
-			goto out;
+			return -ENOMEM;
 
 		err = get_random_bytes_wait(buf, slen);
 		if (err)
-			goto free_buf;
+			goto out;
 		seed = buf;
 	}
 
-	err = alg->seed(tfm, seed, slen);
-free_buf:
-	kfree_sensitive(buf);
+	err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
 out:
-	return crypto_rng_errstat(alg, err);
+	kfree_sensitive(buf);
+	return err;
 }
 EXPORT_SYMBOL_GPL(crypto_rng_reset);
 
@@ -91,27 +85,6 @@ static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
 	seq_printf(m, "seedsize     : %u\n", seedsize(alg));
 }
 
-static int __maybe_unused crypto_rng_report_stat(
-	struct sk_buff *skb, struct crypto_alg *alg)
-{
-	struct rng_alg *rng = __crypto_rng_alg(alg);
-	struct crypto_istat_rng *istat;
-	struct crypto_stat_rng rrng;
-
-	istat = rng_get_stat(rng);
-
-	memset(&rrng, 0, sizeof(rrng));
-
-	strscpy(rrng.type, "rng", sizeof(rrng.type));
-
-	rrng.stat_generate_cnt = atomic64_read(&istat->generate_cnt);
-	rrng.stat_generate_tlen = atomic64_read(&istat->generate_tlen);
-	rrng.stat_seed_cnt = atomic64_read(&istat->seed_cnt);
-	rrng.stat_err_cnt = atomic64_read(&istat->err_cnt);
-
-	return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng);
-}
-
 static const struct crypto_type crypto_rng_type = {
 	.extsize = crypto_alg_extsize,
 	.init_tfm = crypto_rng_init_tfm,
@@ -120,9 +93,6 @@ static const struct crypto_type crypto_rng_type = {
 #endif
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
 	.report = crypto_rng_report,
-#endif
-#ifdef CONFIG_CRYPTO_STATS
-	.report_stat = crypto_rng_report_stat,
 #endif
 	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
 	.maskset = CRYPTO_ALG_TYPE_MASK,
@@ -199,7 +169,6 @@ EXPORT_SYMBOL_GPL(crypto_del_default_rng);
 
 int crypto_register_rng(struct rng_alg *alg)
 {
-	struct crypto_istat_rng *istat = rng_get_stat(alg);
 	struct crypto_alg *base = &alg->base;
 
 	if (alg->seedsize > PAGE_SIZE / 8)
@@ -209,9 +178,6 @@ int crypto_register_rng(struct rng_alg *alg)
 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 	base->cra_flags |= CRYPTO_ALG_TYPE_RNG;
 
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		memset(istat, 0, sizeof(*istat));
-
 	return crypto_register_alg(base);
 }
 EXPORT_SYMBOL_GPL(crypto_register_rng);
diff --git a/crypto/scompress.c b/crypto/scompress.c
index b108a30a7600..9cda4ef84a9b 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -248,9 +248,6 @@ static const struct crypto_type crypto_scomp_type = {
 #endif
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
 	.report = crypto_scomp_report,
-#endif
-#ifdef CONFIG_CRYPTO_STATS
-	.report_stat = crypto_acomp_report_stat,
 #endif
 	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
 	.maskset = CRYPTO_ALG_TYPE_MASK,
@@ -260,11 +257,10 @@ static const struct crypto_type crypto_scomp_type = {
 
 int crypto_register_scomp(struct scomp_alg *alg)
 {
-	struct crypto_alg *base = &alg->calg.base;
-
-	comp_prepare_alg(&alg->calg);
+	struct crypto_alg *base = &alg->base;
 
 	base->cra_type = &crypto_scomp_type;
+	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
 
 	return crypto_register_alg(base);
diff --git a/crypto/shash.c b/crypto/shash.c
index c3f7f6a25280..0ffe671b519e 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -16,18 +16,6 @@
 
 #include "hash.h"
 
-static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg)
-{
-	return hash_get_stat(&alg->halg);
-}
-
-static inline int crypto_shash_errstat(struct shash_alg *alg, int err)
-{
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS) && err)
-		atomic64_inc(&shash_get_stat(alg)->err_cnt);
-	return err;
-}
-
 int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
 		    unsigned int keylen)
 {
@@ -61,29 +49,13 @@ EXPORT_SYMBOL_GPL(crypto_shash_setkey);
 int crypto_shash_update(struct shash_desc *desc, const u8 *data,
 			unsigned int len)
 {
-	struct shash_alg *shash = crypto_shash_alg(desc->tfm);
-	int err;
-
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		atomic64_add(len, &shash_get_stat(shash)->hash_tlen);
-
-	err = shash->update(desc, data, len);
-
-	return crypto_shash_errstat(shash, err);
+	return crypto_shash_alg(desc->tfm)->update(desc, data, len);
 }
 EXPORT_SYMBOL_GPL(crypto_shash_update);
 
 int crypto_shash_final(struct shash_desc *desc, u8 *out)
 {
-	struct shash_alg *shash = crypto_shash_alg(desc->tfm);
-	int err;
-
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		atomic64_inc(&shash_get_stat(shash)->hash_cnt);
-
-	err = shash->final(desc, out);
-
-	return crypto_shash_errstat(shash, err);
+	return crypto_shash_alg(desc->tfm)->final(desc, out);
 }
 EXPORT_SYMBOL_GPL(crypto_shash_final);
 
@@ -99,20 +71,7 @@ static int shash_default_finup(struct shash_desc *desc, const u8 *data,
 int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
 		       unsigned int len, u8 *out)
 {
-	struct crypto_shash *tfm = desc->tfm;
-	struct shash_alg *shash = crypto_shash_alg(tfm);
-	int err;
-
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
-		struct crypto_istat_hash *istat = shash_get_stat(shash);
-
-		atomic64_inc(&istat->hash_cnt);
-		atomic64_add(len, &istat->hash_tlen);
-	}
-
-	err = shash->finup(desc, data, len, out);
-
-	return crypto_shash_errstat(shash, err);
+	return crypto_shash_alg(desc->tfm)->finup(desc, data, len, out);
 }
 EXPORT_SYMBOL_GPL(crypto_shash_finup);
 
@@ -129,22 +88,11 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
 			unsigned int len, u8 *out)
 {
 	struct crypto_shash *tfm = desc->tfm;
-	struct shash_alg *shash = crypto_shash_alg(tfm);
-	int err;
-
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
-		struct crypto_istat_hash *istat = shash_get_stat(shash);
-
-		atomic64_inc(&istat->hash_cnt);
-		atomic64_add(len, &istat->hash_tlen);
-	}
 
 	if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-		err = -ENOKEY;
-	else
-		err = shash->digest(desc, data, len, out);
+		return -ENOKEY;
 
-	return crypto_shash_errstat(shash, err);
+	return crypto_shash_alg(desc->tfm)->digest(desc, data, len, out);
 }
 EXPORT_SYMBOL_GPL(crypto_shash_digest);
 
@@ -265,12 +213,6 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
 	seq_printf(m, "digestsize   : %u\n", salg->digestsize);
 }
 
-static int __maybe_unused crypto_shash_report_stat(
-	struct sk_buff *skb, struct crypto_alg *alg)
-{
-	return crypto_hash_report_stat(skb, alg, "shash");
-}
-
 const struct crypto_type crypto_shash_type = {
 	.extsize = crypto_alg_extsize,
 	.init_tfm = crypto_shash_init_tfm,
@@ -280,9 +222,6 @@ const struct crypto_type crypto_shash_type = {
 #endif
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
 	.report = crypto_shash_report,
-#endif
-#ifdef CONFIG_CRYPTO_STATS
-	.report_stat = crypto_shash_report_stat,
 #endif
 	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
 	.maskset = CRYPTO_ALG_TYPE_MASK,
@@ -350,7 +289,6 @@ EXPORT_SYMBOL_GPL(crypto_clone_shash);
 
 int hash_prepare_alg(struct hash_alg_common *alg)
 {
-	struct crypto_istat_hash *istat = hash_get_stat(alg);
 	struct crypto_alg *base = &alg->base;
 
 	if (alg->digestsize > HASH_MAX_DIGESTSIZE)
@@ -362,9 +300,6 @@ int hash_prepare_alg(struct hash_alg_common *alg)
 
 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		memset(istat, 0, sizeof(*istat));
-
 	return 0;
 }
 
diff --git a/crypto/sig.c b/crypto/sig.c
index 224c47019297..7645bedf3a1f 100644
--- a/crypto/sig.c
+++ b/crypto/sig.c
@@ -45,16 +45,6 @@ static int __maybe_unused crypto_sig_report(struct sk_buff *skb,
 	return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, sizeof(rsig), &rsig);
 }
 
-static int __maybe_unused crypto_sig_report_stat(struct sk_buff *skb,
-						 struct crypto_alg *alg)
-{
-	struct crypto_stat_akcipher rsig = {};
-
-	strscpy(rsig.type, "sig", sizeof(rsig.type));
-
-	return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER, sizeof(rsig), &rsig);
-}
-
 static const struct crypto_type crypto_sig_type = {
 	.extsize = crypto_alg_extsize,
 	.init_tfm = crypto_sig_init_tfm,
@@ -63,9 +53,6 @@ static const struct crypto_type crypto_sig_type = {
 #endif
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
 	.report = crypto_sig_report,
-#endif
-#ifdef CONFIG_CRYPTO_STATS
-	.report_stat = crypto_sig_report_stat,
 #endif
 	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
 	.maskset = CRYPTO_ALG_TYPE_SIG_MASK,
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index bc70e159d27d..ceed7f33a67b 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -89,25 +89,6 @@ static inline struct skcipher_alg *__crypto_skcipher_alg(
 	return container_of(alg, struct skcipher_alg, base);
 }
 
-static inline struct crypto_istat_cipher *skcipher_get_stat(
-	struct skcipher_alg *alg)
-{
-	return skcipher_get_stat_common(&alg->co);
-}
-
-static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err)
-{
-	struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
-
-	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
-		return err;
-
-	if (err && err != -EINPROGRESS && err != -EBUSY)
-		atomic64_inc(&istat->err_cnt);
-
-	return err;
-}
-
 static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
 {
 	u8 *addr;
@@ -654,23 +635,12 @@ int crypto_skcipher_encrypt(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
-	int ret;
-
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
-		struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
-
-		atomic64_inc(&istat->encrypt_cnt);
-		atomic64_add(req->cryptlen, &istat->encrypt_tlen);
-	}
 
 	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-		ret = -ENOKEY;
-	else if (alg->co.base.cra_type != &crypto_skcipher_type)
-		ret = crypto_lskcipher_encrypt_sg(req);
-	else
-		ret = alg->encrypt(req);
-
-	return crypto_skcipher_errstat(alg, ret);
+		return -ENOKEY;
+	if (alg->co.base.cra_type != &crypto_skcipher_type)
+		return crypto_lskcipher_encrypt_sg(req);
+	return alg->encrypt(req);
 }
 EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);
 
@@ -678,23 +648,12 @@ int crypto_skcipher_decrypt(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
-	int ret;
-
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
-		struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
-
-		atomic64_inc(&istat->decrypt_cnt);
-		atomic64_add(req->cryptlen, &istat->decrypt_tlen);
-	}
 
 	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-		ret = -ENOKEY;
-	else if (alg->co.base.cra_type != &crypto_skcipher_type)
-		ret = crypto_lskcipher_decrypt_sg(req);
-	else
-		ret = alg->decrypt(req);
-
-	return crypto_skcipher_errstat(alg, ret);
+		return -ENOKEY;
+	if (alg->co.base.cra_type != &crypto_skcipher_type)
+		return crypto_lskcipher_decrypt_sg(req);
+	return alg->decrypt(req);
 }
 EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
 
@@ -846,28 +805,6 @@ static int __maybe_unused crypto_skcipher_report(
 			sizeof(rblkcipher), &rblkcipher);
 }
 
-static int __maybe_unused crypto_skcipher_report_stat(
-	struct sk_buff *skb, struct crypto_alg *alg)
-{
-	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
-	struct crypto_istat_cipher *istat;
-	struct crypto_stat_cipher rcipher;
-
-	istat = skcipher_get_stat(skcipher);
-
-	memset(&rcipher, 0, sizeof(rcipher));
-
-	strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
-
-	rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
-	rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
-	rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
-	rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
-	rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
-
-	return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
-}
-
 static const struct crypto_type crypto_skcipher_type = {
 	.extsize = crypto_skcipher_extsize,
 	.init_tfm = crypto_skcipher_init_tfm,
@@ -877,9 +814,6 @@ static const struct crypto_type crypto_skcipher_type = {
 #endif
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
 	.report = crypto_skcipher_report,
-#endif
-#ifdef CONFIG_CRYPTO_STATS
-	.report_stat = crypto_skcipher_report_stat,
 #endif
 	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
 	.maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
@@ -935,7 +869,6 @@ EXPORT_SYMBOL_GPL(crypto_has_skcipher);
 
 int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
 {
-	struct crypto_istat_cipher *istat = skcipher_get_stat_common(alg);
 	struct crypto_alg *base = &alg->base;
 
 	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
@@ -948,9 +881,6 @@ int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
 
 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		memset(istat, 0, sizeof(*istat));
-
 	return 0;
 }
 
diff --git a/crypto/skcipher.h b/crypto/skcipher.h
index 16c9484360da..703651367dd8 100644
--- a/crypto/skcipher.h
+++ b/crypto/skcipher.h
@@ -10,16 +10,6 @@
 #include
 #include "internal.h"
 
-static inline struct crypto_istat_cipher *skcipher_get_stat_common(
-	struct skcipher_alg_common *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
-	return &alg->stat;
-#else
-	return NULL;
-#endif
-}
-
 int crypto_lskcipher_encrypt_sg(struct skcipher_request *req);
 int crypto_lskcipher_decrypt_sg(struct skcipher_request *req);
 int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm);
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index 574cffc90730..d042c90e0907 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -56,35 +56,6 @@ struct crypto_acomp {
 	struct crypto_tfm base;
 };
 
-/*
- * struct crypto_istat_compress - statistics for compress algorithm
- * @compress_cnt:	number of compress requests
- * @compress_tlen:	total data size handled by compress requests
- * @decompress_cnt:	number of decompress requests
- * @decompress_tlen:	total data size handled by decompress requests
- * @err_cnt:		number of error for compress requests
- */
-struct crypto_istat_compress {
-	atomic64_t compress_cnt;
-	atomic64_t compress_tlen;
-	atomic64_t decompress_cnt;
-	atomic64_t decompress_tlen;
-	atomic64_t err_cnt;
-};
-
-#ifdef CONFIG_CRYPTO_STATS
-#define COMP_ALG_COMMON_STATS	struct crypto_istat_compress stat;
-#else
-#define COMP_ALG_COMMON_STATS
-#endif
-
-#define COMP_ALG_COMMON {		\
-	COMP_ALG_COMMON_STATS		\
-					\
-	struct crypto_alg base;		\
-}
-struct comp_alg_common COMP_ALG_COMMON;
-
 /**
  * DOC: Asynchronous Compression API
  *
@@ -132,23 +103,11 @@ static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
 	return &tfm->base;
 }
 
-static inline struct comp_alg_common *__crypto_comp_alg_common(
-	struct crypto_alg *alg)
-{
-	return container_of(alg, struct comp_alg_common, base);
-}
-
 static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
 {
 	return container_of(tfm, struct crypto_acomp, base);
 }
 
-static inline struct comp_alg_common *crypto_comp_alg_common(
-	struct crypto_acomp *tfm)
-{
-	return __crypto_comp_alg_common(crypto_acomp_tfm(tfm)->__crt_alg);
-}
-
 static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
 {
 	return tfm->reqsize;
@@ -255,27 +214,6 @@ static inline void acomp_request_set_params(struct acomp_req *req,
 		req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
 }
 
-static inline struct crypto_istat_compress *comp_get_stat(
-	struct comp_alg_common *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
-	return &alg->stat;
-#else
-	return NULL;
-#endif
-}
-
-static inline int crypto_comp_errstat(struct comp_alg_common *alg, int err)
-{
-	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
-		return err;
-
-	if (err && err != -EINPROGRESS && err != -EBUSY)
-		atomic64_inc(&comp_get_stat(alg)->err_cnt);
-
-	return err;
-}
-
 /**
  * crypto_acomp_compress() -- Invoke asynchronous compress operation
  *
@@ -287,19 +225,7 @@ static inline int crypto_comp_errstat(struct comp_alg_common *alg, int err)
  */
 static inline int crypto_acomp_compress(struct acomp_req *req)
 {
-	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
-	struct comp_alg_common *alg;
-
-	alg = crypto_comp_alg_common(tfm);
-
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
-		struct crypto_istat_compress *istat = comp_get_stat(alg);
-
-		atomic64_inc(&istat->compress_cnt);
-		atomic64_add(req->slen, &istat->compress_tlen);
-	}
-
-	return crypto_comp_errstat(alg, tfm->compress(req));
+	return crypto_acomp_reqtfm(req)->compress(req);
 }
 
 /**
@@ -313,19 +239,7 @@ static inline int crypto_acomp_compress(struct acomp_req *req)
  */
 static inline int crypto_acomp_decompress(struct acomp_req *req)
 {
-	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
-	struct comp_alg_common *alg;
-
-	alg = crypto_comp_alg_common(tfm);
-
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
-		struct crypto_istat_compress *istat = comp_get_stat(alg);
-
-		atomic64_inc(&istat->decompress_cnt);
-		atomic64_add(req->slen, &istat->decompress_tlen);
-	}
-
-	return crypto_comp_errstat(alg, tfm->decompress(req));
+	return crypto_acomp_reqtfm(req)->decompress(req);
 }
 
 #endif
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 51382befbe37..0e8a41638678 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -101,22 +101,6 @@ struct aead_request {
 	void *__ctx[] CRYPTO_MINALIGN_ATTR;
 };
 
-/*
- * struct crypto_istat_aead - statistics for AEAD algorithm
- * @encrypt_cnt:	number of encrypt requests
- * @encrypt_tlen:	total data size handled by encrypt requests
- * @decrypt_cnt:	number of decrypt requests
- * @decrypt_tlen:	total data size handled by decrypt requests
- * @err_cnt:		number of error for AEAD requests
- */
-struct crypto_istat_aead {
-	atomic64_t encrypt_cnt;
-	atomic64_t encrypt_tlen;
-	atomic64_t decrypt_cnt;
-	atomic64_t decrypt_tlen;
-	atomic64_t err_cnt;
-};
-
 /**
  * struct aead_alg - AEAD cipher definition
  * @maxauthsize: Set the maximum authentication tag size supported by the
@@ -135,7 +119,6 @@ struct crypto_istat_aead {
  * @setkey: see struct skcipher_alg
  * @encrypt: see struct skcipher_alg
  * @decrypt: see struct skcipher_alg
- * @stat: statistics for AEAD algorithm
  * @ivsize: see struct skcipher_alg
  * @chunksize: see struct skcipher_alg
  * @init: Initialize the cryptographic transformation object. This function
@@ -162,10 +145,6 @@ struct aead_alg {
 	int (*init)(struct crypto_aead *tfm);
 	void (*exit)(struct crypto_aead *tfm);
 
-#ifdef CONFIG_CRYPTO_STATS
-	struct crypto_istat_aead stat;
-#endif
-
 	unsigned int ivsize;
 	unsigned int maxauthsize;
 	unsigned int chunksize;
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index 31c111bebb68..18a10cad07aa 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -54,26 +54,6 @@ struct crypto_akcipher {
 	struct crypto_tfm base;
 };
 
-/*
- * struct crypto_istat_akcipher - statistics for akcipher algorithm
- * @encrypt_cnt:	number of encrypt requests
- * @encrypt_tlen:	total data size handled by encrypt requests
- * @decrypt_cnt:	number of decrypt requests
- * @decrypt_tlen:	total data size handled by decrypt requests
- * @verify_cnt:		number of verify operation
- * @sign_cnt:		number of sign requests
- * @err_cnt:		number of error for akcipher requests
- */
-struct crypto_istat_akcipher {
-	atomic64_t encrypt_cnt;
-	atomic64_t encrypt_tlen;
-	atomic64_t decrypt_cnt;
-	atomic64_t decrypt_tlen;
-	atomic64_t verify_cnt;
-	atomic64_t sign_cnt;
-	atomic64_t err_cnt;
-};
-
 /**
  * struct akcipher_alg - generic public key algorithm
  *
@@ -110,7 +90,6 @@ struct crypto_istat_akcipher {
  * @exit:	Deinitialize the cryptographic transformation object. This is a
  *		counterpart to @init, used to remove various changes set in
  *		@init.
- * @stat:	Statistics for akcipher algorithm
  *
  * @base:	Common crypto API algorithm data structure
  */
@@ -127,10 +106,6 @@ struct akcipher_alg {
 	int (*init)(struct crypto_akcipher *tfm);
 	void (*exit)(struct crypto_akcipher *tfm);
 
-#ifdef CONFIG_CRYPTO_STATS
-	struct crypto_istat_akcipher stat;
-#endif
-
 	struct crypto_alg base;
 };
 
@@ -302,27 +277,6 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm)
 	return alg->max_size(tfm);
 }
 
-static inline struct crypto_istat_akcipher *akcipher_get_stat(
-	struct akcipher_alg *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
-	return &alg->stat;
-#else
-	return NULL;
-#endif
-}
-
-static inline int crypto_akcipher_errstat(struct akcipher_alg *alg, int err)
-{
-	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
-		return err;
-
-	if (err && err != -EINPROGRESS && err != -EBUSY)
-		atomic64_inc(&akcipher_get_stat(alg)->err_cnt);
-
-	return err;
-}
-
 /**
  * crypto_akcipher_encrypt() - Invoke public key encrypt operation
  *
@@ -336,16 +290,8 @@ static inline int crypto_akcipher_errstat(struct akcipher_alg *alg, int err)
 static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
 {
 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
 
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
-		struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
-
-		atomic64_inc(&istat->encrypt_cnt);
-		atomic64_add(req->src_len, &istat->encrypt_tlen);
-	}
-
-	return crypto_akcipher_errstat(alg, alg->encrypt(req));
+	return crypto_akcipher_alg(tfm)->encrypt(req);
 }
 
 /**
@@ -361,16 +307,8 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
 static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
 {
 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
 
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
-		struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
-
-		atomic64_inc(&istat->decrypt_cnt);
-		atomic64_add(req->src_len, &istat->decrypt_tlen);
-	}
-
-	return crypto_akcipher_errstat(alg, alg->decrypt(req));
+	return crypto_akcipher_alg(tfm)->decrypt(req);
 }
 
 /**
@@ -422,12 +360,8 @@ int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm,
 static inline int crypto_akcipher_sign(struct akcipher_request *req)
 {
 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
 
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		atomic64_inc(&akcipher_get_stat(alg)->sign_cnt);
-
-	return crypto_akcipher_errstat(alg, alg->sign(req));
+	return crypto_akcipher_alg(tfm)->sign(req);
 }
 
 /**
@@ -447,12 +381,8 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req)
 static inline int crypto_akcipher_verify(struct akcipher_request *req)
 {
 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
 
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		atomic64_inc(&akcipher_get_stat(alg)->verify_cnt);
-
-	return crypto_akcipher_errstat(alg, alg->verify(req));
+	return crypto_akcipher_alg(tfm)->verify(req);
 }
 
 /**
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 7a4a71af653f..156de41ca760 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -61,9 +61,6 @@ struct crypto_type {
 	void (*show)(struct seq_file *m, struct crypto_alg *alg);
 	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
 	void (*free)(struct crypto_instance *inst);
-#ifdef CONFIG_CRYPTO_STATS
-	int (*report_stat)(struct sk_buff *skb, struct crypto_alg *alg);
-#endif
 
 	unsigned int type;
 	unsigned int maskclear;
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 5d61f576cfc8..0014bdd81ab7 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -23,27 +23,8 @@ struct crypto_ahash;
  *	  crypto_unregister_shash().
  */
 
-/*
- * struct crypto_istat_hash - statistics for has algorithm
- * @hash_cnt:		number of hash requests
- * @hash_tlen:		total data size hashed
- * @err_cnt:		number of error for hash requests
- */
-struct crypto_istat_hash {
-	atomic64_t hash_cnt;
-	atomic64_t hash_tlen;
-	atomic64_t err_cnt;
-};
-
-#ifdef CONFIG_CRYPTO_STATS
-#define HASH_ALG_COMMON_STAT	struct crypto_istat_hash stat;
-#else
-#define HASH_ALG_COMMON_STAT
-#endif
-
 /*
  * struct hash_alg_common - define properties of message digest
- * @stat: Statistics for hash algorithm.
  * @digestsize: Size of the result of the transformation. A buffer of this size
  *	        must be available to the @final and @finup calls, so they can
  *	        store the resulting hash into it. For various predefined sizes,
@@ -60,8 +41,6 @@ struct crypto_istat_hash {
  *	      information.
  */
 #define HASH_ALG_COMMON {		\
-	HASH_ALG_COMMON_STAT		\
-					\
 	unsigned int digestsize;	\
 	unsigned int statesize;		\
 	\
@@ -243,7 +222,6 @@ struct shash_alg {
 	};
 };
 #undef HASH_ALG_COMMON
-#undef HASH_ALG_COMMON_STAT
 
 struct crypto_ahash {
 	bool using_shash; /* Underlying algorithm is shash, not ahash */
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index 4ac46bafba9d..475e60a9f9ea 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -31,9 +31,7 @@
  * @init.
  *
  * @reqsize:	Context size for (de)compression requests
- * @stat:	Statistics for compress algorithm
  * @base:	Common crypto API algorithm data structure
- * @calg:	Cmonn algorithm data structure shared with scomp
  */
 struct acomp_alg {
 	int (*compress)(struct acomp_req *req);
@@ -44,10 +42,7 @@ struct acomp_alg {
 
 	unsigned int reqsize;
 
-	union {
-		struct COMP_ALG_COMMON;
-		struct comp_alg_common calg;
-	};
+	struct crypto_alg base;
 };
 
 /*
diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h
deleted file mode 100644
index fd54074332f5..000000000000
--- a/include/crypto/internal/cryptouser.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include
-#include
-
-struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact);
-
-#ifdef CONFIG_CRYPTO_STATS
-int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs);
-#else
-static inline int crypto_reportstat(struct sk_buff *in_skb,
-				    struct nlmsghdr *in_nlh,
-				    struct nlattr **attrs)
-{
-	return -ENOTSUPP;
-}
-#endif
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index 858fe3965ae3..5a75f2db18ce 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -27,9 +27,7 @@ struct crypto_scomp {
  * @free_ctx:	Function frees context allocated with alloc_ctx
  * @compress:	Function performs a compress operation
  * @decompress:	Function performs a de-compress operation
- * @stat:	Statistics for compress algorithm
 * @base:	Common crypto API algorithm data structure
- * @calg:	Cmonn algorithm data structure shared with acomp
 */
 struct scomp_alg {
 	void *(*alloc_ctx)(struct crypto_scomp *tfm);
@@ -40,11 +38,7 @@ struct scomp_alg {
 	int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
 			  unsigned int slen, u8 *dst, unsigned int *dlen,
 			  void *ctx);
-
-	union {
-		struct COMP_ALG_COMMON;
-		struct comp_alg_common calg;
-	};
+	struct crypto_alg base;
 };
 
 static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index 1988e24a0d1d..2d9c4de57b69 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -51,20 +51,6 @@ struct crypto_kpp {
 	struct crypto_tfm base;
 };
 
-/*
- * struct crypto_istat_kpp - statistics for KPP algorithm
- * @setsecret_cnt:		number of setsecrey operation
- * @generate_public_key_cnt:	number of generate_public_key operation
- * @compute_shared_secret_cnt:	number of compute_shared_secret operation
- * @err_cnt:			number of error for KPP requests
- */
-struct crypto_istat_kpp {
-	atomic64_t setsecret_cnt;
-	atomic64_t generate_public_key_cnt;
-	atomic64_t compute_shared_secret_cnt;
-	atomic64_t err_cnt;
-};
-
 /**
  * struct kpp_alg - generic key-agreement protocol primitives
  *
@@ -87,7 +73,6 @@ struct crypto_istat_kpp {
  * @exit:	Undo everything @init did.
  *
  * @base:	Common crypto API algorithm data structure
- * @stat:	Statistics for KPP algorithm
  */
 struct kpp_alg {
 	int (*set_secret)(struct crypto_kpp *tfm, const void *buffer,
@@ -100,10 +85,6 @@ struct kpp_alg {
 	int (*init)(struct crypto_kpp *tfm);
 	void (*exit)(struct crypto_kpp *tfm);
 
-#ifdef CONFIG_CRYPTO_STATS
-	struct crypto_istat_kpp stat;
-#endif
-
 	struct crypto_alg base;
 };
 
@@ -291,26 +272,6 @@ struct kpp_secret {
 	unsigned short len;
 };
 
-static inline struct crypto_istat_kpp *kpp_get_stat(struct kpp_alg *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
-	return &alg->stat;
-#else
-	return NULL;
-#endif
-}
-
-static inline int crypto_kpp_errstat(struct kpp_alg *alg, int err)
-{
-	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
-		return err;
-
-	if (err && err != -EINPROGRESS && err != -EBUSY)
-		atomic64_inc(&kpp_get_stat(alg)->err_cnt);
-
-	return err;
-}
-
 /**
  * crypto_kpp_set_secret() - Invoke kpp operation
  *
@@ -329,12 +290,7 @@ static inline int crypto_kpp_errstat(struct kpp_alg *alg, int err)
 static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
 					const void *buffer, unsigned int len)
 {
-	struct kpp_alg *alg = crypto_kpp_alg(tfm);
-
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		atomic64_inc(&kpp_get_stat(alg)->setsecret_cnt);
-
-	return crypto_kpp_errstat(alg, alg->set_secret(tfm, buffer, len));
+	return crypto_kpp_alg(tfm)->set_secret(tfm, buffer, len);
 }
 
 /**
@@ -353,12 +309,8 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
 static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
 {
 	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
-	struct kpp_alg *alg = crypto_kpp_alg(tfm);
 
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		atomic64_inc(&kpp_get_stat(alg)->generate_public_key_cnt);
-
-	return crypto_kpp_errstat(alg, alg->generate_public_key(req));
+	return crypto_kpp_alg(tfm)->generate_public_key(req);
 }
 
 /**
@@ -374,12 +326,8 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
 static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
 {
 	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
-	struct kpp_alg *alg = crypto_kpp_alg(tfm);
 
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		atomic64_inc(&kpp_get_stat(alg)->compute_shared_secret_cnt);
-
-	return crypto_kpp_errstat(alg, alg->compute_shared_secret(req));
+	return crypto_kpp_alg(tfm)->compute_shared_secret(req);
 }
 
 /**
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index 6abe5102e5fb..5ac4388f50e1 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -15,20 +15,6 @@
 
 struct crypto_rng;
 
-/*
- * struct crypto_istat_rng: statistics for RNG algorithm
- * @generate_cnt:	number of RNG generate requests
- * @generate_tlen:	total data size of generated data by the RNG
- * @seed_cnt:		number of times the RNG was seeded
- * @err_cnt:		number of error for RNG requests
- */
-struct crypto_istat_rng {
-	atomic64_t generate_cnt;
-	atomic64_t generate_tlen;
-	atomic64_t seed_cnt;
-	atomic64_t err_cnt;
-};
-
 /**
  * struct rng_alg - random number generator definition
  *
@@ -46,7 +32,6 @@ struct crypto_istat_rng {
  *		size of the seed is defined with @seedsize .
  * @set_ent:	Set entropy that would otherwise be obtained from
  *		entropy source.  Internal use only.
- * @stat:	Statistics for rng algorithm
  * @seedsize:	The seed size required for a random number generator
  *		initialization defined with this variable. Some
  *		random number generators does not require a seed
@@ -63,10 +48,6 @@ struct rng_alg {
 	void (*set_ent)(struct crypto_rng *tfm, const u8 *data,
 			unsigned int len);
 
-#ifdef CONFIG_CRYPTO_STATS
-	struct crypto_istat_rng stat;
-#endif
-
 	unsigned int seedsize;
 
 	struct crypto_alg base;
@@ -144,26 +125,6 @@ static inline void crypto_free_rng(struct crypto_rng *tfm)
 	crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
 }
 
-static inline struct crypto_istat_rng *rng_get_stat(struct rng_alg *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
-	return &alg->stat;
-#else
-	return NULL;
-#endif
-}
-
-static inline int crypto_rng_errstat(struct rng_alg *alg, int err)
-{
-	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
-		return err;
-
-	if (err && err != -EINPROGRESS && err != -EBUSY)
-		atomic64_inc(&rng_get_stat(alg)->err_cnt);
-
-	return err;
-}
-
 /**
  * crypto_rng_generate() - get random number
  * @tfm: cipher handle
@@ -182,17 +143,7 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm,
 				      const u8 *src, unsigned int slen,
 				      u8 *dst, unsigned int dlen)
 {
-	struct rng_alg *alg = crypto_rng_alg(tfm);
-
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
-		struct crypto_istat_rng *istat = rng_get_stat(alg);
-
-		atomic64_inc(&istat->generate_cnt);
-		atomic64_add(dlen, &istat->generate_tlen);
-	}
-
-	return crypto_rng_errstat(alg,
-				  alg->generate(tfm, src, slen, dst, dlen));
+	return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
 }
 
 /**
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index c8857d7bdb37..74d47e23374e 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -64,28 +64,6 @@ struct crypto_lskcipher {
 	struct crypto_tfm base;
 };
 
-/*
- * struct crypto_istat_cipher - statistics for cipher algorithm
- * @encrypt_cnt:	number of encrypt requests
- * @encrypt_tlen:	total data size handled by encrypt requests
- * @decrypt_cnt:	number of decrypt requests
- * @decrypt_tlen:	total data size handled by decrypt requests
- * @err_cnt:		number of error for cipher requests
- */
-struct crypto_istat_cipher {
-	atomic64_t encrypt_cnt;
-	atomic64_t encrypt_tlen;
-	atomic64_t decrypt_cnt;
-	atomic64_t decrypt_tlen;
-	atomic64_t err_cnt;
-};
-
-#ifdef CONFIG_CRYPTO_STATS
-#define SKCIPHER_ALG_COMMON_STAT	struct crypto_istat_cipher stat;
-#else
-#define SKCIPHER_ALG_COMMON_STAT
-#endif
-
 /*
  * struct skcipher_alg_common - common properties of skcipher_alg
  * @min_keysize: Minimum key size supported by the transformation. This is the
@@ -103,7 +81,6 @@ struct crypto_istat_cipher {
  * @chunksize: Equal to the block size except for stream ciphers such as
  *	       CTR where it is set to the underlying block size.
  * @statesize: Size of the internal state for the algorithm.
- * @stat: Statistics for cipher algorithm
 * @base: Definition of a generic crypto algorithm.
 */
 #define SKCIPHER_ALG_COMMON {		\
@@ -113,8 +90,6 @@ struct crypto_istat_cipher {
 	unsigned int chunksize;		\
 	unsigned int statesize;		\
 	\
-	SKCIPHER_ALG_COMMON_STAT	\
-	\
 	struct crypto_alg base;		\
 }
 struct skcipher_alg_common SKCIPHER_ALG_COMMON;
diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
index 5730c67f0617..e163670d60f7 100644
--- a/include/uapi/linux/cryptouser.h
+++ b/include/uapi/linux/cryptouser.h
@@ -54,16 +54,16 @@ enum crypto_attr_type_t {
 	CRYPTOCFGA_REPORT_AKCIPHER,	/* struct crypto_report_akcipher */
 	CRYPTOCFGA_REPORT_KPP,		/* struct crypto_report_kpp */
 	CRYPTOCFGA_REPORT_ACOMP,	/* struct crypto_report_acomp */
-	CRYPTOCFGA_STAT_LARVAL,		/* struct crypto_stat */
-	CRYPTOCFGA_STAT_HASH,		/* struct crypto_stat */
-	CRYPTOCFGA_STAT_BLKCIPHER,	/* struct crypto_stat */
-	CRYPTOCFGA_STAT_AEAD,		/* struct crypto_stat */
-	CRYPTOCFGA_STAT_COMPRESS,	/* struct crypto_stat */
-	CRYPTOCFGA_STAT_RNG,		/* struct crypto_stat */
-	CRYPTOCFGA_STAT_CIPHER,		/* struct crypto_stat */
-	CRYPTOCFGA_STAT_AKCIPHER,	/* struct crypto_stat */
-	CRYPTOCFGA_STAT_KPP,		/* struct crypto_stat */
-	CRYPTOCFGA_STAT_ACOMP,		/* struct crypto_stat */
+	CRYPTOCFGA_STAT_LARVAL,		/* No longer supported */
+	CRYPTOCFGA_STAT_HASH,		/* No longer supported */
+	CRYPTOCFGA_STAT_BLKCIPHER,	/* No longer supported */
+	CRYPTOCFGA_STAT_AEAD,		/* No longer supported */
+	CRYPTOCFGA_STAT_COMPRESS,	/* No longer supported */
+	CRYPTOCFGA_STAT_RNG,		/* No longer supported */
+	CRYPTOCFGA_STAT_CIPHER,		/* No longer supported */
+	CRYPTOCFGA_STAT_AKCIPHER,	/* No longer supported */
+	CRYPTOCFGA_STAT_KPP,		/* No longer supported */
+	CRYPTOCFGA_STAT_ACOMP,		/* No longer supported */
 	__CRYPTOCFGA_MAX
 
 #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
@@ -79,6 +79,7 @@ struct crypto_user_alg {
 	__u32 cru_flags;
 };
 
+/* No longer supported, do not use. */
 struct crypto_stat_aead {
 	char type[CRYPTO_MAX_NAME];
 	__u64 stat_encrypt_cnt;
@@ -88,6 +89,7 @@ struct crypto_stat_aead {
 	__u64 stat_err_cnt;
 };
 
+/* No longer supported, do not use. */
 struct crypto_stat_akcipher {
 	char type[CRYPTO_MAX_NAME];
 	__u64 stat_encrypt_cnt;
@@ -99,6 +101,7 @@ struct crypto_stat_akcipher {
 	__u64 stat_err_cnt;
 };
 
+/* No longer supported, do not use. */
 struct crypto_stat_cipher {
 	char type[CRYPTO_MAX_NAME];
 	__u64 stat_encrypt_cnt;
@@ -108,6 +111,7 @@ struct crypto_stat_cipher {
 	__u64 stat_err_cnt;
 };
 
+/* No longer supported, do not use. */
 struct crypto_stat_compress {
 	char type[CRYPTO_MAX_NAME];
 	__u64 stat_compress_cnt;
@@ -117,6 +121,7 @@ struct crypto_stat_compress {
 	__u64 stat_err_cnt;
 };
 
+/* No longer supported, do not use. */
 struct crypto_stat_hash {
 	char type[CRYPTO_MAX_NAME];
 	__u64 stat_hash_cnt;
@@ -124,6 +129,7 @@ struct crypto_stat_hash {
 	__u64 stat_err_cnt;
 };
 
+/* No longer supported, do not use. */
 struct crypto_stat_kpp {
 	char type[CRYPTO_MAX_NAME];
 	__u64 stat_setsecret_cnt;
@@ -132,6 +138,7 @@ struct crypto_stat_kpp {
 	__u64 stat_err_cnt;
 };
 
+/* No longer supported, do not use. */
 struct crypto_stat_rng {
 	char type[CRYPTO_MAX_NAME];
 	__u64 stat_generate_cnt;
@@ -140,6 +147,7 @@ struct crypto_stat_rng {
 	__u64 stat_err_cnt;
 };
 
+/* No longer supported, do not use. */
 struct crypto_stat_larval {
 	char type[CRYPTO_MAX_NAME];
 };

From 7bb9f42d9e588837e7b5683f58d6fa2e92a945ea Mon Sep 17 00:00:00 2001
From: Varshini Rajendran
Date: Fri, 23 Feb 2024 22:53:10 +0530
Subject: [PATCH 67/79] dt-bindings: crypto: add sam9x7 in Atmel AES

Add the sam9x7 compatible to the Atmel AES DT bindings.

Signed-off-by: Varshini Rajendran
Acked-by: Rob Herring
Reviewed-by: Tudor Ambarus
Signed-off-by: Herbert Xu
---
 .../devicetree/bindings/crypto/atmel,at91sam9g46-aes.yaml | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-aes.yaml b/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-aes.yaml
index 0b7383b3106b..7dc0748444fd 100644
--- a/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-aes.yaml
+++ b/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-aes.yaml
@@ -12,7 +12,11 @@ maintainers:
 
 properties:
   compatible:
-    const: atmel,at91sam9g46-aes
+    oneOf:
+      - const: atmel,at91sam9g46-aes
+      - items:
+          - const: microchip,sam9x7-aes
+          - const: atmel,at91sam9g46-aes
 
   reg:
     maxItems: 1

From 4234f365ebd2657296ab135ecacda476905b4425 Mon Sep 17 00:00:00 2001
From: Varshini Rajendran
Date: Fri, 23 Feb 2024 22:53:58 +0530
Subject: [PATCH 68/79] dt-bindings: crypto: add sam9x7 in Atmel SHA

Add the sam9x7 compatible to the Atmel SHA DT bindings.

Signed-off-by: Varshini Rajendran
Acked-by: Rob Herring
Reviewed-by: Tudor Ambarus
Signed-off-by: Herbert Xu
---
 .../devicetree/bindings/crypto/atmel,at91sam9g46-sha.yaml | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-sha.yaml b/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-sha.yaml
index ee2ffb034325..d378c53314dd 100644
--- a/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-sha.yaml
+++ b/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-sha.yaml
@@ -12,7 +12,11 @@ maintainers:
 
 properties:
   compatible:
-    const: atmel,at91sam9g46-sha
+    oneOf:
+      - const: atmel,at91sam9g46-sha
+      - items:
+          - const: microchip,sam9x7-sha
+          - const: atmel,at91sam9g46-sha
 
   reg:
     maxItems: 1

From 1e45f6051f2c83e61d59c6a19cf4b6a54af7093b Mon Sep 17 00:00:00 2001
From: Varshini Rajendran
Date: Fri, 23 Feb 2024 22:54:45 +0530
Subject: [PATCH 69/79] dt-bindings: crypto: add sam9x7 in Atmel TDES

Add the sam9x7 compatible to the Atmel TDES DT bindings.

Signed-off-by: Varshini Rajendran
Acked-by: Nicolas Ferre
Acked-by: Conor Dooley
Reviewed-by: Tudor Ambarus
Signed-off-by: Herbert Xu
---
 .../devicetree/bindings/crypto/atmel,at91sam9g46-tdes.yaml | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-tdes.yaml b/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-tdes.yaml
index 3d6ed24b1b00..6a441f79efea 100644
--- a/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-tdes.yaml
+++ b/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-tdes.yaml
@@ -12,7 +12,11 @@ maintainers:
 
 properties:
   compatible:
-    const: atmel,at91sam9g46-tdes
+    oneOf:
+      - const: atmel,at91sam9g46-tdes
+      - items:
+          - const: microchip,sam9x7-tdes
+          - const: atmel,at91sam9g46-tdes
 
   reg:
     maxItems: 1

From 7248e523a0d5a0eb06a710d79fc649e30616b890 Mon Sep 17 00:00:00 2001
From: Varshini Rajendran
Date: Fri, 23 Feb 2024 22:55:40 +0530
Subject: [PATCH 70/79] dt-bindings: rng: atmel,at91-trng: add sam9x7 TRNG

Add compatible for the Microchip sam9x7 TRNG.
Signed-off-by: Varshini Rajendran Reviewed-by: Krzysztof Kozlowski Signed-off-by: Herbert Xu --- Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml b/Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml index 3ce45456d867..b38f8252342e 100644 --- a/Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml +++ b/Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml @@ -21,6 +21,10 @@ properties: - enum: - microchip,sama7g5-trng - const: atmel,at91sam9g45-trng + - items: + - enum: + - microchip,sam9x7-trng + - const: microchip,sam9x60-trng clocks: maxItems: 1 From 262534ddc88dfea7474ed18adfecf856e4fbe054 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Sun, 25 Feb 2024 14:11:33 -0600 Subject: [PATCH 71/79] crypto: iaa - Fix async_disable descriptor leak The disable_async paths of iaa_compress/decompress() don't free idxd descriptors in the async_disable case. Currently this only happens in the testcases where req->dst is set to null. Add a test to free them in those paths. Signed-off-by: Tom Zanussi Signed-off-by: Herbert Xu --- drivers/crypto/intel/iaa/iaa_crypto_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index 39a5fc905c4d..85ee4c965ccf 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -1222,7 +1222,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, *compression_crc = idxd_desc->iax_completion->crc; - if (!ctx->async_mode) + if (!ctx->async_mode || disable_async) idxd_free_desc(wq, idxd_desc); out: return ret; @@ -1468,7 +1468,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, *dlen = req->dlen; - if (!ctx->async_mode) + if (!ctx->async_mode || disable_async) idxd_free_desc(wq, idxd_desc); /* Update stats */ From cdb083e73d632afcc5a931d31bb37445580f4bfb Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Sun, 25 Feb 2024 14:11:34 -0600 Subject: [PATCH 72/79] crypto: iaa - Fix comp/decomp delay statistics The comp/decomp delay statistics currently have no callers; somehow they were dropped during refactoring. There originally were also two sets, one for the async algorithm, the other for the synchronous version. Because the synchronous algorithm was dropped, one set should be removed. To keep it consistent with the rest of the stats, and since there's no ambiguity, remove the acomp/adecomp versions. Also add back the callers. 
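The restored pattern is straightforward: take a timestamp before issuing the request and fold the elapsed time into a running maximum afterwards. A minimal standalone sketch of that pattern, using hypothetical example_* names (the driver's own helpers are iaa_get_ts() and the update_max_comp_delay_ns()/update_max_decomp_delay_ns() pair shown in the diff below):

#include <linux/ktime.h>

static u64 example_max_delay_ns;

/* Fold one operation's latency into the running maximum. */
static void example_update_max_delay_ns(u64 start_time_ns)
{
	u64 time_diff = ktime_get_ns() - start_time_ns;

	if (time_diff > example_max_delay_ns)
		example_max_delay_ns = time_diff;
}

static int example_timed_op(int (*op)(void *arg), void *arg)
{
	u64 start_time_ns = ktime_get_ns();	/* what iaa_get_ts() wraps */
	int ret = op(arg);

	example_update_max_delay_ns(start_time_ns);
	return ret;
}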
Reported-by: Rex Zhang Signed-off-by: Tom Zanussi Signed-off-by: Herbert Xu --- drivers/crypto/intel/iaa/iaa_crypto_main.c | 9 +++++++ drivers/crypto/intel/iaa/iaa_crypto_stats.c | 28 --------------------- drivers/crypto/intel/iaa/iaa_crypto_stats.h | 8 +++--- 3 files changed, 13 insertions(+), 32 deletions(-) diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index 85ee4c965ccf..b54f93c64033 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -1494,6 +1494,7 @@ static int iaa_comp_acompress(struct acomp_req *req) u32 compression_crc; struct idxd_wq *wq; struct device *dev; + u64 start_time_ns; int order = -1; compression_ctx = crypto_tfm_ctx(tfm); @@ -1567,8 +1568,10 @@ static int iaa_comp_acompress(struct acomp_req *req) " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, req->dst, req->dlen, sg_dma_len(req->dst)); + start_time_ns = iaa_get_ts(); ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr, &req->dlen, &compression_crc, disable_async); + update_max_comp_delay_ns(start_time_ns); if (ret == -EINPROGRESS) return ret; @@ -1615,6 +1618,7 @@ static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req) struct iaa_wq *iaa_wq; struct device *dev; struct idxd_wq *wq; + u64 start_time_ns; int order = -1; cpu = get_cpu(); @@ -1671,8 +1675,10 @@ alloc_dest: dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, req->dst, req->dlen, sg_dma_len(req->dst)); + start_time_ns = iaa_get_ts(); ret = iaa_decompress(tfm, req, wq, src_addr, req->slen, dst_addr, &req->dlen, true); + update_max_decomp_delay_ns(start_time_ns); if (ret == -EOVERFLOW) { dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); req->dlen *= 2; @@ -1703,6 +1709,7 @@ static int iaa_comp_adecompress(struct acomp_req *req) int nr_sgs, cpu, ret = 0; struct iaa_wq *iaa_wq; struct device *dev; + u64 start_time_ns; struct idxd_wq *wq; if (!iaa_crypto_enabled) { @@ -1762,8 +1769,10 @@ static int iaa_comp_adecompress(struct acomp_req *req) " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, req->dst, req->dlen, sg_dma_len(req->dst)); + start_time_ns = iaa_get_ts(); ret = iaa_decompress(tfm, req, wq, src_addr, req->slen, dst_addr, &req->dlen, false); + update_max_decomp_delay_ns(start_time_ns); if (ret == -EINPROGRESS) return ret; diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.c b/drivers/crypto/intel/iaa/iaa_crypto_stats.c index cbf87d0effe3..c9f83af4b307 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_stats.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.c @@ -22,8 +22,6 @@ static u64 total_decomp_calls; static u64 total_sw_decomp_calls; static u64 max_comp_delay_ns; static u64 max_decomp_delay_ns; -static u64 max_acomp_delay_ns; -static u64 max_adecomp_delay_ns; static u64 total_comp_bytes_out; static u64 total_decomp_bytes_in; static u64 total_completion_einval_errors; @@ -92,26 +90,6 @@ void update_max_decomp_delay_ns(u64 start_time_ns) max_decomp_delay_ns = time_diff; } -void update_max_acomp_delay_ns(u64 start_time_ns) -{ - u64 time_diff; - - time_diff = ktime_get_ns() - start_time_ns; - - if (time_diff > max_acomp_delay_ns) - max_acomp_delay_ns = time_diff; -} - -void update_max_adecomp_delay_ns(u64 start_time_ns) -{ - u64 time_diff; - - time_diff = ktime_get_ns() - start_time_ns; - - if (time_diff > max_adecomp_delay_ns) - max_adecomp_delay_ns = time_diff; -} - void update_wq_comp_calls(struct idxd_wq *idxd_wq) { 
 	struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
@@ -151,8 +129,6 @@ static void reset_iaa_crypto_stats(void)
 	total_sw_decomp_calls = 0;
 	max_comp_delay_ns = 0;
 	max_decomp_delay_ns = 0;
-	max_acomp_delay_ns = 0;
-	max_adecomp_delay_ns = 0;
 	total_comp_bytes_out = 0;
 	total_decomp_bytes_in = 0;
 	total_completion_einval_errors = 0;
@@ -280,10 +256,6 @@ int __init iaa_crypto_debugfs_init(void)
 			   iaa_crypto_debugfs_root, &max_comp_delay_ns);
 	debugfs_create_u64("max_decomp_delay_ns", 0644,
 			   iaa_crypto_debugfs_root, &max_decomp_delay_ns);
-	debugfs_create_u64("max_acomp_delay_ns", 0644,
-			   iaa_crypto_debugfs_root, &max_comp_delay_ns);
-	debugfs_create_u64("max_adecomp_delay_ns", 0644,
-			   iaa_crypto_debugfs_root, &max_decomp_delay_ns);
 	debugfs_create_u64("total_comp_calls", 0644,
 			   iaa_crypto_debugfs_root, &total_comp_calls);
 	debugfs_create_u64("total_decomp_calls", 0644,
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.h b/drivers/crypto/intel/iaa/iaa_crypto_stats.h
index c10b87b86fa4..c916ca83f070 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_stats.h
+++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.h
@@ -15,8 +15,6 @@ void update_total_sw_decomp_calls(void);
 void update_total_decomp_bytes_in(int n);
 void update_max_comp_delay_ns(u64 start_time_ns);
 void update_max_decomp_delay_ns(u64 start_time_ns);
-void update_max_acomp_delay_ns(u64 start_time_ns);
-void update_max_adecomp_delay_ns(u64 start_time_ns);
 void update_completion_einval_errs(void);
 void update_completion_timeout_errs(void);
 void update_completion_comp_buf_overflow_errs(void);
@@ -26,6 +24,8 @@ void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n);
 void update_wq_decomp_calls(struct idxd_wq *idxd_wq);
 void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n);
 
+static inline u64 iaa_get_ts(void) { return ktime_get_ns(); }
+
 #else
 static inline int iaa_crypto_debugfs_init(void) { return 0; }
 static inline void iaa_crypto_debugfs_cleanup(void) {}
@@ -37,8 +37,6 @@ static inline void update_total_sw_decomp_calls(void) {}
 static inline void update_total_decomp_bytes_in(int n) {}
 static inline void update_max_comp_delay_ns(u64 start_time_ns) {}
 static inline void update_max_decomp_delay_ns(u64 start_time_ns) {}
-static inline void update_max_acomp_delay_ns(u64 start_time_ns) {}
-static inline void update_max_adecomp_delay_ns(u64 start_time_ns) {}
 static inline void update_completion_einval_errs(void) {}
 static inline void update_completion_timeout_errs(void) {}
 static inline void update_completion_comp_buf_overflow_errs(void) {}
@@ -48,6 +46,8 @@ static inline void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n) {}
 static inline void update_wq_decomp_calls(struct idxd_wq *idxd_wq) {}
 static inline void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n) {}
 
+static inline u64 iaa_get_ts(void) { return 0; }
+
 #endif // CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS
 
 #endif

From bc9ce934c469d594d77c44b41a90570fc8881e4d Mon Sep 17 00:00:00 2001
From: Tudor Ambarus
Date: Mon, 26 Feb 2024 13:52:25 +0200
Subject: [PATCH 73/79] MAINTAINERS: Remove T Ambarus from a few mchp entries

I have not been at Microchip for more than a year and I am no longer
interested in maintaining these drivers. Let other mchp people step up,
so remove myself.

Thanks for the nice collaboration everyone!
Signed-off-by: Tudor Ambarus
Acked-by: Vinod Koul
Signed-off-by: Herbert Xu
---
 MAINTAINERS | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index 5a4051996f1e..dc1564ce2f01 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -14234,7 +14234,6 @@ F:	drivers/misc/xilinx_tmr_manager.c
 
 MICROCHIP AT91 DMA DRIVERS
 M:	Ludovic Desroches
-M:	Tudor Ambarus
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	dmaengine@vger.kernel.org
 S:	Supported
@@ -14283,9 +14282,8 @@ F:	Documentation/devicetree/bindings/media/microchip,csi2dc.yaml
 F:	drivers/media/platform/microchip/microchip-csi2dc.c
 
 MICROCHIP ECC DRIVER
-M:	Tudor Ambarus
 L:	linux-crypto@vger.kernel.org
-S:	Maintained
+S:	Orphan
 F:	drivers/crypto/atmel-ecc.*
 
 MICROCHIP EIC DRIVER
@@ -14390,9 +14388,8 @@ S:	Maintained
 F:	drivers/mmc/host/atmel-mci.c
 
 MICROCHIP NAND DRIVER
-M:	Tudor Ambarus
 L:	linux-mtd@lists.infradead.org
-S:	Supported
+S:	Orphan
 F:	Documentation/devicetree/bindings/mtd/atmel-nand.txt
 F:	drivers/mtd/nand/raw/atmel/*

From 12e37aef7ba20a5a0d6e076968a41293b019e90d Mon Sep 17 00:00:00 2001
From: Martin Kaiser
Date: Mon, 26 Feb 2024 19:57:00 +0100
Subject: [PATCH 74/79] hwrng: hisi - use dev_err_probe

Replace dev_err + return with dev_err_probe.

Signed-off-by: Martin Kaiser
Reviewed-by: Jonathan Cameron
Signed-off-by: Herbert Xu
---
 drivers/char/hw_random/hisi-rng.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/char/hw_random/hisi-rng.c b/drivers/char/hw_random/hisi-rng.c
index b6f27566e0ba..4e501d5c121f 100644
--- a/drivers/char/hw_random/hisi-rng.c
+++ b/drivers/char/hw_random/hisi-rng.c
@@ -89,10 +89,8 @@ static int hisi_rng_probe(struct platform_device *pdev)
 	rng->rng.read = hisi_rng_read;
 
 	ret = devm_hwrng_register(&pdev->dev, &rng->rng);
-	if (ret) {
-		dev_err(&pdev->dev, "failed to register hwrng\n");
-		return ret;
-	}
+	if (ret)
+		return dev_err_probe(&pdev->dev, ret, "failed to register hwrng\n");
 
 	return 0;
 }

From db8ac883855ee3ccf52eb47ac665dbc1efac3af9 Mon Sep 17 00:00:00 2001
From: Barry Song
Date: Thu, 29 Feb 2024 23:14:48 +1300
Subject: [PATCH 75/79] crypto: hisilicon/zip - fix the missing CRYPTO_ALG_ASYNC in cra_flags

Add the missing CRYPTO_ALG_ASYNC flag since the HiSilicon zip driver
works asynchronously.

Cc: Zhou Wang
Signed-off-by: Barry Song
Acked-by: Yang Shen
Signed-off-by: Herbert Xu
---
 drivers/crypto/hisilicon/zip/zip_crypto.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index c650c741a18d..94e2d66b04b6 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -591,6 +591,7 @@ static struct acomp_alg hisi_zip_acomp_deflate = {
 	.base = {
 		.cra_name = "deflate",
 		.cra_driver_name = "hisi-deflate-acomp",
+		.cra_flags = CRYPTO_ALG_ASYNC,
 		.cra_module = THIS_MODULE,
 		.cra_priority = HZIP_ALG_PRIORITY,
 		.cra_ctxsize = sizeof(struct hisi_zip_ctx),

From 30dd94dba350043a32cfe9cb478ed621aae3c5c9 Mon Sep 17 00:00:00 2001
From: Barry Song
Date: Thu, 29 Feb 2024 23:14:49 +1300
Subject: [PATCH 76/79] crypto: iaa - fix the missing CRYPTO_ALG_ASYNC in cra_flags

Add the missing CRYPTO_ALG_ASYNC flag since the Intel IAA driver works
asynchronously.
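The flag matters to callers rather than to the driver itself: allocation-time masking is the only way a user can refuse asynchronous implementations, and that only works when async drivers advertise the flag. A minimal sketch under that assumption (example_get_sync_deflate() is hypothetical; crypto_alloc_acomp() is the real allocator):

#include <crypto/acompress.h>

static struct crypto_acomp *example_get_sync_deflate(void)
{
	/*
	 * type = 0, mask = CRYPTO_ALG_ASYNC: only implementations with
	 * CRYPTO_ALG_ASYNC clear can be returned, so a driver that
	 * forgets to set the flag would wrongly qualify here.
	 */
	return crypto_alloc_acomp("deflate", 0, CRYPTO_ALG_ASYNC);
}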
Signed-off-by: Barry Song
Acked-by: Tom Zanussi
Signed-off-by: Herbert Xu
---
 drivers/crypto/intel/iaa/iaa_crypto_main.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index b54f93c64033..1cd304de5388 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -1823,6 +1823,7 @@ static struct acomp_alg iaa_acomp_fixed_deflate = {
 	.base = {
 		.cra_name = "deflate",
 		.cra_driver_name = "deflate-iaa",
+		.cra_flags = CRYPTO_ALG_ASYNC,
 		.cra_ctxsize = sizeof(struct iaa_compression_ctx),
 		.cra_module = THIS_MODULE,
 		.cra_priority = IAA_ALG_PRIORITY,

From 43a7885ec0dfca2bdc60f2de736e55cf5e7b915d Mon Sep 17 00:00:00 2001
From: Vladis Dronov
Date: Thu, 29 Feb 2024 18:36:03 +0100
Subject: [PATCH 77/79] crypto: tcrypt - add ffdhe2048(dh) test

Commit 7dce59819750 ("crypto: dh - implement ffdheXYZ(dh) templates")
implemented said templates. Add an ffdhe2048(dh) test, as it is the
fastest one. This is a requirement for FIPS certification.

Signed-off-by: Vladis Dronov
Signed-off-by: Herbert Xu
---
 crypto/tcrypt.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index ea4d1cea9c06..8aea416f6480 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1851,6 +1851,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
 		ret = min(ret, tcrypt_test("cbc(aria)"));
 		ret = min(ret, tcrypt_test("ctr(aria)"));
 		break;
+	case 193:
+		ret = min(ret, tcrypt_test("ffdhe2048(dh)"));
+		break;
 	case 200:
 		test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
 				  speed_template_16_24_32);

From 77292bb8ca69c808741aadbd29207605296e24af Mon Sep 17 00:00:00 2001
From: Barry Song
Date: Sat, 2 Mar 2024 08:27:45 +1300
Subject: [PATCH 78/79] crypto: scomp - remove memcpy if sg_nents is 1 and
 pages are lowmem

When sg_nents is 1, which is always true today because the only user,
zswap, passes a single-entry scatterlist, we have a chance to remove the
memcpy and improve performance. Even though sg_nents is 1, the buffer
might still cross two pages. If those pages are highmem, we have no
cheap way to map them to a contiguous virtual address: kmap does not
support more than one page (and kmapping even a single highmem page can
still be expensive for the TLB), and vmap is expensive. So also check
that the pages are not highmem in order to use page_to_virt() safely
before removing the memcpy. The good news is that the vast majority of
systems are lowmem, and on modern, popular hardware we are always
lowmem.
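The test described above boils down to one predicate. A minimal sketch of it, factored into a hypothetical example_* helper rather than the patch's inline code:

#include <linux/highmem.h>
#include <linux/scatterlist.h>

/*
 * Return a directly usable linear address for a single-entry, lowmem
 * scatterlist, or NULL if the caller must fall back to copying
 * through a scratch buffer.
 */
static void *example_sg_linear_addr(struct scatterlist *sg)
{
	if (sg_nents(sg) == 1 && !PageHighMem(sg_page(sg)))
		return page_to_virt(sg_page(sg)) + sg->offset;

	return NULL;
}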
Cc: Johannes Weiner Cc: Nhat Pham Cc: Yosry Ahmed Signed-off-by: Barry Song Tested-by: Chengming Zhou Signed-off-by: Herbert Xu --- crypto/scompress.c | 38 ++++++++++++++++++++++++++++++-------- 1 file changed, 30 insertions(+), 8 deletions(-) diff --git a/crypto/scompress.c b/crypto/scompress.c index 9cda4ef84a9b..93daf3eb9842 100644 --- a/crypto/scompress.c +++ b/crypto/scompress.c @@ -117,6 +117,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) struct crypto_scomp *scomp = *tfm_ctx; void **ctx = acomp_request_ctx(req); struct scomp_scratch *scratch; + void *src, *dst; unsigned int dlen; int ret; @@ -134,13 +135,25 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) scratch = raw_cpu_ptr(&scomp_scratch); spin_lock(&scratch->lock); - scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0); - if (dir) - ret = crypto_scomp_compress(scomp, scratch->src, req->slen, - scratch->dst, &req->dlen, *ctx); + if (sg_nents(req->src) == 1 && !PageHighMem(sg_page(req->src))) { + src = page_to_virt(sg_page(req->src)) + req->src->offset; + } else { + scatterwalk_map_and_copy(scratch->src, req->src, 0, + req->slen, 0); + src = scratch->src; + } + + if (req->dst && sg_nents(req->dst) == 1 && !PageHighMem(sg_page(req->dst))) + dst = page_to_virt(sg_page(req->dst)) + req->dst->offset; else - ret = crypto_scomp_decompress(scomp, scratch->src, req->slen, - scratch->dst, &req->dlen, *ctx); + dst = scratch->dst; + + if (dir) + ret = crypto_scomp_compress(scomp, src, req->slen, + dst, &req->dlen, *ctx); + else + ret = crypto_scomp_decompress(scomp, src, req->slen, + dst, &req->dlen, *ctx); if (!ret) { if (!req->dst) { req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL); @@ -152,8 +165,17 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) ret = -ENOSPC; goto out; } - scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen, - 1); + if (dst == scratch->dst) { + scatterwalk_map_and_copy(scratch->dst, req->dst, 0, + req->dlen, 1); + } else { + int nr_pages = DIV_ROUND_UP(req->dst->offset + req->dlen, PAGE_SIZE); + int i; + struct page *dst_page = sg_page(req->dst); + + for (i = 0; i < nr_pages; i++) + flush_dcache_page(dst_page + i); + } } out: spin_unlock(&scratch->lock); From 6a8dbd71a70620c42d4fa82509204ba18231f28d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 13 Mar 2024 09:49:37 +0800 Subject: [PATCH 79/79] Revert "crypto: remove CONFIG_CRYPTO_STATS" This reverts commit 2beb81fbf0c01a62515a1bcef326168494ee2bd0. While removing CONFIG_CRYPTO_STATS is a worthy goal, this also removed unrelated infrastructure such as crypto_comp_alg_common. 
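The shared infrastructure the revert restores relies on the usual container_of() downcast: both acomp and scomp algorithms embed the same common struct, so generic code can recover it from a bare struct crypto_alg pointer. A simplified standalone sketch of that idiom with hypothetical example_* types (the real definitions live in <crypto/acompress.h>):

#include <linux/container_of.h>

struct example_base_alg {
	int flags;
};

struct example_comp_alg_common {
	/* shared fields, e.g. the statistics block, go here */
	struct example_base_alg base;
};

/* Recover the enclosing common struct from the embedded base. */
static struct example_comp_alg_common *
example_comp_alg_common(struct example_base_alg *alg)
{
	return container_of(alg, struct example_comp_alg_common, base);
}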
Signed-off-by: Herbert Xu --- arch/s390/configs/debug_defconfig | 1 + arch/s390/configs/defconfig | 1 + crypto/Kconfig | 20 +++ crypto/Makefile | 2 + crypto/acompress.c | 47 ++++- crypto/aead.c | 84 ++++++++- crypto/ahash.c | 65 ++++++- crypto/akcipher.c | 31 ++++ crypto/compress.h | 5 + crypto/{crypto_user.c => crypto_user_base.c} | 10 +- crypto/crypto_user_stat.c | 176 +++++++++++++++++++ crypto/hash.h | 30 ++++ crypto/kpp.c | 30 ++++ crypto/lskcipher.c | 73 +++++++- crypto/rng.c | 44 ++++- crypto/scompress.c | 8 +- crypto/shash.c | 75 +++++++- crypto/sig.c | 13 ++ crypto/skcipher.c | 86 ++++++++- crypto/skcipher.h | 10 ++ include/crypto/acompress.h | 90 +++++++++- include/crypto/aead.h | 21 +++ include/crypto/akcipher.h | 78 +++++++- include/crypto/algapi.h | 3 + include/crypto/hash.h | 22 +++ include/crypto/internal/acompress.h | 7 +- include/crypto/internal/cryptouser.h | 16 ++ include/crypto/internal/scompress.h | 8 +- include/crypto/kpp.h | 58 +++++- include/crypto/rng.h | 51 +++++- include/crypto/skcipher.h | 25 +++ include/uapi/linux/cryptouser.h | 28 ++- 32 files changed, 1140 insertions(+), 78 deletions(-) rename crypto/{crypto_user.c => crypto_user_base.c} (98%) create mode 100644 crypto/crypto_user_stat.c create mode 100644 include/crypto/internal/cryptouser.h diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 063f0c11087d..cae2dd34fbb4 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -766,6 +766,7 @@ CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m +CONFIG_CRYPTO_STATS=y CONFIG_CRYPTO_CRC32_S390=y CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_SHA1_S390=m diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index ab608ce768b7..42b988873e54 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -752,6 +752,7 @@ CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m +CONFIG_CRYPTO_STATS=y CONFIG_CRYPTO_CRC32_S390=y CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_SHA1_S390=m diff --git a/crypto/Kconfig b/crypto/Kconfig index f937142aa94d..44661c2e30ca 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1456,6 +1456,26 @@ config CRYPTO_USER_API_ENABLE_OBSOLETE already been phased out from internal use by the kernel, and are only useful for userspace clients that still rely on them. +config CRYPTO_STATS + bool "Crypto usage statistics" + depends on CRYPTO_USER + help + Enable the gathering of crypto stats. + + Enabling this option reduces the performance of the crypto API. It + should only be enabled when there is actually a use case for it. 
+ + This collects data sizes, numbers of requests, and numbers + of errors processed by: + - AEAD ciphers (encrypt, decrypt) + - asymmetric key ciphers (encrypt, decrypt, verify, sign) + - symmetric key ciphers (encrypt, decrypt) + - compression algorithms (compress, decompress) + - hash algorithms (hash) + - key-agreement protocol primitives (setsecret, generate + public key, compute shared secret) + - RNG (generate, seed) + endmenu config CRYPTO_HASH_INFO diff --git a/crypto/Makefile b/crypto/Makefile index de9a3312a2c8..408f0a1f9ab9 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -69,6 +69,8 @@ cryptomgr-y := algboss.o testmgr.o obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o obj-$(CONFIG_CRYPTO_USER) += crypto_user.o +crypto_user-y := crypto_user_base.o +crypto_user-$(CONFIG_CRYPTO_STATS) += crypto_user_stat.o obj-$(CONFIG_CRYPTO_CMAC) += cmac.o obj-$(CONFIG_CRYPTO_HMAC) += hmac.o obj-$(CONFIG_CRYPTO_VMAC) += vmac.o diff --git a/crypto/acompress.c b/crypto/acompress.c index 484a865b23cd..1c682810a484 100644 --- a/crypto/acompress.c +++ b/crypto/acompress.c @@ -25,7 +25,7 @@ static const struct crypto_type crypto_acomp_type; static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg) { - return container_of(alg, struct acomp_alg, base); + return container_of(alg, struct acomp_alg, calg.base); } static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm) @@ -93,6 +93,32 @@ static unsigned int crypto_acomp_extsize(struct crypto_alg *alg) return extsize; } +static inline int __crypto_acomp_report_stat(struct sk_buff *skb, + struct crypto_alg *alg) +{ + struct comp_alg_common *calg = __crypto_comp_alg_common(alg); + struct crypto_istat_compress *istat = comp_get_stat(calg); + struct crypto_stat_compress racomp; + + memset(&racomp, 0, sizeof(racomp)); + + strscpy(racomp.type, "acomp", sizeof(racomp.type)); + racomp.stat_compress_cnt = atomic64_read(&istat->compress_cnt); + racomp.stat_compress_tlen = atomic64_read(&istat->compress_tlen); + racomp.stat_decompress_cnt = atomic64_read(&istat->decompress_cnt); + racomp.stat_decompress_tlen = atomic64_read(&istat->decompress_tlen); + racomp.stat_err_cnt = atomic64_read(&istat->err_cnt); + + return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp); +} + +#ifdef CONFIG_CRYPTO_STATS +int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg) +{ + return __crypto_acomp_report_stat(skb, alg); +} +#endif + static const struct crypto_type crypto_acomp_type = { .extsize = crypto_acomp_extsize, .init_tfm = crypto_acomp_init_tfm, @@ -101,6 +127,9 @@ static const struct crypto_type crypto_acomp_type = { #endif #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_acomp_report, +#endif +#ifdef CONFIG_CRYPTO_STATS + .report_stat = crypto_acomp_report_stat, #endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK, @@ -153,12 +182,24 @@ void acomp_request_free(struct acomp_req *req) } EXPORT_SYMBOL_GPL(acomp_request_free); -int crypto_register_acomp(struct acomp_alg *alg) +void comp_prepare_alg(struct comp_alg_common *alg) { + struct crypto_istat_compress *istat = comp_get_stat(alg); struct crypto_alg *base = &alg->base; - base->cra_type = &crypto_acomp_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; + + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) + memset(istat, 0, sizeof(*istat)); +} + +int crypto_register_acomp(struct acomp_alg *alg) +{ + struct crypto_alg *base = &alg->calg.base; + + comp_prepare_alg(&alg->calg); + + base->cra_type = &crypto_acomp_type; base->cra_flags |= 
CRYPTO_ALG_TYPE_ACOMPRESS; return crypto_register_alg(base); diff --git a/crypto/aead.c b/crypto/aead.c index 0e75a69189df..54906633566a 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -20,6 +20,15 @@ #include "internal.h" +static inline struct crypto_istat_aead *aead_get_stat(struct aead_alg *alg) +{ +#ifdef CONFIG_CRYPTO_STATS + return &alg->stat; +#else + return NULL; +#endif +} + static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { @@ -81,28 +90,62 @@ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) } EXPORT_SYMBOL_GPL(crypto_aead_setauthsize); +static inline int crypto_aead_errstat(struct crypto_istat_aead *istat, int err) +{ + if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) + return err; + + if (err && err != -EINPROGRESS && err != -EBUSY) + atomic64_inc(&istat->err_cnt); + + return err; +} + int crypto_aead_encrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct aead_alg *alg = crypto_aead_alg(aead); + struct crypto_istat_aead *istat; + int ret; + + istat = aead_get_stat(alg); + + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { + atomic64_inc(&istat->encrypt_cnt); + atomic64_add(req->cryptlen, &istat->encrypt_tlen); + } if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; + ret = -ENOKEY; + else + ret = alg->encrypt(req); - return crypto_aead_alg(aead)->encrypt(req); + return crypto_aead_errstat(istat, ret); } EXPORT_SYMBOL_GPL(crypto_aead_encrypt); int crypto_aead_decrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct aead_alg *alg = crypto_aead_alg(aead); + struct crypto_istat_aead *istat; + int ret; + + istat = aead_get_stat(alg); + + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { + atomic64_inc(&istat->encrypt_cnt); + atomic64_add(req->cryptlen, &istat->encrypt_tlen); + } if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; + ret = -ENOKEY; + else if (req->cryptlen < crypto_aead_authsize(aead)) + ret = -EINVAL; + else + ret = alg->decrypt(req); - if (req->cryptlen < crypto_aead_authsize(aead)) - return -EINVAL; - - return crypto_aead_alg(aead)->decrypt(req); + return crypto_aead_errstat(istat, ret); } EXPORT_SYMBOL_GPL(crypto_aead_decrypt); @@ -172,6 +215,26 @@ static void crypto_aead_free_instance(struct crypto_instance *inst) aead->free(aead); } +static int __maybe_unused crypto_aead_report_stat( + struct sk_buff *skb, struct crypto_alg *alg) +{ + struct aead_alg *aead = container_of(alg, struct aead_alg, base); + struct crypto_istat_aead *istat = aead_get_stat(aead); + struct crypto_stat_aead raead; + + memset(&raead, 0, sizeof(raead)); + + strscpy(raead.type, "aead", sizeof(raead.type)); + + raead.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt); + raead.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen); + raead.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt); + raead.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen); + raead.stat_err_cnt = atomic64_read(&istat->err_cnt); + + return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead); +} + static const struct crypto_type crypto_aead_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_aead_init_tfm, @@ -181,6 +244,9 @@ static const struct crypto_type crypto_aead_type = { #endif #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_aead_report, +#endif +#ifdef CONFIG_CRYPTO_STATS + .report_stat = crypto_aead_report_stat, #endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_MASK, @@ -211,6 +277,7 @@ 
EXPORT_SYMBOL_GPL(crypto_has_aead); static int aead_prepare_alg(struct aead_alg *alg) { + struct crypto_istat_aead *istat = aead_get_stat(alg); struct crypto_alg *base = &alg->base; if (max3(alg->maxauthsize, alg->ivsize, alg->chunksize) > @@ -224,6 +291,9 @@ static int aead_prepare_alg(struct aead_alg *alg) base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_AEAD; + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) + memset(istat, 0, sizeof(*istat)); + return 0; } diff --git a/crypto/ahash.c b/crypto/ahash.c index bcd9de009a91..0ac83f7f701d 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -27,6 +27,22 @@ #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e +static inline struct crypto_istat_hash *ahash_get_stat(struct ahash_alg *alg) +{ + return hash_get_stat(&alg->halg); +} + +static inline int crypto_ahash_errstat(struct ahash_alg *alg, int err) +{ + if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) + return err; + + if (err && err != -EINPROGRESS && err != -EBUSY) + atomic64_inc(&ahash_get_stat(alg)->err_cnt); + + return err; +} + /* * For an ahash tfm that is using an shash algorithm (instead of an ahash * algorithm), this returns the underlying shash tfm. @@ -328,47 +344,75 @@ static void ahash_restore_req(struct ahash_request *req, int err) int crypto_ahash_update(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ahash_alg *alg; if (likely(tfm->using_shash)) return shash_ahash_update(req, ahash_request_ctx(req)); - return crypto_ahash_alg(tfm)->update(req); + alg = crypto_ahash_alg(tfm); + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) + atomic64_add(req->nbytes, &ahash_get_stat(alg)->hash_tlen); + return crypto_ahash_errstat(alg, alg->update(req)); } EXPORT_SYMBOL_GPL(crypto_ahash_update); int crypto_ahash_final(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ahash_alg *alg; if (likely(tfm->using_shash)) return crypto_shash_final(ahash_request_ctx(req), req->result); - return crypto_ahash_alg(tfm)->final(req); + alg = crypto_ahash_alg(tfm); + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) + atomic64_inc(&ahash_get_stat(alg)->hash_cnt); + return crypto_ahash_errstat(alg, alg->final(req)); } EXPORT_SYMBOL_GPL(crypto_ahash_final); int crypto_ahash_finup(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ahash_alg *alg; if (likely(tfm->using_shash)) return shash_ahash_finup(req, ahash_request_ctx(req)); - return crypto_ahash_alg(tfm)->finup(req); + alg = crypto_ahash_alg(tfm); + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { + struct crypto_istat_hash *istat = ahash_get_stat(alg); + + atomic64_inc(&istat->hash_cnt); + atomic64_add(req->nbytes, &istat->hash_tlen); + } + return crypto_ahash_errstat(alg, alg->finup(req)); } EXPORT_SYMBOL_GPL(crypto_ahash_finup); int crypto_ahash_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ahash_alg *alg; + int err; if (likely(tfm->using_shash)) return shash_ahash_digest(req, prepare_shash_desc(req, tfm)); - if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; + alg = crypto_ahash_alg(tfm); + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { + struct crypto_istat_hash *istat = ahash_get_stat(alg); - return crypto_ahash_alg(tfm)->digest(req); + atomic64_inc(&istat->hash_cnt); + atomic64_add(req->nbytes, &istat->hash_tlen); + } + + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + err = -ENOKEY; + else + err = alg->digest(req); + + return crypto_ahash_errstat(alg, err); } 
EXPORT_SYMBOL_GPL(crypto_ahash_digest); @@ -527,6 +571,12 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) __crypto_hash_alg_common(alg)->digestsize); } +static int __maybe_unused crypto_ahash_report_stat( + struct sk_buff *skb, struct crypto_alg *alg) +{ + return crypto_hash_report_stat(skb, alg, "ahash"); +} + static const struct crypto_type crypto_ahash_type = { .extsize = crypto_ahash_extsize, .init_tfm = crypto_ahash_init_tfm, @@ -536,6 +586,9 @@ static const struct crypto_type crypto_ahash_type = { #endif #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_ahash_report, +#endif +#ifdef CONFIG_CRYPTO_STATS + .report_stat = crypto_ahash_report_stat, #endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_AHASH_MASK, diff --git a/crypto/akcipher.c b/crypto/akcipher.c index e0ff5f4dda6d..52813f0b19e4 100644 --- a/crypto/akcipher.c +++ b/crypto/akcipher.c @@ -70,6 +70,30 @@ static void crypto_akcipher_free_instance(struct crypto_instance *inst) akcipher->free(akcipher); } +static int __maybe_unused crypto_akcipher_report_stat( + struct sk_buff *skb, struct crypto_alg *alg) +{ + struct akcipher_alg *akcipher = __crypto_akcipher_alg(alg); + struct crypto_istat_akcipher *istat; + struct crypto_stat_akcipher rakcipher; + + istat = akcipher_get_stat(akcipher); + + memset(&rakcipher, 0, sizeof(rakcipher)); + + strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type)); + rakcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt); + rakcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen); + rakcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt); + rakcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen); + rakcipher.stat_sign_cnt = atomic64_read(&istat->sign_cnt); + rakcipher.stat_verify_cnt = atomic64_read(&istat->verify_cnt); + rakcipher.stat_err_cnt = atomic64_read(&istat->err_cnt); + + return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER, + sizeof(rakcipher), &rakcipher); +} + static const struct crypto_type crypto_akcipher_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_akcipher_init_tfm, @@ -79,6 +103,9 @@ static const struct crypto_type crypto_akcipher_type = { #endif #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_akcipher_report, +#endif +#ifdef CONFIG_CRYPTO_STATS + .report_stat = crypto_akcipher_report_stat, #endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_AHASH_MASK, @@ -104,11 +131,15 @@ EXPORT_SYMBOL_GPL(crypto_alloc_akcipher); static void akcipher_prepare_alg(struct akcipher_alg *alg) { + struct crypto_istat_akcipher *istat = akcipher_get_stat(alg); struct crypto_alg *base = &alg->base; base->cra_type = &crypto_akcipher_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER; + + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) + memset(istat, 0, sizeof(*istat)); } static int akcipher_default_op(struct akcipher_request *req) diff --git a/crypto/compress.h b/crypto/compress.h index 23ea43810810..19f65516d699 100644 --- a/crypto/compress.h +++ b/crypto/compress.h @@ -12,10 +12,15 @@ #include "internal.h" struct acomp_req; +struct comp_alg_common; struct sk_buff; int crypto_init_scomp_ops_async(struct crypto_tfm *tfm); struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req); void crypto_acomp_scomp_free_ctx(struct acomp_req *req); +int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg); + +void comp_prepare_alg(struct comp_alg_common *alg); + #endif /* _LOCAL_CRYPTO_COMPRESS_H */ diff --git a/crypto/crypto_user.c 
b/crypto/crypto_user_base.c similarity index 98% rename from crypto/crypto_user.c rename to crypto/crypto_user_base.c index 6c571834e86a..3fa20f12989f 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user_base.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "internal.h" @@ -32,7 +33,7 @@ struct crypto_dump_info { u16 nlmsg_flags; }; -static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact) +struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact) { struct crypto_alg *q, *alg = NULL; @@ -386,13 +387,6 @@ static int crypto_del_rng(struct sk_buff *skb, struct nlmsghdr *nlh, return crypto_del_default_rng(); } -static int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, - struct nlattr **attrs) -{ - /* No longer supported */ - return -ENOTSUPP; -} - #define MSGSIZE(type) sizeof(struct type) static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = { diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c new file mode 100644 index 000000000000..d4f3d39b5137 --- /dev/null +++ b/crypto/crypto_user_stat.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Crypto user configuration API. + * + * Copyright (C) 2017-2018 Corentin Labbe + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x)) + +struct crypto_dump_info { + struct sk_buff *in_skb; + struct sk_buff *out_skb; + u32 nlmsg_seq; + u16 nlmsg_flags; +}; + +static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg) +{ + struct crypto_stat_cipher rcipher; + + memset(&rcipher, 0, sizeof(rcipher)); + + strscpy(rcipher.type, "cipher", sizeof(rcipher.type)); + + return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher); +} + +static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) +{ + struct crypto_stat_compress rcomp; + + memset(&rcomp, 0, sizeof(rcomp)); + + strscpy(rcomp.type, "compression", sizeof(rcomp.type)); + + return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp); +} + +static int crypto_reportstat_one(struct crypto_alg *alg, + struct crypto_user_alg *ualg, + struct sk_buff *skb) +{ + memset(ualg, 0, sizeof(*ualg)); + + strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name)); + strscpy(ualg->cru_driver_name, alg->cra_driver_name, + sizeof(ualg->cru_driver_name)); + strscpy(ualg->cru_module_name, module_name(alg->cra_module), + sizeof(ualg->cru_module_name)); + + ualg->cru_type = 0; + ualg->cru_mask = 0; + ualg->cru_flags = alg->cra_flags; + ualg->cru_refcnt = refcount_read(&alg->cra_refcnt); + + if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority)) + goto nla_put_failure; + if (alg->cra_flags & CRYPTO_ALG_LARVAL) { + struct crypto_stat_larval rl; + + memset(&rl, 0, sizeof(rl)); + strscpy(rl.type, "larval", sizeof(rl.type)); + if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL, sizeof(rl), &rl)) + goto nla_put_failure; + goto out; + } + + if (alg->cra_type && alg->cra_type->report_stat) { + if (alg->cra_type->report_stat(skb, alg)) + goto nla_put_failure; + goto out; + } + + switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) { + case CRYPTO_ALG_TYPE_CIPHER: + if (crypto_report_cipher(skb, alg)) + goto nla_put_failure; + break; + case CRYPTO_ALG_TYPE_COMPRESS: + if (crypto_report_comp(skb, alg)) + goto nla_put_failure; + break; + default: + pr_err("ERROR: Unhandled alg %d in %s\n", + alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL), + 
__func__); + } + +out: + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + +static int crypto_reportstat_alg(struct crypto_alg *alg, + struct crypto_dump_info *info) +{ + struct sk_buff *in_skb = info->in_skb; + struct sk_buff *skb = info->out_skb; + struct nlmsghdr *nlh; + struct crypto_user_alg *ualg; + int err = 0; + + nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq, + CRYPTO_MSG_GETSTAT, sizeof(*ualg), info->nlmsg_flags); + if (!nlh) { + err = -EMSGSIZE; + goto out; + } + + ualg = nlmsg_data(nlh); + + err = crypto_reportstat_one(alg, ualg, skb); + if (err) { + nlmsg_cancel(skb, nlh); + goto out; + } + + nlmsg_end(skb, nlh); + +out: + return err; +} + +int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, + struct nlattr **attrs) +{ + struct net *net = sock_net(in_skb->sk); + struct crypto_user_alg *p = nlmsg_data(in_nlh); + struct crypto_alg *alg; + struct sk_buff *skb; + struct crypto_dump_info info; + int err; + + if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name)) + return -EINVAL; + + alg = crypto_alg_match(p, 0); + if (!alg) + return -ENOENT; + + err = -ENOMEM; + skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); + if (!skb) + goto drop_alg; + + info.in_skb = in_skb; + info.out_skb = skb; + info.nlmsg_seq = in_nlh->nlmsg_seq; + info.nlmsg_flags = 0; + + err = crypto_reportstat_alg(alg, &info); + +drop_alg: + crypto_mod_put(alg); + + if (err) { + kfree_skb(skb); + return err; + } + + return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid); +} + +MODULE_LICENSE("GPL"); diff --git a/crypto/hash.h b/crypto/hash.h index cf9aee07f77d..93f6ba0df263 100644 --- a/crypto/hash.h +++ b/crypto/hash.h @@ -8,9 +8,39 @@ #define _LOCAL_CRYPTO_HASH_H #include +#include #include "internal.h" +static inline struct crypto_istat_hash *hash_get_stat( + struct hash_alg_common *alg) +{ +#ifdef CONFIG_CRYPTO_STATS + return &alg->stat; +#else + return NULL; +#endif +} + +static inline int crypto_hash_report_stat(struct sk_buff *skb, + struct crypto_alg *alg, + const char *type) +{ + struct hash_alg_common *halg = __crypto_hash_alg_common(alg); + struct crypto_istat_hash *istat = hash_get_stat(halg); + struct crypto_stat_hash rhash; + + memset(&rhash, 0, sizeof(rhash)); + + strscpy(rhash.type, type, sizeof(rhash.type)); + + rhash.stat_hash_cnt = atomic64_read(&istat->hash_cnt); + rhash.stat_hash_tlen = atomic64_read(&istat->hash_tlen); + rhash.stat_err_cnt = atomic64_read(&istat->err_cnt); + + return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash); +} + extern const struct crypto_type crypto_shash_type; int hash_prepare_alg(struct hash_alg_common *alg); diff --git a/crypto/kpp.c b/crypto/kpp.c index ecc63a1a948d..33d44e59387f 100644 --- a/crypto/kpp.c +++ b/crypto/kpp.c @@ -66,6 +66,29 @@ static void crypto_kpp_free_instance(struct crypto_instance *inst) kpp->free(kpp); } +static int __maybe_unused crypto_kpp_report_stat( + struct sk_buff *skb, struct crypto_alg *alg) +{ + struct kpp_alg *kpp = __crypto_kpp_alg(alg); + struct crypto_istat_kpp *istat; + struct crypto_stat_kpp rkpp; + + istat = kpp_get_stat(kpp); + + memset(&rkpp, 0, sizeof(rkpp)); + + strscpy(rkpp.type, "kpp", sizeof(rkpp.type)); + + rkpp.stat_setsecret_cnt = atomic64_read(&istat->setsecret_cnt); + rkpp.stat_generate_public_key_cnt = + atomic64_read(&istat->generate_public_key_cnt); + rkpp.stat_compute_shared_secret_cnt = + atomic64_read(&istat->compute_shared_secret_cnt); + rkpp.stat_err_cnt = atomic64_read(&istat->err_cnt); + + return nla_put(skb, 
CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp); +} + static const struct crypto_type crypto_kpp_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_kpp_init_tfm, @@ -75,6 +98,9 @@ static const struct crypto_type crypto_kpp_type = { #endif #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_kpp_report, +#endif +#ifdef CONFIG_CRYPTO_STATS + .report_stat = crypto_kpp_report_stat, #endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_MASK, @@ -105,11 +131,15 @@ EXPORT_SYMBOL_GPL(crypto_has_kpp); static void kpp_prepare_alg(struct kpp_alg *alg) { + struct crypto_istat_kpp *istat = kpp_get_stat(alg); struct crypto_alg *base = &alg->base; base->cra_type = &crypto_kpp_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_KPP; + + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) + memset(istat, 0, sizeof(*istat)); } int crypto_register_kpp(struct kpp_alg *alg) diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c index 0a800292ca4e..0b6dd8aa21f2 100644 --- a/crypto/lskcipher.c +++ b/crypto/lskcipher.c @@ -29,6 +29,25 @@ static inline struct lskcipher_alg *__crypto_lskcipher_alg( return container_of(alg, struct lskcipher_alg, co.base); } +static inline struct crypto_istat_cipher *lskcipher_get_stat( + struct lskcipher_alg *alg) +{ + return skcipher_get_stat_common(&alg->co); +} + +static inline int crypto_lskcipher_errstat(struct lskcipher_alg *alg, int err) +{ + struct crypto_istat_cipher *istat = lskcipher_get_stat(alg); + + if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) + return err; + + if (err) + atomic64_inc(&istat->err_cnt); + + return err; +} + static int lskcipher_setkey_unaligned(struct crypto_lskcipher *tfm, const u8 *key, unsigned int keylen) { @@ -128,13 +147,20 @@ static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src, u32 flags)) { unsigned long alignmask = crypto_lskcipher_alignmask(tfm); + struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm); + int ret; if (((unsigned long)src | (unsigned long)dst | (unsigned long)iv) & - alignmask) - return crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv, - crypt); + alignmask) { + ret = crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv, + crypt); + goto out; + } - return crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL); + ret = crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL); + +out: + return crypto_lskcipher_errstat(alg, ret); } int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src, @@ -142,6 +168,13 @@ int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src, { struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm); + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { + struct crypto_istat_cipher *istat = lskcipher_get_stat(alg); + + atomic64_inc(&istat->encrypt_cnt); + atomic64_add(len, &istat->encrypt_tlen); + } + return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->encrypt); } EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt); @@ -151,6 +184,13 @@ int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src, { struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm); + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { + struct crypto_istat_cipher *istat = lskcipher_get_stat(alg); + + atomic64_inc(&istat->decrypt_cnt); + atomic64_add(len, &istat->decrypt_tlen); + } + return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->decrypt); } EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt); @@ -282,6 +322,28 @@ static int __maybe_unused crypto_lskcipher_report( sizeof(rblkcipher), &rblkcipher); } +static int __maybe_unused 
crypto_lskcipher_report_stat( + struct sk_buff *skb, struct crypto_alg *alg) +{ + struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg); + struct crypto_istat_cipher *istat; + struct crypto_stat_cipher rcipher; + + istat = lskcipher_get_stat(skcipher); + + memset(&rcipher, 0, sizeof(rcipher)); + + strscpy(rcipher.type, "cipher", sizeof(rcipher.type)); + + rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt); + rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen); + rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt); + rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen); + rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt); + + return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher); +} + static const struct crypto_type crypto_lskcipher_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_lskcipher_init_tfm, @@ -291,6 +353,9 @@ static const struct crypto_type crypto_lskcipher_type = { #endif #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_lskcipher_report, +#endif +#ifdef CONFIG_CRYPTO_STATS + .report_stat = crypto_lskcipher_report_stat, #endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_MASK, diff --git a/crypto/rng.c b/crypto/rng.c index 9d8804e46422..279dffdebf59 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -30,24 +30,30 @@ static int crypto_default_rng_refcnt; int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) { + struct rng_alg *alg = crypto_rng_alg(tfm); u8 *buf = NULL; int err; + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) + atomic64_inc(&rng_get_stat(alg)->seed_cnt); + if (!seed && slen) { buf = kmalloc(slen, GFP_KERNEL); + err = -ENOMEM; if (!buf) - return -ENOMEM; + goto out; err = get_random_bytes_wait(buf, slen); if (err) - goto out; + goto free_buf; seed = buf; } - err = crypto_rng_alg(tfm)->seed(tfm, seed, slen); -out: + err = alg->seed(tfm, seed, slen); +free_buf: kfree_sensitive(buf); - return err; +out: + return crypto_rng_errstat(alg, err); } EXPORT_SYMBOL_GPL(crypto_rng_reset); @@ -85,6 +91,27 @@ static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) seq_printf(m, "seedsize : %u\n", seedsize(alg)); } +static int __maybe_unused crypto_rng_report_stat( + struct sk_buff *skb, struct crypto_alg *alg) +{ + struct rng_alg *rng = __crypto_rng_alg(alg); + struct crypto_istat_rng *istat; + struct crypto_stat_rng rrng; + + istat = rng_get_stat(rng); + + memset(&rrng, 0, sizeof(rrng)); + + strscpy(rrng.type, "rng", sizeof(rrng.type)); + + rrng.stat_generate_cnt = atomic64_read(&istat->generate_cnt); + rrng.stat_generate_tlen = atomic64_read(&istat->generate_tlen); + rrng.stat_seed_cnt = atomic64_read(&istat->seed_cnt); + rrng.stat_err_cnt = atomic64_read(&istat->err_cnt); + + return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng); +} + static const struct crypto_type crypto_rng_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_rng_init_tfm, @@ -93,6 +120,9 @@ static const struct crypto_type crypto_rng_type = { #endif #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_rng_report, +#endif +#ifdef CONFIG_CRYPTO_STATS + .report_stat = crypto_rng_report_stat, #endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_MASK, @@ -169,6 +199,7 @@ EXPORT_SYMBOL_GPL(crypto_del_default_rng); int crypto_register_rng(struct rng_alg *alg) { + struct crypto_istat_rng *istat = rng_get_stat(alg); struct crypto_alg *base = &alg->base; if (alg->seedsize > PAGE_SIZE / 8) @@ -178,6 +209,9 @@ int crypto_register_rng(struct rng_alg *alg) 
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_RNG; + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) + memset(istat, 0, sizeof(*istat)); + return crypto_register_alg(base); } EXPORT_SYMBOL_GPL(crypto_register_rng); diff --git a/crypto/scompress.c b/crypto/scompress.c index 93daf3eb9842..60bbb7ea4060 100644 --- a/crypto/scompress.c +++ b/crypto/scompress.c @@ -270,6 +270,9 @@ static const struct crypto_type crypto_scomp_type = { #endif #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_scomp_report, +#endif +#ifdef CONFIG_CRYPTO_STATS + .report_stat = crypto_acomp_report_stat, #endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_MASK, @@ -279,10 +282,11 @@ static const struct crypto_type crypto_scomp_type = { int crypto_register_scomp(struct scomp_alg *alg) { - struct crypto_alg *base = &alg->base; + struct crypto_alg *base = &alg->calg.base; + + comp_prepare_alg(&alg->calg); base->cra_type = &crypto_scomp_type; - base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS; return crypto_register_alg(base); diff --git a/crypto/shash.c b/crypto/shash.c index 0ffe671b519e..c3f7f6a25280 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -16,6 +16,18 @@ #include "hash.h" +static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg) +{ + return hash_get_stat(&alg->halg); +} + +static inline int crypto_shash_errstat(struct shash_alg *alg, int err) +{ + if (IS_ENABLED(CONFIG_CRYPTO_STATS) && err) + atomic64_inc(&shash_get_stat(alg)->err_cnt); + return err; +} + int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { @@ -49,13 +61,29 @@ EXPORT_SYMBOL_GPL(crypto_shash_setkey); int crypto_shash_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - return crypto_shash_alg(desc->tfm)->update(desc, data, len); + struct shash_alg *shash = crypto_shash_alg(desc->tfm); + int err; + + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) + atomic64_add(len, &shash_get_stat(shash)->hash_tlen); + + err = shash->update(desc, data, len); + + return crypto_shash_errstat(shash, err); } EXPORT_SYMBOL_GPL(crypto_shash_update); int crypto_shash_final(struct shash_desc *desc, u8 *out) { - return crypto_shash_alg(desc->tfm)->final(desc, out); + struct shash_alg *shash = crypto_shash_alg(desc->tfm); + int err; + + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) + atomic64_inc(&shash_get_stat(shash)->hash_cnt); + + err = shash->final(desc, out); + + return crypto_shash_errstat(shash, err); } EXPORT_SYMBOL_GPL(crypto_shash_final); @@ -71,7 +99,20 @@ static int shash_default_finup(struct shash_desc *desc, const u8 *data, int crypto_shash_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - return crypto_shash_alg(desc->tfm)->finup(desc, data, len, out); + struct crypto_shash *tfm = desc->tfm; + struct shash_alg *shash = crypto_shash_alg(tfm); + int err; + + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { + struct crypto_istat_hash *istat = shash_get_stat(shash); + + atomic64_inc(&istat->hash_cnt); + atomic64_add(len, &istat->hash_tlen); + } + + err = shash->finup(desc, data, len, out); + + return crypto_shash_errstat(shash, err); } EXPORT_SYMBOL_GPL(crypto_shash_finup); @@ -88,11 +129,22 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { struct crypto_shash *tfm = desc->tfm; + struct shash_alg *shash = crypto_shash_alg(tfm); + int err; + + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { + struct crypto_istat_hash *istat = shash_get_stat(shash); + + 
atomic64_inc(&istat->hash_cnt); + atomic64_add(len, &istat->hash_tlen); + } if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; + err = -ENOKEY; + else + err = shash->digest(desc, data, len, out); - return crypto_shash_alg(desc->tfm)->digest(desc, data, len, out); + return crypto_shash_errstat(shash, err); } EXPORT_SYMBOL_GPL(crypto_shash_digest); @@ -213,6 +265,12 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) seq_printf(m, "digestsize : %u\n", salg->digestsize); } +static int __maybe_unused crypto_shash_report_stat( + struct sk_buff *skb, struct crypto_alg *alg) +{ + return crypto_hash_report_stat(skb, alg, "shash"); +} + const struct crypto_type crypto_shash_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_shash_init_tfm, @@ -222,6 +280,9 @@ const struct crypto_type crypto_shash_type = { #endif #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_shash_report, +#endif +#ifdef CONFIG_CRYPTO_STATS + .report_stat = crypto_shash_report_stat, #endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_MASK, @@ -289,6 +350,7 @@ EXPORT_SYMBOL_GPL(crypto_clone_shash); int hash_prepare_alg(struct hash_alg_common *alg) { + struct crypto_istat_hash *istat = hash_get_stat(alg); struct crypto_alg *base = &alg->base; if (alg->digestsize > HASH_MAX_DIGESTSIZE) @@ -300,6 +362,9 @@ int hash_prepare_alg(struct hash_alg_common *alg) base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) + memset(istat, 0, sizeof(*istat)); + return 0; } diff --git a/crypto/sig.c b/crypto/sig.c index 7645bedf3a1f..224c47019297 100644 --- a/crypto/sig.c +++ b/crypto/sig.c @@ -45,6 +45,16 @@ static int __maybe_unused crypto_sig_report(struct sk_buff *skb, return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, sizeof(rsig), &rsig); } +static int __maybe_unused crypto_sig_report_stat(struct sk_buff *skb, + struct crypto_alg *alg) +{ + struct crypto_stat_akcipher rsig = {}; + + strscpy(rsig.type, "sig", sizeof(rsig.type)); + + return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER, sizeof(rsig), &rsig); +} + static const struct crypto_type crypto_sig_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_sig_init_tfm, @@ -53,6 +63,9 @@ static const struct crypto_type crypto_sig_type = { #endif #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_sig_report, +#endif +#ifdef CONFIG_CRYPTO_STATS + .report_stat = crypto_sig_report_stat, #endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_SIG_MASK, diff --git a/crypto/skcipher.c b/crypto/skcipher.c index ceed7f33a67b..bc70e159d27d 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -89,6 +89,25 @@ static inline struct skcipher_alg *__crypto_skcipher_alg( return container_of(alg, struct skcipher_alg, base); } +static inline struct crypto_istat_cipher *skcipher_get_stat( + struct skcipher_alg *alg) +{ + return skcipher_get_stat_common(&alg->co); +} + +static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err) +{ + struct crypto_istat_cipher *istat = skcipher_get_stat(alg); + + if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) + return err; + + if (err && err != -EINPROGRESS && err != -EBUSY) + atomic64_inc(&istat->err_cnt); + + return err; +} + static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) { u8 *addr; @@ -635,12 +654,23 @@ int crypto_skcipher_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + int ret; + + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { 
+ struct crypto_istat_cipher *istat = skcipher_get_stat(alg); + + atomic64_inc(&istat->encrypt_cnt); + atomic64_add(req->cryptlen, &istat->encrypt_tlen); + } if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - if (alg->co.base.cra_type != &crypto_skcipher_type) - return crypto_lskcipher_encrypt_sg(req); - return alg->encrypt(req); + ret = -ENOKEY; + else if (alg->co.base.cra_type != &crypto_skcipher_type) + ret = crypto_lskcipher_encrypt_sg(req); + else + ret = alg->encrypt(req); + + return crypto_skcipher_errstat(alg, ret); } EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt); @@ -648,12 +678,23 @@ int crypto_skcipher_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + int ret; + + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { + struct crypto_istat_cipher *istat = skcipher_get_stat(alg); + + atomic64_inc(&istat->decrypt_cnt); + atomic64_add(req->cryptlen, &istat->decrypt_tlen); + } if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - if (alg->co.base.cra_type != &crypto_skcipher_type) - return crypto_lskcipher_decrypt_sg(req); - return alg->decrypt(req); + ret = -ENOKEY; + else if (alg->co.base.cra_type != &crypto_skcipher_type) + ret = crypto_lskcipher_decrypt_sg(req); + else + ret = alg->decrypt(req); + + return crypto_skcipher_errstat(alg, ret); } EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt); @@ -805,6 +846,28 @@ static int __maybe_unused crypto_skcipher_report( sizeof(rblkcipher), &rblkcipher); } +static int __maybe_unused crypto_skcipher_report_stat( + struct sk_buff *skb, struct crypto_alg *alg) +{ + struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg); + struct crypto_istat_cipher *istat; + struct crypto_stat_cipher rcipher; + + istat = skcipher_get_stat(skcipher); + + memset(&rcipher, 0, sizeof(rcipher)); + + strscpy(rcipher.type, "cipher", sizeof(rcipher.type)); + + rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt); + rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen); + rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt); + rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen); + rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt); + + return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher); +} + static const struct crypto_type crypto_skcipher_type = { .extsize = crypto_skcipher_extsize, .init_tfm = crypto_skcipher_init_tfm, @@ -814,6 +877,9 @@ static const struct crypto_type crypto_skcipher_type = { #endif #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_skcipher_report, +#endif +#ifdef CONFIG_CRYPTO_STATS + .report_stat = crypto_skcipher_report_stat, #endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK, @@ -869,6 +935,7 @@ EXPORT_SYMBOL_GPL(crypto_has_skcipher); int skcipher_prepare_alg_common(struct skcipher_alg_common *alg) { + struct crypto_istat_cipher *istat = skcipher_get_stat_common(alg); struct crypto_alg *base = &alg->base; if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 || @@ -881,6 +948,9 @@ int skcipher_prepare_alg_common(struct skcipher_alg_common *alg) base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) + memset(istat, 0, sizeof(*istat)); + return 0; } diff --git a/crypto/skcipher.h b/crypto/skcipher.h index 703651367dd8..16c9484360da 100644 --- a/crypto/skcipher.h +++ b/crypto/skcipher.h @@ -10,6 +10,16 @@ #include #include "internal.h" +static inline struct crypto_istat_cipher 
+	struct skcipher_alg_common *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+	return &alg->stat;
+#else
+	return NULL;
+#endif
+}
+
 int crypto_lskcipher_encrypt_sg(struct skcipher_request *req);
 int crypto_lskcipher_decrypt_sg(struct skcipher_request *req);
 int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm);
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index d042c90e0907..574cffc90730 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -56,6 +56,35 @@ struct crypto_acomp {
 	struct crypto_tfm base;
 };
 
+/*
+ * struct crypto_istat_compress - statistics for compress algorithm
+ * @compress_cnt: number of compress requests
+ * @compress_tlen: total data size handled by compress requests
+ * @decompress_cnt: number of decompress requests
+ * @decompress_tlen: total data size handled by decompress requests
+ * @err_cnt: number of errors for compress requests
+ */
+struct crypto_istat_compress {
+	atomic64_t compress_cnt;
+	atomic64_t compress_tlen;
+	atomic64_t decompress_cnt;
+	atomic64_t decompress_tlen;
+	atomic64_t err_cnt;
+};
+
+#ifdef CONFIG_CRYPTO_STATS
+#define COMP_ALG_COMMON_STATS struct crypto_istat_compress stat;
+#else
+#define COMP_ALG_COMMON_STATS
+#endif
+
+#define COMP_ALG_COMMON {		\
+	COMP_ALG_COMMON_STATS		\
+					\
+	struct crypto_alg base;		\
+}
+struct comp_alg_common COMP_ALG_COMMON;
+
 /**
  * DOC: Asynchronous Compression API
  *
@@ -103,11 +132,23 @@ static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
 	return &tfm->base;
 }
 
+static inline struct comp_alg_common *__crypto_comp_alg_common(
+	struct crypto_alg *alg)
+{
+	return container_of(alg, struct comp_alg_common, base);
+}
+
 static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
 {
 	return container_of(tfm, struct crypto_acomp, base);
}
 
+static inline struct comp_alg_common *crypto_comp_alg_common(
+	struct crypto_acomp *tfm)
+{
+	return __crypto_comp_alg_common(crypto_acomp_tfm(tfm)->__crt_alg);
+}
+
 static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
 {
 	return tfm->reqsize;
@@ -214,6 +255,27 @@ static inline void acomp_request_set_params(struct acomp_req *req,
 		req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
 }
 
+static inline struct crypto_istat_compress *comp_get_stat(
+	struct comp_alg_common *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+	return &alg->stat;
+#else
+	return NULL;
+#endif
+}
+
+static inline int crypto_comp_errstat(struct comp_alg_common *alg, int err)
+{
+	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+		return err;
+
+	if (err && err != -EINPROGRESS && err != -EBUSY)
+		atomic64_inc(&comp_get_stat(alg)->err_cnt);
+
+	return err;
+}
+
 /**
  * crypto_acomp_compress() -- Invoke asynchronous compress operation
  *
@@ -225,7 +287,19 @@ static inline void acomp_request_set_params(struct acomp_req *req,
  */
 static inline int crypto_acomp_compress(struct acomp_req *req)
 {
-	return crypto_acomp_reqtfm(req)->compress(req);
+	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+	struct comp_alg_common *alg;
+
+	alg = crypto_comp_alg_common(tfm);
+
+	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+		struct crypto_istat_compress *istat = comp_get_stat(alg);
+
+		atomic64_inc(&istat->compress_cnt);
+		atomic64_add(req->slen, &istat->compress_tlen);
+	}
+
+	return crypto_comp_errstat(alg, tfm->compress(req));
 }
 
 /**
@@ -239,7 +313,19 @@ static inline int crypto_acomp_compress(struct acomp_req *req)
  */
 static inline int crypto_acomp_decompress(struct acomp_req *req)
 {
-	return crypto_acomp_reqtfm(req)->decompress(req);
+	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+	struct comp_alg_common *alg;
+
+	alg = crypto_comp_alg_common(tfm);
+
+	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+		struct crypto_istat_compress *istat = comp_get_stat(alg);
+
+		atomic64_inc(&istat->decompress_cnt);
+		atomic64_add(req->slen, &istat->decompress_tlen);
+	}
+
+	return crypto_comp_errstat(alg, tfm->decompress(req));
 }
 
 #endif
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 0e8a41638678..51382befbe37 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -101,6 +101,22 @@ struct aead_request {
 	void *__ctx[] CRYPTO_MINALIGN_ATTR;
 };
 
+/*
+ * struct crypto_istat_aead - statistics for AEAD algorithm
+ * @encrypt_cnt: number of encrypt requests
+ * @encrypt_tlen: total data size handled by encrypt requests
+ * @decrypt_cnt: number of decrypt requests
+ * @decrypt_tlen: total data size handled by decrypt requests
+ * @err_cnt: number of errors for AEAD requests
+ */
+struct crypto_istat_aead {
+	atomic64_t encrypt_cnt;
+	atomic64_t encrypt_tlen;
+	atomic64_t decrypt_cnt;
+	atomic64_t decrypt_tlen;
+	atomic64_t err_cnt;
+};
+
 /**
  * struct aead_alg - AEAD cipher definition
  * @maxauthsize: Set the maximum authentication tag size supported by the
@@ -119,6 +135,7 @@ struct aead_request {
  * @setkey: see struct skcipher_alg
  * @encrypt: see struct skcipher_alg
  * @decrypt: see struct skcipher_alg
+ * @stat: statistics for AEAD algorithm
  * @ivsize: see struct skcipher_alg
  * @chunksize: see struct skcipher_alg
  * @init: Initialize the cryptographic transformation object. This function
@@ -145,6 +162,10 @@ struct aead_alg {
 	int (*init)(struct crypto_aead *tfm);
 	void (*exit)(struct crypto_aead *tfm);
 
+#ifdef CONFIG_CRYPTO_STATS
+	struct crypto_istat_aead stat;
+#endif
+
 	unsigned int ivsize;
 	unsigned int maxauthsize;
 	unsigned int chunksize;
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index 18a10cad07aa..31c111bebb68 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -54,6 +54,26 @@ struct crypto_akcipher {
 	struct crypto_tfm base;
 };
 
+/*
+ * struct crypto_istat_akcipher - statistics for akcipher algorithm
+ * @encrypt_cnt: number of encrypt requests
+ * @encrypt_tlen: total data size handled by encrypt requests
+ * @decrypt_cnt: number of decrypt requests
+ * @decrypt_tlen: total data size handled by decrypt requests
+ * @verify_cnt: number of verify operations
+ * @sign_cnt: number of sign requests
+ * @err_cnt: number of errors for akcipher requests
+ */
+struct crypto_istat_akcipher {
+	atomic64_t encrypt_cnt;
+	atomic64_t encrypt_tlen;
+	atomic64_t decrypt_cnt;
+	atomic64_t decrypt_tlen;
+	atomic64_t verify_cnt;
+	atomic64_t sign_cnt;
+	atomic64_t err_cnt;
+};
+
 /**
  * struct akcipher_alg - generic public key algorithm
  *
@@ -90,6 +110,7 @@ struct crypto_akcipher {
  * @exit:	Deinitialize the cryptographic transformation object. This is a
  *		counterpart to @init, used to remove various changes set in
  *		@init.
+ * @stat:	Statistics for akcipher algorithm
  *
  * @base:	Common crypto API algorithm data structure
  */
@@ -106,6 +127,10 @@ struct akcipher_alg {
 	int (*init)(struct crypto_akcipher *tfm);
 	void (*exit)(struct crypto_akcipher *tfm);
 
+#ifdef CONFIG_CRYPTO_STATS
+	struct crypto_istat_akcipher stat;
+#endif
+
 	struct crypto_alg base;
 };
 
@@ -277,6 +302,27 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm)
 	return alg->max_size(tfm);
 }
 
+static inline struct crypto_istat_akcipher *akcipher_get_stat(
+	struct akcipher_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+	return &alg->stat;
+#else
+	return NULL;
+#endif
+}
+
+static inline int crypto_akcipher_errstat(struct akcipher_alg *alg, int err)
+{
+	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+		return err;
+
+	if (err && err != -EINPROGRESS && err != -EBUSY)
+		atomic64_inc(&akcipher_get_stat(alg)->err_cnt);
+
+	return err;
+}
+
 /**
  * crypto_akcipher_encrypt() - Invoke public key encrypt operation
  *
@@ -290,8 +336,16 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm)
 static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
 {
 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
 
-	return crypto_akcipher_alg(tfm)->encrypt(req);
+	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+		struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
+
+		atomic64_inc(&istat->encrypt_cnt);
+		atomic64_add(req->src_len, &istat->encrypt_tlen);
+	}
+
+	return crypto_akcipher_errstat(alg, alg->encrypt(req));
 }
 
 /**
@@ -307,8 +361,16 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
 static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
 {
 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
 
-	return crypto_akcipher_alg(tfm)->decrypt(req);
+	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+		struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
+
+		atomic64_inc(&istat->decrypt_cnt);
+		atomic64_add(req->src_len, &istat->decrypt_tlen);
+	}
+
+	return crypto_akcipher_errstat(alg, alg->decrypt(req));
 }
 
 /**
@@ -360,8 +422,12 @@ int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm,
 static inline int crypto_akcipher_sign(struct akcipher_request *req)
 {
 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
 
-	return crypto_akcipher_alg(tfm)->sign(req);
+	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+		atomic64_inc(&akcipher_get_stat(alg)->sign_cnt);
+
+	return crypto_akcipher_errstat(alg, alg->sign(req));
 }
 
 /**
@@ -381,8 +447,12 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req)
 static inline int crypto_akcipher_verify(struct akcipher_request *req)
 {
 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
 
-	return crypto_akcipher_alg(tfm)->verify(req);
+	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+		atomic64_inc(&akcipher_get_stat(alg)->verify_cnt);
+
+	return crypto_akcipher_errstat(alg, alg->verify(req));
 }
 
 /**
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 156de41ca760..7a4a71af653f 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -61,6 +61,9 @@ struct crypto_type {
 	void (*show)(struct seq_file *m, struct crypto_alg *alg);
 	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
 	void (*free)(struct crypto_instance *inst);
+#ifdef CONFIG_CRYPTO_STATS
+	int (*report_stat)(struct sk_buff *skb, struct crypto_alg *alg);
+#endif
 
 	unsigned int type;
 	unsigned int maskclear;
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 0014bdd81ab7..5d61f576cfc8 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -23,8 +23,27 @@ struct crypto_ahash;
  * crypto_unregister_shash().
  */
 
+/*
+ * struct crypto_istat_hash - statistics for hash algorithm
+ * @hash_cnt: number of hash requests
+ * @hash_tlen: total data size hashed
+ * @err_cnt: number of errors for hash requests
+ */
+struct crypto_istat_hash {
+	atomic64_t hash_cnt;
+	atomic64_t hash_tlen;
+	atomic64_t err_cnt;
+};
+
+#ifdef CONFIG_CRYPTO_STATS
+#define HASH_ALG_COMMON_STAT struct crypto_istat_hash stat;
+#else
+#define HASH_ALG_COMMON_STAT
+#endif
+
 /*
  * struct hash_alg_common - define properties of message digest
+ * @stat: Statistics for hash algorithm.
  * @digestsize: Size of the result of the transformation. A buffer of this size
  *	        must be available to the @final and @finup calls, so they can
  *	        store the resulting hash into it. For various predefined sizes,
@@ -41,6 +60,8 @@ struct crypto_ahash;
  *	      information.
  */
 #define HASH_ALG_COMMON {		\
+	HASH_ALG_COMMON_STAT		\
+					\
 	unsigned int digestsize;	\
 	unsigned int statesize;		\
 					\
@@ -222,6 +243,7 @@ struct shash_alg {
 	};
 };
 #undef HASH_ALG_COMMON
+#undef HASH_ALG_COMMON_STAT
 
 struct crypto_ahash {
 	bool using_shash; /* Underlying algorithm is shash, not ahash */
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index 475e60a9f9ea..4ac46bafba9d 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -31,7 +31,9 @@
  *	  @init.
  *
  * @reqsize:	Context size for (de)compression requests
+ * @stat:	Statistics for compress algorithm
  * @base:	Common crypto API algorithm data structure
+ * @calg:	Common algorithm data structure shared with scomp
 */
 struct acomp_alg {
 	int (*compress)(struct acomp_req *req);
@@ -42,7 +44,10 @@ struct acomp_alg {
 
 	unsigned int reqsize;
 
-	struct crypto_alg base;
+	union {
+		struct COMP_ALG_COMMON;
+		struct comp_alg_common calg;
+	};
 };
 
 /*
diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h
new file mode 100644
index 000000000000..fd54074332f5
--- /dev/null
+++ b/include/crypto/internal/cryptouser.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <crypto/algapi.h>
+#include <net/netlink.h>
+
+struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact);
+
+#ifdef CONFIG_CRYPTO_STATS
+int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs);
+#else
+static inline int crypto_reportstat(struct sk_buff *in_skb,
+				    struct nlmsghdr *in_nlh,
+				    struct nlattr **attrs)
+{
+	return -ENOTSUPP;
+}
+#endif
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index 5a75f2db18ce..858fe3965ae3 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -27,7 +27,9 @@ struct crypto_scomp {
  * @free_ctx:	Function frees context allocated with alloc_ctx
 * @compress:	Function performs a compress operation
 * @decompress:	Function performs a de-compress operation
+ * @stat:	Statistics for compress algorithm
 * @base:	Common crypto API algorithm data structure
+ * @calg:	Common algorithm data structure shared with acomp
 */
 struct scomp_alg {
 	void *(*alloc_ctx)(struct crypto_scomp *tfm);
@@ -38,7 +40,11 @@ struct scomp_alg {
 	int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
 			  unsigned int slen, u8 *dst, unsigned int *dlen,
 			  void *ctx);
-	struct crypto_alg base;
+
+	union {
+		struct COMP_ALG_COMMON;
+		struct comp_alg_common calg;
+	};
 };
 
 static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index 2d9c4de57b69..1988e24a0d1d 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -51,6 +51,20 @@ struct crypto_kpp {
 	struct crypto_tfm base;
 };
 
+/*
+ * struct crypto_istat_kpp - statistics for KPP algorithm
+ * @setsecret_cnt: number of setsecret operations
+ * @generate_public_key_cnt: number of generate_public_key operations
+ * @compute_shared_secret_cnt: number of compute_shared_secret operations
+ * @err_cnt: number of errors for KPP requests
+ */
+struct crypto_istat_kpp {
+	atomic64_t setsecret_cnt;
+	atomic64_t generate_public_key_cnt;
+	atomic64_t compute_shared_secret_cnt;
+	atomic64_t err_cnt;
+};
+
 /**
  * struct kpp_alg - generic key-agreement protocol primitives
  *
@@ -73,6 +87,7 @@ struct crypto_kpp {
  * @exit:	Undo everything @init did.
  *
  * @base:	Common crypto API algorithm data structure
+ * @stat:	Statistics for KPP algorithm
  */
 struct kpp_alg {
 	int (*set_secret)(struct crypto_kpp *tfm, const void *buffer,
@@ -85,6 +100,10 @@ struct kpp_alg {
 	int (*init)(struct crypto_kpp *tfm);
 	void (*exit)(struct crypto_kpp *tfm);
 
+#ifdef CONFIG_CRYPTO_STATS
+	struct crypto_istat_kpp stat;
+#endif
+
 	struct crypto_alg base;
 };
 
@@ -272,6 +291,26 @@ struct kpp_secret {
 	unsigned short len;
 };
 
+static inline struct crypto_istat_kpp *kpp_get_stat(struct kpp_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+	return &alg->stat;
+#else
+	return NULL;
+#endif
+}
+
+static inline int crypto_kpp_errstat(struct kpp_alg *alg, int err)
+{
+	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+		return err;
+
+	if (err && err != -EINPROGRESS && err != -EBUSY)
+		atomic64_inc(&kpp_get_stat(alg)->err_cnt);
+
+	return err;
+}
+
 /**
 * crypto_kpp_set_secret() - Invoke kpp operation
 *
@@ -290,7 +329,12 @@ struct kpp_secret {
 static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
 					const void *buffer, unsigned int len)
 {
-	return crypto_kpp_alg(tfm)->set_secret(tfm, buffer, len);
+	struct kpp_alg *alg = crypto_kpp_alg(tfm);
+
+	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+		atomic64_inc(&kpp_get_stat(alg)->setsecret_cnt);
+
+	return crypto_kpp_errstat(alg, alg->set_secret(tfm, buffer, len));
 }
 
 /**
@@ -309,8 +353,12 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
 static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
 {
 	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+	struct kpp_alg *alg = crypto_kpp_alg(tfm);
 
-	return crypto_kpp_alg(tfm)->generate_public_key(req);
+	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+		atomic64_inc(&kpp_get_stat(alg)->generate_public_key_cnt);
+
+	return crypto_kpp_errstat(alg, alg->generate_public_key(req));
 }
 
 /**
@@ -326,8 +374,12 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
 static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
 {
 	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+	struct kpp_alg *alg = crypto_kpp_alg(tfm);
 
-	return crypto_kpp_alg(tfm)->compute_shared_secret(req);
+	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+		atomic64_inc(&kpp_get_stat(alg)->compute_shared_secret_cnt);
+
+	return crypto_kpp_errstat(alg, alg->compute_shared_secret(req));
 }
 
 /**
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index 5ac4388f50e1..6abe5102e5fb 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -15,6 +15,20 @@
 
 struct crypto_rng;
 
+/*
+ * struct crypto_istat_rng: statistics for RNG algorithm
+ * @generate_cnt: number of RNG generate requests
+ * @generate_tlen: total data size of generated data by the RNG
+ * @seed_cnt: number of times the RNG was seeded
+ * @err_cnt: number of errors for RNG requests
+ */
+struct crypto_istat_rng {
+	atomic64_t generate_cnt;
+	atomic64_t generate_tlen;
+	atomic64_t seed_cnt;
+	atomic64_t err_cnt;
+};
+
 /**
  * struct rng_alg - random number generator definition
  *
@@ -32,6 +46,7 @@ struct crypto_rng;
  *		size of the seed is defined with @seedsize .
  * @set_ent:	Set entropy that would otherwise be obtained from
  *		entropy source. Internal use only.
+ * @stat:	Statistics for rng algorithm
  * @seedsize:	The seed size required for a random number generator
  *		initialization defined with this variable. Some
  *		random number generators does not require a seed
@@ -48,6 +63,10 @@ struct rng_alg {
 	void (*set_ent)(struct crypto_rng *tfm, const u8 *data,
 			unsigned int len);
 
+#ifdef CONFIG_CRYPTO_STATS
+	struct crypto_istat_rng stat;
+#endif
+
 	unsigned int seedsize;
 
 	struct crypto_alg base;
@@ -125,6 +144,26 @@ static inline void crypto_free_rng(struct crypto_rng *tfm)
 	crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
 }
 
+static inline struct crypto_istat_rng *rng_get_stat(struct rng_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+	return &alg->stat;
+#else
+	return NULL;
+#endif
+}
+
+static inline int crypto_rng_errstat(struct rng_alg *alg, int err)
+{
+	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+		return err;
+
+	if (err && err != -EINPROGRESS && err != -EBUSY)
+		atomic64_inc(&rng_get_stat(alg)->err_cnt);
+
+	return err;
+}
+
 /**
 * crypto_rng_generate() - get random number
 * @tfm: cipher handle
 *
@@ -143,7 +182,17 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm,
 				      const u8 *src, unsigned int slen,
 				      u8 *dst, unsigned int dlen)
 {
-	return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
+	struct rng_alg *alg = crypto_rng_alg(tfm);
+
+	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+		struct crypto_istat_rng *istat = rng_get_stat(alg);
+
+		atomic64_inc(&istat->generate_cnt);
+		atomic64_add(dlen, &istat->generate_tlen);
+	}
+
+	return crypto_rng_errstat(alg,
+				  alg->generate(tfm, src, slen, dst, dlen));
 }
 
 /**
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 74d47e23374e..c8857d7bdb37 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -64,6 +64,28 @@ struct crypto_lskcipher {
 	struct crypto_tfm base;
 };
 
+/*
+ * struct crypto_istat_cipher - statistics for cipher algorithm
+ * @encrypt_cnt: number of encrypt requests
+ * @encrypt_tlen: total data size handled by encrypt requests
+ * @decrypt_cnt: number of decrypt requests
+ * @decrypt_tlen: total data size handled by decrypt requests
+ * @err_cnt: number of errors for cipher requests
+ */
+struct crypto_istat_cipher {
+	atomic64_t encrypt_cnt;
+	atomic64_t encrypt_tlen;
+	atomic64_t decrypt_cnt;
+	atomic64_t decrypt_tlen;
+	atomic64_t err_cnt;
+};
+
+#ifdef CONFIG_CRYPTO_STATS
+#define SKCIPHER_ALG_COMMON_STAT struct crypto_istat_cipher stat;
+#else
+#define SKCIPHER_ALG_COMMON_STAT
+#endif
+
 /*
 * struct skcipher_alg_common - common properties of skcipher_alg
 * @min_keysize: Minimum key size supported by the transformation. This is the
 * @chunksize: Equal to the block size except for stream ciphers such as
 *	       CTR where it is set to the underlying block size.
 * @statesize: Size of the internal state for the algorithm.
+ * @stat: Statistics for cipher algorithm
 * @base: Definition of a generic crypto algorithm.
 */
 #define SKCIPHER_ALG_COMMON { \
@@ -90,6 +113,8 @@ struct crypto_lskcipher {
 	unsigned int chunksize; \
 	unsigned int statesize; \
 	\
+	SKCIPHER_ALG_COMMON_STAT \
+	\
 	struct crypto_alg base; \
 }
 struct skcipher_alg_common SKCIPHER_ALG_COMMON;
diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
index e163670d60f7..5730c67f0617 100644
--- a/include/uapi/linux/cryptouser.h
+++ b/include/uapi/linux/cryptouser.h
@@ -54,16 +54,16 @@ enum crypto_attr_type_t {
 	CRYPTOCFGA_REPORT_AKCIPHER,	/* struct crypto_report_akcipher */
 	CRYPTOCFGA_REPORT_KPP,		/* struct crypto_report_kpp */
 	CRYPTOCFGA_REPORT_ACOMP,	/* struct crypto_report_acomp */
-	CRYPTOCFGA_STAT_LARVAL,		/* No longer supported */
-	CRYPTOCFGA_STAT_HASH,		/* No longer supported */
-	CRYPTOCFGA_STAT_BLKCIPHER,	/* No longer supported */
-	CRYPTOCFGA_STAT_AEAD,		/* No longer supported */
-	CRYPTOCFGA_STAT_COMPRESS,	/* No longer supported */
-	CRYPTOCFGA_STAT_RNG,		/* No longer supported */
-	CRYPTOCFGA_STAT_CIPHER,		/* No longer supported */
-	CRYPTOCFGA_STAT_AKCIPHER,	/* No longer supported */
-	CRYPTOCFGA_STAT_KPP,		/* No longer supported */
-	CRYPTOCFGA_STAT_ACOMP,		/* No longer supported */
+	CRYPTOCFGA_STAT_LARVAL,		/* struct crypto_stat */
+	CRYPTOCFGA_STAT_HASH,		/* struct crypto_stat */
+	CRYPTOCFGA_STAT_BLKCIPHER,	/* struct crypto_stat */
+	CRYPTOCFGA_STAT_AEAD,		/* struct crypto_stat */
+	CRYPTOCFGA_STAT_COMPRESS,	/* struct crypto_stat */
+	CRYPTOCFGA_STAT_RNG,		/* struct crypto_stat */
+	CRYPTOCFGA_STAT_CIPHER,		/* struct crypto_stat */
+	CRYPTOCFGA_STAT_AKCIPHER,	/* struct crypto_stat */
+	CRYPTOCFGA_STAT_KPP,		/* struct crypto_stat */
+	CRYPTOCFGA_STAT_ACOMP,		/* struct crypto_stat */
 	__CRYPTOCFGA_MAX
 
 #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
@@ -79,7 +79,6 @@ struct crypto_user_alg {
 	__u32 cru_flags;
 };
 
-/* No longer supported, do not use. */
 struct crypto_stat_aead {
 	char type[CRYPTO_MAX_NAME];
 	__u64 stat_encrypt_cnt;
@@ -89,7 +88,6 @@ struct crypto_stat_aead {
 	__u64 stat_err_cnt;
 };
 
-/* No longer supported, do not use. */
 struct crypto_stat_akcipher {
 	char type[CRYPTO_MAX_NAME];
 	__u64 stat_encrypt_cnt;
@@ -101,7 +99,6 @@ struct crypto_stat_akcipher {
 	__u64 stat_err_cnt;
 };
 
-/* No longer supported, do not use. */
 struct crypto_stat_cipher {
 	char type[CRYPTO_MAX_NAME];
 	__u64 stat_encrypt_cnt;
@@ -111,7 +108,6 @@ struct crypto_stat_cipher {
 	__u64 stat_err_cnt;
 };
 
-/* No longer supported, do not use. */
 struct crypto_stat_compress {
 	char type[CRYPTO_MAX_NAME];
 	__u64 stat_compress_cnt;
@@ -121,7 +117,6 @@ struct crypto_stat_compress {
 	__u64 stat_err_cnt;
 };
 
-/* No longer supported, do not use. */
 struct crypto_stat_hash {
 	char type[CRYPTO_MAX_NAME];
 	__u64 stat_hash_cnt;
@@ -129,7 +124,6 @@ struct crypto_stat_hash {
 	__u64 stat_err_cnt;
 };
 
-/* No longer supported, do not use. */
 struct crypto_stat_kpp {
 	char type[CRYPTO_MAX_NAME];
 	__u64 stat_setsecret_cnt;
@@ -138,7 +132,6 @@ struct crypto_stat_kpp {
 	__u64 stat_err_cnt;
 };
 
-/* No longer supported, do not use. */
 struct crypto_stat_rng {
 	char type[CRYPTO_MAX_NAME];
 	__u64 stat_generate_cnt;
@@ -147,7 +140,6 @@ struct crypto_stat_rng {
 	__u64 stat_err_cnt;
 };
 
-/* No longer supported, do not use. */
 struct crypto_stat_larval {
 	char type[CRYPTO_MAX_NAME];
 };
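
[Editorial note, not part of the patch: for readers exercising the re-instated uapi above, the following userspace sketch shows how the CRYPTOCFGA_STAT_* attributes could be fetched over NETLINK_CRYPTO. It is illustrative only: it assumes a CRYPTO_MSG_GETSTAT request is answered like CRYPTO_MSG_GETALG, i.e. with one reply carrying the echoed struct crypto_user_alg followed by netlink attributes; the algorithm name "cbc(aes)" and the buffer size are arbitrary choices. With CONFIG_CRYPTO_STATS disabled, the crypto_reportstat() stub added earlier in this patch makes the kernel fail such a request with -ENOTSUPP.]

	/*
	 * Hedged sketch: query cipher statistics for one algorithm via the
	 * crypto netlink interface re-instated by this patch.
	 */
	#include <linux/cryptouser.h>
	#include <linux/netlink.h>
	#include <linux/rtnetlink.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		struct {
			struct nlmsghdr hdr;
			struct crypto_user_alg alg;
		} req;
		char buf[8192];
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
		struct rtattr *rta;
		ssize_t len;
		int fd;

		memset(&req, 0, sizeof(req));
		req.hdr.nlmsg_len = NLMSG_LENGTH(sizeof(req.alg));
		req.hdr.nlmsg_type = CRYPTO_MSG_GETSTAT;
		req.hdr.nlmsg_flags = NLM_F_REQUEST;
		strcpy(req.alg.cru_name, "cbc(aes)");	/* arbitrary example */

		fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);
		if (fd < 0 || send(fd, &req, req.hdr.nlmsg_len, 0) < 0)
			return 1;

		len = recv(fd, buf, sizeof(buf), 0);
		if (len <= 0 || !NLMSG_OK(nlh, (unsigned int)len) ||
		    nlh->nlmsg_type == NLMSG_ERROR)
			return 1;

		/* Attributes follow the echoed struct crypto_user_alg. */
		rta = (struct rtattr *)((char *)NLMSG_DATA(nlh) +
					NLMSG_ALIGN(sizeof(struct crypto_user_alg)));
		len = NLMSG_PAYLOAD(nlh, sizeof(struct crypto_user_alg));
		for (; RTA_OK(rta, len); rta = RTA_NEXT(rta, len)) {
			if (rta->rta_type == CRYPTOCFGA_STAT_CIPHER) {
				struct crypto_stat_cipher *st = RTA_DATA(rta);

				printf("encrypt: %llu requests, %llu bytes, %llu errors\n",
				       (unsigned long long)st->stat_encrypt_cnt,
				       (unsigned long long)st->stat_encrypt_tlen,
				       (unsigned long long)st->stat_err_cnt);
			}
		}

		close(fd);
		return 0;
	}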