From 1b553839e1326b6a73431e399f575862696be51b Mon Sep 17 00:00:00 2001
From: Alexander Egorenkov
Date: Fri, 3 Sep 2021 09:39:48 +0200
Subject: s390/sclp: add detection of IPL-complete-control facility

The presence of the IPL-complete-control facility can be derived from
the hypervisor's SCLP info response.

Signed-off-by: Alexander Egorenkov
Reviewed-by: Christian Borntraeger
Signed-off-by: Heiko Carstens
---
 arch/s390/include/asm/sclp.h   | 1 +
 drivers/s390/char/sclp_early.c | 4 +++-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 04cb1e7582a6..236b34b75ddb 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -87,6 +87,7 @@ struct sclp_info {
         unsigned char has_diag318 : 1;
         unsigned char has_sipl : 1;
         unsigned char has_dirq : 1;
+        unsigned char has_iplcc : 1;
         unsigned int ibc;
         unsigned int mtid;
         unsigned int mtid_cp;
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index e9943a86c361..dd313ff57df3 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -49,8 +49,10 @@ static void __init sclp_early_facilities_detect(void)
                 S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
         if (sccb->fac91 & 0x40)
                 S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
-        if (sccb->cpuoff > 134)
+        if (sccb->cpuoff > 134) {
                 sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
+                sclp.has_iplcc = !!(sccb->byte_134 & 0x02);
+        }
         if (sccb->cpuoff > 137)
                 sclp.has_sipl = !!(sccb->cbl & 0x4000);
         sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
-- cgit v1.2.3

From 2ba24343bdb8637135bd0d9b5e249c44bd751670 Mon Sep 17 00:00:00 2001
From: Alexander Egorenkov
Date: Fri, 3 Sep 2021 11:08:48 +0200
Subject: s390/kexec: set end-of-ipl flag in last diag308 call

If the facility IPL-complete-control is present then the last diag308
call made by kexec shall set the end-of-ipl flag in the subcode register
to signal the hypervisor that this is the last diag308 call made by
Linux. Only the diag308 calls made during a regular kexec need to set
the end-of-ipl flag, in all other cases the hypervisor will ignore it.
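As an illustration (a minimal sketch mirroring the __do_machine_kexec()
hunk further below, with comments added for this write-up), the subcode
word handed to the relocation routine is composed like this:

    unsigned long diag308_subcode;

    /* subcode 0 (clear reset) in the low bits */
    diag308_subcode = DIAG308_CLEAR_RESET;

    /* the end-of-ipl flag is bit 1UL << 16 and is only set when the
     * SCLP info response reported the IPL-complete-control facility */
    if (sclp.has_iplcc)
            diag308_subcode |= DIAG308_FLAG_EI;

    /* the combined value travels via %r4 of relocate_kernel into %r0
     * for the terminating diag 308 */
    (*data_mover)(&image->head, image->start, diag308_subcode);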
Signed-off-by: Alexander Egorenkov Reviewed-by: Heiko Carstens Signed-off-by: Heiko Carstens --- arch/s390/include/asm/ipl.h | 6 ++++++ arch/s390/kernel/machine_kexec.c | 10 ++++++++-- arch/s390/kernel/relocate_kernel.S | 3 ++- 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h index 3f8ee257f9aa..a405b6bb89fb 100644 --- a/arch/s390/include/asm/ipl.h +++ b/arch/s390/include/asm/ipl.h @@ -133,6 +133,8 @@ int ipl_report_add_certificate(struct ipl_report *report, void *key, * DIAG 308 support */ enum diag308_subcode { + DIAG308_CLEAR_RESET = 0, + DIAG308_LOAD_NORMAL_RESET = 1, DIAG308_REL_HSA = 2, DIAG308_LOAD_CLEAR = 3, DIAG308_LOAD_NORMAL_DUMP = 4, @@ -141,6 +143,10 @@ enum diag308_subcode { DIAG308_LOAD_NORMAL = 7, }; +enum diag308_subcode_flags { + DIAG308_FLAG_EI = 1UL << 16, +}; + enum diag308_rc { DIAG308_RC_OK = 0x0001, DIAG308_RC_NOCONFIG = 0x0102, diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index 6ebf02e15c85..ab761c008f98 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -26,8 +26,10 @@ #include #include #include +#include -typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long); +typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long, + unsigned long); extern const unsigned char relocate_kernel[]; extern const unsigned long long relocate_kernel_len; @@ -243,6 +245,7 @@ void machine_crash_shutdown(struct pt_regs *regs) */ static void __do_machine_kexec(void *data) { + unsigned long diag308_subcode; relocate_kernel_t data_mover; struct kimage *image = data; @@ -251,7 +254,10 @@ static void __do_machine_kexec(void *data) __arch_local_irq_stnsm(0xfb); /* disable DAT - avoid no-execute */ /* Call the moving routine */ - (*data_mover)(&image->head, image->start); + diag308_subcode = DIAG308_CLEAR_RESET; + if (sclp.has_iplcc) + diag308_subcode |= DIAG308_FLAG_EI; + (*data_mover)(&image->head, image->start, diag308_subcode); /* Die if kexec returns */ disabled_wait(); diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S index 9438368c3632..a9a1a6f45375 100644 --- a/arch/s390/kernel/relocate_kernel.S +++ b/arch/s390/kernel/relocate_kernel.S @@ -14,6 +14,7 @@ * moves the new kernel to its destination... * %r2 = pointer to first kimage_entry_t * %r3 = start address - where to jump to after the job is done... + * %r4 = subcode * * %r5 will be used as temp. storage * %r6 holds the destination address @@ -56,7 +57,7 @@ ENTRY(relocate_kernel) jo 0b j .base .done: - sgr %r0,%r0 # clear register r0 + lgr %r0,%r4 # subcode cghi %r3,0 je .diag la %r4,load_psw-.base(%r13) # load psw-address into the register -- cgit v1.2.3 From 28d3417a946762bed46815e59627bbd749347906 Mon Sep 17 00:00:00 2001 From: Harald Freudenberger Date: Thu, 24 Mar 2022 13:23:53 +0100 Subject: s390/zcrypt: add display of ASYM master key verification pattern MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch extends the sysfs attribute mkvps for CCA cards to show the states and master key verification patterns for the old, current and new ASYM master key registers. With this patch now all relevant master key verification patterns related to a CCA HSM are available with the mkvps sysfs attribute. This is a requirement for some exploiters like the kubernetes cex plugin or initrd code needing to verify the master key verification patterns on HSMs before use. 
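As a purely illustrative, hedged sketch (the function name and buffer
sizes are invented for the example and are not part of this patch),
such an exploiter could scan the attribute for the current ASYM master
key verification pattern as shown below; a sample of the raw attribute
output follows the sketch.

    #include <stdio.h>

    /* return 0 and copy the pattern if a valid current ASYM MK is set,
     * -1 otherwise; mkvps_path is e.g. the sysfs file shown below */
    static int asym_cur_mkvp(const char *mkvps_path, char mkvp[64])
    {
            char line[128];
            FILE *f = fopen(mkvps_path, "r");

            if (!f)
                    return -1;
            while (fgets(line, sizeof(line), f)) {
                    /* e.g. "ASYM CUR: valid 0x650c25a8..." */
                    if (sscanf(line, "ASYM CUR: valid %63s", mkvp) == 1) {
                            fclose(f);
                            return 0;
                    }
            }
            fclose(f);
            return -1;
    }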
A sample output: cat /sys/devices/ap/card04/04.0005/mkvps AES NEW: empty 0x0000000000000000 AES CUR: valid 0xe9a49a58cd039bed AES OLD: valid 0x7d10d17bc8a409c4 APKA NEW: empty 0x0000000000000000 APKA CUR: valid 0x5f2f27aaa2d59b4a APKA OLD: valid 0x82a5e2cd5030d5ec ASYM NEW: empty 0x00000000000000000000000000000000 ASYM CUR: valid 0x650c25a89c27e716d0e692b6c83f10e5 ASYM OLD: valid 0xf8ae2acf8bfc57f0a0957c732c16078b Signed-off-by: Harald Freudenberger Reviewed-by: Jörg Schmidbauer Signed-off-by: Heiko Carstens --- drivers/s390/crypto/zcrypt_ccamisc.c | 9 +++++++++ drivers/s390/crypto/zcrypt_ccamisc.h | 6 ++++++ drivers/s390/crypto/zcrypt_cex4.c | 36 ++++++++++++++++++++++++++++++++---- 3 files changed, 47 insertions(+), 4 deletions(-) diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c index 6a3c2b460965..a507cafff3c5 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.c +++ b/drivers/s390/crypto/zcrypt_ccamisc.c @@ -1708,6 +1708,15 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) rarray, &rlen, varray, &vlen); if (rc == 0 && rlen >= 10*8 && vlen >= 204) { memcpy(ci->serial, rarray, 8); + ci->new_asym_mk_state = (char) rarray[4*8]; + ci->cur_asym_mk_state = (char) rarray[5*8]; + ci->old_asym_mk_state = (char) rarray[6*8]; + if (ci->old_asym_mk_state == '2') + memcpy(ci->old_asym_mkvp, varray + 64, 16); + if (ci->cur_asym_mk_state == '2') + memcpy(ci->cur_asym_mkvp, varray + 84, 16); + if (ci->new_asym_mk_state == '3') + memcpy(ci->new_asym_mkvp, varray + 104, 16); ci->new_aes_mk_state = (char) rarray[7*8]; ci->cur_aes_mk_state = (char) rarray[8*8]; ci->old_aes_mk_state = (char) rarray[9*8]; diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h index 3513cd8ab9bc..78bf5631848e 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.h +++ b/drivers/s390/crypto/zcrypt_ccamisc.h @@ -251,12 +251,18 @@ struct cca_info { char new_apka_mk_state; /* '1' empty, '2' partially full, '3' full */ char cur_apka_mk_state; /* '1' invalid, '2' valid */ char old_apka_mk_state; /* '1' invalid, '2' valid */ + char new_asym_mk_state; /* '1' empty, '2' partially full, '3' full */ + char cur_asym_mk_state; /* '1' invalid, '2' valid */ + char old_asym_mk_state; /* '1' invalid, '2' valid */ u64 new_aes_mkvp; /* truncated sha256 of new aes master key */ u64 cur_aes_mkvp; /* truncated sha256 of current aes master key */ u64 old_aes_mkvp; /* truncated sha256 of old aes master key */ u64 new_apka_mkvp; /* truncated sha256 of new apka master key */ u64 cur_apka_mkvp; /* truncated sha256 of current apka mk */ u64 old_apka_mkvp; /* truncated sha256 of old apka mk */ + u8 new_asym_mkvp[16]; /* verify pattern of new asym master key */ + u8 cur_asym_mkvp[16]; /* verify pattern of current asym master key */ + u8 old_asym_mkvp[16]; /* verify pattern of old asym master key */ char serial[9]; /* serial number (8 ascii numbers + 0x00) */ }; diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c index fe5664c7589e..f4319d072016 100644 --- a/drivers/s390/crypto/zcrypt_cex4.c +++ b/drivers/s390/crypto/zcrypt_cex4.c @@ -123,11 +123,12 @@ static ssize_t cca_mkvps_show(struct device *dev, &ci, zq->online); if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3') - n = scnprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n", - new_state[ci.new_aes_mk_state - '1'], - ci.new_aes_mkvp); + n += scnprintf(buf + n, PAGE_SIZE, + "AES NEW: %s 0x%016llx\n", + new_state[ci.new_aes_mk_state - '1'], + ci.new_aes_mkvp); else - n = 
scnprintf(buf, PAGE_SIZE, "AES NEW: - -\n"); + n += scnprintf(buf + n, PAGE_SIZE, "AES NEW: - -\n"); if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2') n += scnprintf(buf + n, PAGE_SIZE - n, @@ -169,6 +170,33 @@ static ssize_t cca_mkvps_show(struct device *dev, else n += scnprintf(buf + n, PAGE_SIZE - n, "APKA OLD: - -\n"); + if (ci.new_asym_mk_state >= '1' && ci.new_asym_mk_state <= '3') + n += scnprintf(buf + n, PAGE_SIZE, + "ASYM NEW: %s 0x%016llx%016llx\n", + new_state[ci.new_asym_mk_state - '1'], + *((u64 *)(ci.new_asym_mkvp)), + *((u64 *)(ci.new_asym_mkvp + sizeof(u64)))); + else + n += scnprintf(buf + n, PAGE_SIZE, "ASYM NEW: - -\n"); + + if (ci.cur_asym_mk_state >= '1' && ci.cur_asym_mk_state <= '2') + n += scnprintf(buf + n, PAGE_SIZE - n, + "ASYM CUR: %s 0x%016llx%016llx\n", + cao_state[ci.cur_asym_mk_state - '1'], + *((u64 *)(ci.cur_asym_mkvp)), + *((u64 *)(ci.cur_asym_mkvp + sizeof(u64)))); + else + n += scnprintf(buf + n, PAGE_SIZE - n, "ASYM CUR: - -\n"); + + if (ci.old_asym_mk_state >= '1' && ci.old_asym_mk_state <= '2') + n += scnprintf(buf + n, PAGE_SIZE - n, + "ASYM OLD: %s 0x%016llx%016llx\n", + cao_state[ci.old_asym_mk_state - '1'], + *((u64 *)(ci.old_asym_mkvp)), + *((u64 *)(ci.old_asym_mkvp + sizeof(u64)))); + else + n += scnprintf(buf + n, PAGE_SIZE - n, "ASYM OLD: - -\n"); + return n; } -- cgit v1.2.3 From 7714e16f791d948c8bb58aa23bf8ef60b9bba646 Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Wed, 30 Mar 2022 19:50:16 +0200 Subject: s390/smp: sort out physical vs virtual CPU0 lowcore pointer SPX instruction called from set_prefix() expects physical address of the lowcore to be installed, but instead the virtual address is passed. Note: this does not fix a bug currently, since virtual and physical addresses are identical. Reviewed-by: Heiko Carstens Signed-off-by: Alexander Gordeev Signed-off-by: Heiko Carstens --- arch/s390/kernel/setup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index d860ac300919..8d91eccc0963 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -494,7 +494,7 @@ static void __init setup_lowcore_dat_off(void) lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW); lc->preempt_count = PREEMPT_DISABLED; - set_prefix((u32)(unsigned long) lc); + set_prefix(__pa(lc)); lowcore_ptr[0] = lc; } -- cgit v1.2.3 From 4da75a7fd04295a2623be0916a4748434513a78f Mon Sep 17 00:00:00 2001 From: Haowen Bai Date: Thu, 7 Apr 2022 10:16:47 +0800 Subject: s390/cio: simplify the calculation of variables Fix the following coccicheck warnings: ./arch/s390/include/asm/scsw.h:695:47-49: WARNING !A || A && B is equivalent to !A || B I apply a readable version just to get rid of a warning. 
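For readers unfamiliar with the coccinelle finding, here is a minimal
illustration with placeholder conditions a and b instead of the real
stctl/actl bit tests: "!a || (a && b)" always evaluates to the same
value as "!a || b", so the shorter form (or, as in this patch, a
sequence of early returns) can be used.

    static int pno_old(int a, int b) { return !a || (a && b); }
    static int pno_new(int a, int b) { return !a || b; }
    /* a == 0: both return 1;  a != 0: both reduce to (b != 0) */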
Signed-off-by: Haowen Bai Reviewed-by: Peter Oberparleiter Link: https://lore.kernel.org/r/1649297808-5048-1-git-send-email-baihaowen@meizu.com Cc: Alexander Gordeev Cc: Christian Borntraeger Cc: Vasily Gorbik Cc: Sven Schnelle Signed-off-by: Heiko Carstens --- arch/s390/include/asm/scsw.h | 83 ++++++++++++++++++++++++++++++++++++-------- 1 file changed, 68 insertions(+), 15 deletions(-) diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h index a7c3ccf681da..7ce584aff5bb 100644 --- a/arch/s390/include/asm/scsw.h +++ b/arch/s390/include/asm/scsw.h @@ -508,9 +508,21 @@ static inline int scsw_cmd_is_valid_zcc(union scsw *scsw) */ static inline int scsw_cmd_is_valid_ectl(union scsw *scsw) { - return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && - !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && - (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS); + /* Must be status pending. */ + if (!(scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND)) + return 0; + + /* Must have alert status. */ + if (!(scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS)) + return 0; + + /* Must be alone or together with primary, secondary or both, + * => no intermediate status. + */ + if (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) + return 0; + + return 1; } /** @@ -522,10 +534,25 @@ static inline int scsw_cmd_is_valid_ectl(union scsw *scsw) */ static inline int scsw_cmd_is_valid_pno(union scsw *scsw) { - return (scsw->cmd.fctl != 0) && - (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && - (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) || - (scsw->cmd.actl & SCSW_ACTL_SUSPENDED)); + /* Must indicate at least one I/O function. */ + if (!scsw->cmd.fctl) + return 0; + + /* Must be status pending. */ + if (!(scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND)) + return 0; + + /* Can be status pending alone, or with any combination of primary, + * secondary and alert => no intermediate status. + */ + if (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS)) + return 1; + + /* If intermediate, must be suspended. */ + if (scsw->cmd.actl & SCSW_ACTL_SUSPENDED) + return 1; + + return 0; } /** @@ -675,9 +702,21 @@ static inline int scsw_tm_is_valid_q(union scsw *scsw) */ static inline int scsw_tm_is_valid_ectl(union scsw *scsw) { - return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && - !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && - (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS); + /* Must be status pending. */ + if (!(scsw->tm.stctl & SCSW_STCTL_STATUS_PEND)) + return 0; + + /* Must have alert status. */ + if (!(scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS)) + return 0; + + /* Must be alone or together with primary, secondary or both, + * => no intermediate status. + */ + if (scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) + return 0; + + return 1; } /** @@ -689,11 +728,25 @@ static inline int scsw_tm_is_valid_ectl(union scsw *scsw) */ static inline int scsw_tm_is_valid_pno(union scsw *scsw) { - return (scsw->tm.fctl != 0) && - (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && - (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) || - ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && - (scsw->tm.actl & SCSW_ACTL_SUSPENDED))); + /* Must indicate at least one I/O function. */ + if (!scsw->tm.fctl) + return 0; + + /* Must be status pending. */ + if (!(scsw->tm.stctl & SCSW_STCTL_STATUS_PEND)) + return 0; + + /* Can be status pending alone, or with any combination of primary, + * secondary and alert => no intermediate status. + */ + if (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS)) + return 1; + + /* If intermediate, must be suspended. 
*/ + if (scsw->tm.actl & SCSW_ACTL_SUSPENDED) + return 1; + + return 0; } /** -- cgit v1.2.3 From d9b38e9d0fd3be59122af56a299f84c951453598 Mon Sep 17 00:00:00 2001 From: Harald Freudenberger Date: Wed, 23 Mar 2022 12:13:32 +0100 Subject: s390/ap: uevent on apmask/aqpmask change MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch introduces user space notifications for changes on the apmask or aqmask attributes. So it could be possible to write a udev rule to load/unload the vfio_ap kernel module based on changes of these masks. On chance of the apmask or aqmask an AP change event will be produced with an uevent environment variable showing the new APMASK or AQMASK mask. So a change on the apmask triggers an uvevent like this: KERNEL[490.160396] change /devices/ap (ap) ACTION=change DEVPATH=/devices/ap SUBSYSTEM=ap APMASK=0xffffffdfffffffffffffffffffffffffffffffffffffffffffffffffffffffff SEQNUM=13367 and a change on the aqmask looks like this: KERNEL[283.217642] change /devices/ap (ap) ACTION=change DEVPATH=/devices/ap SUBSYSTEM=ap AQMASK=0xfbffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff SEQNUM=13348 Only real changes to the masks are processed - the old and new masks are compared and no action is done if the values are equal (and thus no uevent). The emit of the uevent is the very last action done when a mask change is processed. However, there is no guarantee that all unbind/bind actions caused by the apmask/aqmask changes are completed when the apmask/aqmask change uevent is received in userspace. Signed-off-by: Harald Freudenberger Tested-by: Thomas Huth Reviewed-by: Jürgen Christ Signed-off-by: Heiko Carstens --- drivers/s390/crypto/ap_bus.c | 40 ++++++++++++++++++++++++++++++++++------ 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index fdf16cb70881..dc37732412d7 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -693,6 +693,24 @@ void ap_send_online_uevent(struct ap_device *ap_dev, int online) } EXPORT_SYMBOL(ap_send_online_uevent); +static void ap_send_mask_changed_uevent(unsigned long *newapm, + unsigned long *newaqm) +{ + char buf[100]; + char *envp[] = { buf, NULL }; + + if (newapm) + snprintf(buf, sizeof(buf), + "APMASK=0x%016lx%016lx%016lx%016lx\n", + newapm[0], newapm[1], newapm[2], newapm[3]); + else + snprintf(buf, sizeof(buf), + "AQMASK=0x%016lx%016lx%016lx%016lx\n", + newaqm[0], newaqm[1], newaqm[2], newaqm[3]); + + kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp); +} + /* * calc # of bound APQNs */ @@ -1355,7 +1373,7 @@ static int apmask_commit(unsigned long *newapm) static ssize_t apmask_store(struct bus_type *bus, const char *buf, size_t count) { - int rc; + int rc, changes = 0; DECLARE_BITMAP(newapm, AP_DEVICES); if (mutex_lock_interruptible(&ap_perms_mutex)) @@ -1365,14 +1383,19 @@ static ssize_t apmask_store(struct bus_type *bus, const char *buf, if (rc) goto done; - rc = apmask_commit(newapm); + changes = memcmp(ap_perms.apm, newapm, APMASKSIZE); + if (changes) + rc = apmask_commit(newapm); done: mutex_unlock(&ap_perms_mutex); if (rc) return rc; - ap_bus_revise_bindings(); + if (changes) { + ap_bus_revise_bindings(); + ap_send_mask_changed_uevent(newapm, NULL); + } return count; } @@ -1443,7 +1466,7 @@ static int aqmask_commit(unsigned long *newaqm) static ssize_t aqmask_store(struct bus_type *bus, const char *buf, size_t count) { - int rc; + int rc, changes = 0; DECLARE_BITMAP(newaqm, 
AP_DOMAINS); if (mutex_lock_interruptible(&ap_perms_mutex)) @@ -1453,14 +1476,19 @@ static ssize_t aqmask_store(struct bus_type *bus, const char *buf, if (rc) goto done; - rc = aqmask_commit(newaqm); + changes = memcmp(ap_perms.aqm, newaqm, APMASKSIZE); + if (changes) + rc = aqmask_commit(newaqm); done: mutex_unlock(&ap_perms_mutex); if (rc) return rc; - ap_bus_revise_bindings(); + if (changes) { + ap_bus_revise_bindings(); + ap_send_mask_changed_uevent(NULL, newaqm); + } return count; } -- cgit v1.2.3 From 6acb086d9f78578992c146746387e136db8b4998 Mon Sep 17 00:00:00 2001 From: Harald Freudenberger Date: Fri, 1 Apr 2022 16:59:09 +0200 Subject: s390/zcrypt: cleanup CPRB struct definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch does a little cleanup on the CPRBX struct in zcrypt.h and the redundant CPRB struct definition in zcrypt_msgtype6.c. Especially some of the misleading fields from the CPRBX struct have been removed. There is no semantic change coming with this patch. The field names changed in the XCRB struct are only related to reserved fields which should never been used. Signed-off-by: Harald Freudenberger Reviewed-by: Jürgen Christ Signed-off-by: Heiko Carstens --- arch/s390/include/uapi/asm/zcrypt.h | 26 +++++++-------- drivers/s390/crypto/zcrypt_msgtype6.c | 59 +---------------------------------- 2 files changed, 13 insertions(+), 72 deletions(-) diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h index 2f04a5499d74..8da75fdf13f8 100644 --- a/arch/s390/include/uapi/asm/zcrypt.h +++ b/arch/s390/include/uapi/asm/zcrypt.h @@ -4,7 +4,7 @@ * * zcrypt 2.2.1 (user-visible header) * - * Copyright IBM Corp. 2001, 2019 + * Copyright IBM Corp. 2001, 2022 * Author(s): Robert Burroughs * Eric Rossman (edrossma@us.ibm.com) * @@ -85,7 +85,7 @@ struct ica_rsa_modexpo_crt { struct CPRBX { __u16 cprb_len; /* CPRB length 220 */ __u8 cprb_ver_id; /* CPRB version id. 
0x02 */ - __u8 pad_000[3]; /* Alignment pad bytes */ + __u8 _pad_000[3]; /* Alignment pad bytes */ __u8 func_id[2]; /* function id 0x5432 */ __u8 cprb_flags[4]; /* Flags */ __u32 req_parml; /* request parameter buffer len */ @@ -95,19 +95,19 @@ struct CPRBX { __u32 rpl_datal; /* reply data block len */ __u32 rpld_datal; /* replied data block len */ __u32 req_extbl; /* request extension block len */ - __u8 pad_001[4]; /* reserved */ + __u8 _pad_001[4]; /* reserved */ __u32 rpld_extbl; /* replied extension block len */ - __u8 padx000[16 - sizeof(__u8 *)]; + __u8 _pad_002[16 - sizeof(__u8 *)]; __u8 __user *req_parmb; /* request parm block 'address' */ - __u8 padx001[16 - sizeof(__u8 *)]; + __u8 _pad_003[16 - sizeof(__u8 *)]; __u8 __user *req_datab; /* request data block 'address' */ - __u8 padx002[16 - sizeof(__u8 *)]; + __u8 _pad_004[16 - sizeof(__u8 *)]; __u8 __user *rpl_parmb; /* reply parm block 'address' */ - __u8 padx003[16 - sizeof(__u8 *)]; + __u8 _pad_005[16 - sizeof(__u8 *)]; __u8 __user *rpl_datab; /* reply data block 'address' */ - __u8 padx004[16 - sizeof(__u8 *)]; + __u8 _pad_006[16 - sizeof(__u8 *)]; __u8 __user *req_extb; /* request extension block 'addr'*/ - __u8 padx005[16 - sizeof(__u8 *)]; + __u8 _pad_007[16 - sizeof(__u8 *)]; __u8 __user *rpl_extb; /* reply extension block 'address'*/ __u16 ccp_rtcode; /* server return code */ __u16 ccp_rscode; /* server reason code */ @@ -115,12 +115,10 @@ struct CPRBX { __u8 logon_id[8]; /* Logon Identifier */ __u8 mac_value[8]; /* Mac Value */ __u8 mac_content_flgs; /* Mac content flag byte */ - __u8 pad_002; /* Alignment */ + __u8 _pad_008; /* Alignment */ __u16 domain; /* Domain */ - __u8 usage_domain[4]; /* Usage domain */ - __u8 cntrl_domain[4]; /* Control domain */ - __u8 S390enf_mask[4]; /* S/390 enforcement mask */ - __u8 pad_004[36]; /* reserved */ + __u8 _pad_009[12]; /* reserved, checked for zeros */ + __u8 _pad_010[36]; /* reserved */ } __attribute__((packed)); /** diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index 57d885158cf0..494451cf0588 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * Copyright IBM Corp. 2001, 2012 + * Copyright IBM Corp. 2001, 2022 * Author(s): Robert Burroughs * Eric Rossman (edrossma@us.ibm.com) * @@ -44,63 +44,6 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \ "Copyright IBM Corp. 2001, 2012"); MODULE_LICENSE("GPL"); -/* - * CPRB - * Note that all shorts, ints and longs are little-endian. - * All pointer fields are 32-bits long, and mean nothing - * - * A request CPRB is followed by a request_parameter_block. - * - * The request (or reply) parameter block is organized thus: - * function code - * VUD block - * key block - */ -struct CPRB { - unsigned short cprb_len; /* CPRB length */ - unsigned char cprb_ver_id; /* CPRB version id. */ - unsigned char pad_000; /* Alignment pad byte. */ - unsigned char srpi_rtcode[4]; /* SRPI return code LELONG */ - unsigned char srpi_verb; /* SRPI verb type */ - unsigned char flags; /* flags */ - unsigned char func_id[2]; /* function id */ - unsigned char checkpoint_flag; /* */ - unsigned char resv2; /* reserved */ - unsigned short req_parml; /* request parameter buffer */ - /* length 16-bit little endian */ - unsigned char req_parmp[4]; /* request parameter buffer * - * pointer (means nothing: the * - * parameter buffer follows * - * the CPRB). 
*/ - unsigned char req_datal[4]; /* request data buffer */ - /* length ULELONG */ - unsigned char req_datap[4]; /* request data buffer */ - /* pointer */ - unsigned short rpl_parml; /* reply parameter buffer */ - /* length 16-bit little endian */ - unsigned char pad_001[2]; /* Alignment pad bytes. ULESHORT */ - unsigned char rpl_parmp[4]; /* reply parameter buffer * - * pointer (means nothing: the * - * parameter buffer follows * - * the CPRB). */ - unsigned char rpl_datal[4]; /* reply data buffer len ULELONG */ - unsigned char rpl_datap[4]; /* reply data buffer */ - /* pointer */ - unsigned short ccp_rscode; /* server reason code ULESHORT */ - unsigned short ccp_rtcode; /* server return code ULESHORT */ - unsigned char repd_parml[2]; /* replied parameter len ULESHORT*/ - unsigned char mac_data_len[2]; /* Mac Data Length ULESHORT */ - unsigned char repd_datal[4]; /* replied data length ULELONG */ - unsigned char req_pc[2]; /* PC identifier */ - unsigned char res_origin[8]; /* resource origin */ - unsigned char mac_value[8]; /* Mac Value */ - unsigned char logon_id[8]; /* Logon Identifier */ - unsigned char usage_domain[2]; /* cdx */ - unsigned char resv3[18]; /* reserved for requestor */ - unsigned short svr_namel; /* server name length ULESHORT */ - unsigned char svr_name[8]; /* server name */ -} __packed; - struct function_and_rules_block { unsigned char function_code[2]; unsigned short ulen; -- cgit v1.2.3 From 2004b57cde6b21170d058244b53043105d89f83f Mon Sep 17 00:00:00 2001 From: Harald Freudenberger Date: Mon, 4 Apr 2022 17:12:37 +0200 Subject: s390/zcrypt: code cleanup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch tries to fix as much as possible of the checkpatch.pl --strict findings: CHECK: Logical continuations should be on the previous line CHECK: No space is necessary after a cast CHECK: Alignment should match open parenthesis CHECK: 'useable' may be misspelled - perhaps 'usable'? WARNING: Possible repeated word: 'is' CHECK: spaces preferred around that '*' (ctx:VxV) CHECK: Comparison to NULL could be written "!msg" CHECK: Prefer kzalloc(sizeof(*zc)...) over kzalloc(sizeof(struct...)...) CHECK: Unnecessary parentheses around resp_type->work CHECK: Avoid CamelCase: There is no functional change comming with this patch, only code cleanup, renaming, whitespaces, indenting, ... but no semantic change in any way. Also the API (zcrypt and pkey header file) is semantically unchanged. 
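Two of the findings, illustrated with hunks that actually appear in the
diff below (the logical continuation moved to the previous line, and
the space after a cast dropped):

    -        devres = test_bit_inv(card, ap_perms.apm)
    -                 && test_bit_inv(queue, ap_perms.aqm);
    +        devres = test_bit_inv(card, ap_perms.apm) &&
    +                 test_bit_inv(queue, ap_perms.aqm);

    -        struct __ap_calc_ctrs *pctrs = (struct __ap_calc_ctrs *) arg;
    +        struct __ap_calc_ctrs *pctrs = (struct __ap_calc_ctrs *)arg;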
Signed-off-by: Harald Freudenberger Reviewed-by: Jürgen Christ Signed-off-by: Heiko Carstens --- arch/s390/include/uapi/asm/pkey.h | 2 +- arch/s390/include/uapi/asm/zcrypt.h | 16 +- drivers/s390/crypto/ap_bus.c | 56 +++---- drivers/s390/crypto/ap_bus.h | 1 + drivers/s390/crypto/ap_queue.c | 7 +- drivers/s390/crypto/pkey_api.c | 149 ++++++++--------- drivers/s390/crypto/zcrypt_api.c | 215 ++++++++++++------------- drivers/s390/crypto/zcrypt_api.h | 4 +- drivers/s390/crypto/zcrypt_card.c | 2 +- drivers/s390/crypto/zcrypt_cca_key.h | 58 +++---- drivers/s390/crypto/zcrypt_ccamisc.c | 269 +++++++++++++++---------------- drivers/s390/crypto/zcrypt_cex2a.c | 11 +- drivers/s390/crypto/zcrypt_cex2c.c | 10 +- drivers/s390/crypto/zcrypt_cex4.c | 8 +- drivers/s390/crypto/zcrypt_ep11misc.c | 168 ++++++++++---------- drivers/s390/crypto/zcrypt_ep11misc.h | 2 +- drivers/s390/crypto/zcrypt_error.h | 3 +- drivers/s390/crypto/zcrypt_msgtype50.c | 31 ++-- drivers/s390/crypto/zcrypt_msgtype6.c | 281 +++++++++++++++++---------------- drivers/s390/crypto/zcrypt_msgtype6.h | 26 +-- drivers/s390/crypto/zcrypt_queue.c | 2 +- 21 files changed, 676 insertions(+), 645 deletions(-) diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h index 7349e96d28a0..924b876f992c 100644 --- a/arch/s390/include/uapi/asm/pkey.h +++ b/arch/s390/include/uapi/asm/pkey.h @@ -171,7 +171,7 @@ struct pkey_skey2pkey { #define PKEY_SKEY2PKEY _IOWR(PKEY_IOCTL_MAGIC, 0x06, struct pkey_skey2pkey) /* - * Verify the given CCA AES secure key for being able to be useable with + * Verify the given CCA AES secure key for being able to be usable with * the pkey module. Check for correct key type and check for having at * least one crypto card being able to handle this key (master key * or old master key verification pattern matches). 
diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h index 8da75fdf13f8..d83713f67530 100644 --- a/arch/s390/include/uapi/asm/zcrypt.h +++ b/arch/s390/include/uapi/asm/zcrypt.h @@ -236,8 +236,8 @@ struct zcrypt_device_matrix_ext { }; #define AUTOSELECT 0xFFFFFFFF -#define AUTOSEL_AP ((__u16) 0xFFFF) -#define AUTOSEL_DOM ((__u16) 0xFFFF) +#define AUTOSEL_AP ((__u16)0xFFFF) +#define AUTOSEL_DOM ((__u16)0xFFFF) #define ZCRYPT_IOCTL_MAGIC 'z' @@ -303,12 +303,12 @@ struct zcrypt_device_matrix_ext { /** * Supported ioctl calls */ -#define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0) -#define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0) -#define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0) -#define ZSENDEP11CPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0) +#define ICARSAMODEXPO _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0) +#define ICARSACRT _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0) +#define ZSECSENDCPRB _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0) +#define ZSENDEP11CPRB _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0) -#define ZCRYPT_DEVICE_STATUS _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x5f, 0) +#define ZCRYPT_DEVICE_STATUS _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x5f, 0) #define ZCRYPT_STATUS_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x58, char[MAX_ZDEV_CARDIDS_EXT]) #define ZCRYPT_QDEPTH_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x59, char[MAX_ZDEV_CARDIDS_EXT]) #define ZCRYPT_PERDEV_REQCNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x5a, int[MAX_ZDEV_CARDIDS_EXT]) @@ -350,7 +350,7 @@ struct zcrypt_device_matrix { }; /* Deprecated: use ZCRYPT_DEVICE_STATUS */ -#define ZDEVICESTATUS _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x4f, 0) +#define ZDEVICESTATUS _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x4f, 0) /* Deprecated: use ZCRYPT_STATUS_MASK */ #define Z90STAT_STATUS_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x48, char[64]) /* Deprecated: use ZCRYPT_QDEPTH_MASK */ diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index dc37732412d7..5c13d2079d96 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -179,7 +179,7 @@ static int ap_qci_available(void) * ap_apft_available(): Test if AP facilities test (APFT) * facility is available. * - * Returns 1 if APFT is is available. + * Returns 1 if APFT is available. 
*/ static int ap_apft_available(void) { @@ -722,7 +722,7 @@ struct __ap_calc_ctrs { static int __ap_calc_helper(struct device *dev, void *arg) { - struct __ap_calc_ctrs *pctrs = (struct __ap_calc_ctrs *) arg; + struct __ap_calc_ctrs *pctrs = (struct __ap_calc_ctrs *)arg; if (is_queue_dev(dev)) { pctrs->apqns++; @@ -738,7 +738,7 @@ static void ap_calc_bound_apqns(unsigned int *apqns, unsigned int *bound) struct __ap_calc_ctrs ctrs; memset(&ctrs, 0, sizeof(ctrs)); - bus_for_each_dev(&ap_bus_type, NULL, (void *) &ctrs, __ap_calc_helper); + bus_for_each_dev(&ap_bus_type, NULL, (void *)&ctrs, __ap_calc_helper); *apqns = ctrs.apqns; *bound = ctrs.bound; @@ -799,7 +799,7 @@ EXPORT_SYMBOL(ap_wait_init_apqn_bindings_complete); static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data) { if (is_queue_dev(dev) && - AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long) data) + AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long)data) device_unregister(dev); return 0; } @@ -812,8 +812,8 @@ static int __ap_revise_reserved(struct device *dev, void *dummy) card = AP_QID_CARD(to_ap_queue(dev)->qid); queue = AP_QID_QUEUE(to_ap_queue(dev)->qid); mutex_lock(&ap_perms_mutex); - devres = test_bit_inv(card, ap_perms.apm) - && test_bit_inv(queue, ap_perms.aqm); + devres = test_bit_inv(card, ap_perms.apm) && + test_bit_inv(queue, ap_perms.aqm); mutex_unlock(&ap_perms_mutex); drvres = to_ap_drv(dev->driver)->flags & AP_DRIVER_FLAG_DEFAULT; @@ -844,8 +844,8 @@ int ap_owned_by_def_drv(int card, int queue) mutex_lock(&ap_perms_mutex); - if (test_bit_inv(card, ap_perms.apm) - && test_bit_inv(queue, ap_perms.aqm)) + if (test_bit_inv(card, ap_perms.apm) && + test_bit_inv(queue, ap_perms.aqm)) rc = 1; mutex_unlock(&ap_perms_mutex); @@ -894,8 +894,8 @@ static int ap_device_probe(struct device *dev) card = AP_QID_CARD(to_ap_queue(dev)->qid); queue = AP_QID_QUEUE(to_ap_queue(dev)->qid); mutex_lock(&ap_perms_mutex); - devres = test_bit_inv(card, ap_perms.apm) - && test_bit_inv(queue, ap_perms.aqm); + devres = test_bit_inv(card, ap_perms.apm) && + test_bit_inv(queue, ap_perms.aqm); mutex_unlock(&ap_perms_mutex); drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT; if (!!devres != !!drvres) @@ -916,8 +916,9 @@ static int ap_device_probe(struct device *dev) if (is_queue_dev(dev)) hash_del(&to_ap_queue(dev)->hnode); spin_unlock_bh(&ap_queues_lock); - } else + } else { ap_check_bindings_complete(); + } out: if (rc) @@ -998,8 +999,8 @@ void ap_bus_force_rescan(void) EXPORT_SYMBOL(ap_bus_force_rescan); /* -* A config change has happened, force an ap bus rescan. -*/ + * A config change has happened, force an ap bus rescan. 
+ */ void ap_bus_cfg_chg(void) { AP_DBF_DBG("%s config change, forcing bus rescan\n", __func__); @@ -1123,7 +1124,7 @@ int ap_parse_mask_str(const char *str, if (bits & 0x07) return -EINVAL; - size = BITS_TO_LONGS(bits)*sizeof(unsigned long); + size = BITS_TO_LONGS(bits) * sizeof(unsigned long); newmap = kmalloc(size, GFP_KERNEL); if (!newmap) return -ENOMEM; @@ -1259,8 +1260,9 @@ static ssize_t poll_thread_store(struct bus_type *bus, rc = ap_poll_thread_start(); if (rc) count = rc; - } else + } else { ap_poll_thread_stop(); + } return count; } @@ -1633,9 +1635,9 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func) apinfo.mode = (func >> 26) & 0x07; apinfo.cat = AP_DEVICE_TYPE_CEX8; status = ap_qact(qid, 0, &apinfo); - if (status.response_code == AP_RESPONSE_NORMAL - && apinfo.cat >= AP_DEVICE_TYPE_CEX2A - && apinfo.cat <= AP_DEVICE_TYPE_CEX8) + if (status.response_code == AP_RESPONSE_NORMAL && + apinfo.cat >= AP_DEVICE_TYPE_CEX2A && + apinfo.cat <= AP_DEVICE_TYPE_CEX8) comp_type = apinfo.cat; } if (!comp_type) @@ -1655,7 +1657,7 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func) */ static int __match_card_device_with_id(struct device *dev, const void *data) { - return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long)(void *) data; + return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long)(void *)data; } /* @@ -1664,7 +1666,7 @@ static int __match_card_device_with_id(struct device *dev, const void *data) */ static int __match_queue_device_with_qid(struct device *dev, const void *data) { - return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long) data; + return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long)data; } /* @@ -1673,8 +1675,8 @@ static int __match_queue_device_with_qid(struct device *dev, const void *data) */ static int __match_queue_device_with_queue_id(struct device *dev, const void *data) { - return is_queue_dev(dev) - && AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long) data; + return is_queue_dev(dev) && + AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long)data; } /* Helper function for notify_config_changed */ @@ -1727,7 +1729,7 @@ static inline void notify_scan_complete(void) static inline void ap_scan_rm_card_dev_and_queue_devs(struct ap_card *ac) { bus_for_each_dev(&ap_bus_type, NULL, - (void *)(long) ac->id, + (void *)(long)ac->id, __ap_queue_devices_with_id_unregister); device_unregister(&ac->ap_dev.device); } @@ -1755,7 +1757,7 @@ static inline void ap_scan_domains(struct ap_card *ac) for (dom = 0; dom <= ap_max_domain_id; dom++) { qid = AP_MKQID(ac->id, dom); dev = bus_find_device(&ap_bus_type, NULL, - (void *)(long) qid, + (void *)(long)qid, __match_queue_device_with_qid); aq = dev ? to_ap_queue(dev) : NULL; if (!ap_test_config_usage_domain(dom)) { @@ -1901,7 +1903,7 @@ static inline void ap_scan_adapter(int ap) /* Is there currently a card device for this adapter ? */ dev = bus_find_device(&ap_bus_type, NULL, - (void *)(long) ap, + (void *)(long)ap, __match_card_device_with_id); ac = dev ? 
to_ap_card(dev) : NULL; @@ -2102,7 +2104,7 @@ static void ap_scan_bus(struct work_struct *unused) if (ap_domain_index >= 0) { struct device *dev = bus_find_device(&ap_bus_type, NULL, - (void *)(long) ap_domain_index, + (void *)(long)ap_domain_index, __match_queue_device_with_queue_id); if (dev) put_device(dev); @@ -2137,7 +2139,7 @@ static int __init ap_debug_init(void) static void __init ap_perms_init(void) { - /* all resources useable if no kernel parameter string given */ + /* all resources usable if no kernel parameter string given */ memset(&ap_perms.ioctlm, 0xFF, sizeof(ap_perms.ioctlm)); memset(&ap_perms.apm, 0xFF, sizeof(ap_perms.apm)); memset(&ap_perms.aqm, 0xFF, sizeof(ap_perms.aqm)); diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 6a65885f5f43..0c40af157df2 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -317,6 +317,7 @@ struct ap_perms { unsigned long aqm[BITS_TO_LONGS(AP_DOMAINS)]; unsigned long adm[BITS_TO_LONGS(AP_DOMAINS)]; }; + extern struct ap_perms ap_perms; extern struct mutex ap_perms_mutex; diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 205045cd998d..c48b0db824e3 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -99,7 +99,7 @@ int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) { struct ap_queue_status status; - if (msg == NULL) + if (!msg) return -EINVAL; status = ap_dqap(qid, psmid, msg, length, NULL, NULL); switch (status.response_code) { @@ -603,7 +603,7 @@ static ssize_t interrupt_show(struct device *dev, static DEVICE_ATTR_RO(interrupt); static ssize_t config_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { struct ap_queue *aq = to_ap_queue(dev); int rc; @@ -827,8 +827,9 @@ int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg) aq->requestq_count++; aq->total_request_count++; atomic64_inc(&aq->card->total_request_count); - } else + } else { rc = -ENODEV; + } /* Send/receive as many request from the queue as possible. 
*/ ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL)); diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 7f69ca695fc2..7329caa7d467 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -232,7 +232,7 @@ static int pkey_ep11key2pkey(const u8 *key, struct pkey_protkey *pkey) int i, rc; u16 card, dom; u32 nr_apqns, *apqns = NULL; - struct ep11keyblob *kb = (struct ep11keyblob *) key; + struct ep11keyblob *kb = (struct ep11keyblob *)key; zcrypt_wait_api_operational(); @@ -267,12 +267,12 @@ static int pkey_verifykey(const struct pkey_seckey *seckey, u16 *pcardnr, u16 *pdomain, u16 *pkeysize, u32 *pattributes) { - struct secaeskeytoken *t = (struct secaeskeytoken *) seckey; + struct secaeskeytoken *t = (struct secaeskeytoken *)seckey; u16 cardnr, domain; int rc; /* check the secure key for valid AES secure key */ - rc = cca_check_secaeskeytoken(debug_info, 3, (u8 *) seckey, 0); + rc = cca_check_secaeskeytoken(debug_info, 3, (u8 *)seckey, 0); if (rc) goto out; if (pattributes) @@ -425,9 +425,9 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen, t = (struct clearaeskeytoken *)key; if (keylen != sizeof(*t) + t->len) goto out; - if ((t->keytype == PKEY_KEYTYPE_AES_128 && t->len == 16) - || (t->keytype == PKEY_KEYTYPE_AES_192 && t->len == 24) - || (t->keytype == PKEY_KEYTYPE_AES_256 && t->len == 32)) + if ((t->keytype == PKEY_KEYTYPE_AES_128 && t->len == 16) || + (t->keytype == PKEY_KEYTYPE_AES_192 && t->len == 24) || + (t->keytype == PKEY_KEYTYPE_AES_256 && t->len == 32)) memcpy(ckey.clrkey, t->clearkey, t->len); else goto out; @@ -541,7 +541,6 @@ int pkey_keyblob2pkey(const u8 *key, u32 keylen, DEBUG_DBG("%s rc=%d\n", __func__, rc); return rc; - } EXPORT_SYMBOL(pkey_keyblob2pkey); @@ -588,9 +587,11 @@ static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns, } else if (ktype == PKEY_TYPE_CCA_DATA) { rc = cca_genseckey(card, dom, ksize, keybuf); *keybufsize = (rc ? 0 : SECKEYBLOBSIZE); - } else /* TOKVER_CCA_VLSC */ + } else { + /* TOKVER_CCA_VLSC */ rc = cca_gencipherkey(card, dom, ksize, kflags, keybuf, keybufsize); + } if (rc == 0) break; } @@ -645,9 +646,11 @@ static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns, rc = cca_clr2seckey(card, dom, ksize, clrkey, keybuf); *keybufsize = (rc ? 
0 : SECKEYBLOBSIZE); - } else /* TOKVER_CCA_VLSC */ + } else { + /* TOKVER_CCA_VLSC */ rc = cca_clr2cipherkey(card, dom, ksize, kflags, clrkey, keybuf, keybufsize); + } if (rc == 0) break; } @@ -667,8 +670,8 @@ static int pkey_verifykey2(const u8 *key, size_t keylen, if (keylen < sizeof(struct keytoken_header)) return -EINVAL; - if (hdr->type == TOKTYPE_CCA_INTERNAL - && hdr->version == TOKVER_CCA_AES) { + if (hdr->type == TOKTYPE_CCA_INTERNAL && + hdr->version == TOKVER_CCA_AES) { struct secaeskeytoken *t = (struct secaeskeytoken *)key; rc = cca_check_secaeskeytoken(debug_info, 3, key, 0); @@ -677,7 +680,7 @@ static int pkey_verifykey2(const u8 *key, size_t keylen, if (ktype) *ktype = PKEY_TYPE_CCA_DATA; if (ksize) - *ksize = (enum pkey_key_size) t->bitsize; + *ksize = (enum pkey_key_size)t->bitsize; rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, ZCRYPT_CEX3C, AES_MK_SET, t->mkvp, 0, 1); @@ -697,8 +700,8 @@ static int pkey_verifykey2(const u8 *key, size_t keylen, *cardnr = ((struct pkey_apqn *)_apqns)->card; *domain = ((struct pkey_apqn *)_apqns)->domain; - } else if (hdr->type == TOKTYPE_CCA_INTERNAL - && hdr->version == TOKVER_CCA_VLSC) { + } else if (hdr->type == TOKTYPE_CCA_INTERNAL && + hdr->version == TOKVER_CCA_VLSC) { struct cipherkeytoken *t = (struct cipherkeytoken *)key; rc = cca_check_secaescipherkey(debug_info, 3, key, 0, 1); @@ -734,8 +737,8 @@ static int pkey_verifykey2(const u8 *key, size_t keylen, *cardnr = ((struct pkey_apqn *)_apqns)->card; *domain = ((struct pkey_apqn *)_apqns)->domain; - } else if (hdr->type == TOKTYPE_NON_CCA - && hdr->version == TOKVER_EP11_AES) { + } else if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_AES) { struct ep11keyblob *kb = (struct ep11keyblob *)key; rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1); @@ -757,8 +760,9 @@ static int pkey_verifykey2(const u8 *key, size_t keylen, *cardnr = ((struct pkey_apqn *)_apqns)->card; *domain = ((struct pkey_apqn *)_apqns)->domain; - } else + } else { rc = -EINVAL; + } out: kfree(_apqns); @@ -816,16 +820,17 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns, for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { card = apqns[i].card; dom = apqns[i].domain; - if (hdr->type == TOKTYPE_CCA_INTERNAL - && hdr->version == TOKVER_CCA_AES) + if (hdr->type == TOKTYPE_CCA_INTERNAL && + hdr->version == TOKVER_CCA_AES) { rc = cca_sec2protkey(card, dom, key, pkey->protkey, &pkey->len, &pkey->type); - else if (hdr->type == TOKTYPE_CCA_INTERNAL - && hdr->version == TOKVER_CCA_VLSC) + } else if (hdr->type == TOKTYPE_CCA_INTERNAL && + hdr->version == TOKVER_CCA_VLSC) { rc = cca_cipher2protkey(card, dom, key, pkey->protkey, &pkey->len, &pkey->type); - else { /* EP11 AES secure key blob */ - struct ep11keyblob *kb = (struct ep11keyblob *) key; + } else { + /* EP11 AES secure key blob */ + struct ep11keyblob *kb = (struct ep11keyblob *)key; pkey->len = sizeof(pkey->protkey); rc = ep11_kblob2protkey(card, dom, key, kb->head.len, @@ -851,10 +856,10 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, zcrypt_wait_api_operational(); - if (hdr->type == TOKTYPE_NON_CCA - && (hdr->version == TOKVER_EP11_AES_WITH_HEADER - || hdr->version == TOKVER_EP11_ECC_WITH_HEADER) - && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { + if (hdr->type == TOKTYPE_NON_CCA && + (hdr->version == TOKVER_EP11_AES_WITH_HEADER || + hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { int minhwtype = 0, api = 0; struct 
ep11keyblob *kb = (struct ep11keyblob *) (key + sizeof(struct ep11kblob_header)); @@ -869,11 +874,11 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, minhwtype, api, kb->wkvp); if (rc) goto out; - } else if (hdr->type == TOKTYPE_NON_CCA - && hdr->version == TOKVER_EP11_AES - && is_ep11_keyblob(key)) { + } else if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_AES && + is_ep11_keyblob(key)) { int minhwtype = 0, api = 0; - struct ep11keyblob *kb = (struct ep11keyblob *) key; + struct ep11keyblob *kb = (struct ep11keyblob *)key; if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) return -EINVAL; @@ -931,8 +936,9 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, cur_mkvp, old_mkvp, 1); if (rc) goto out; - } else + } else { return -EINVAL; + } if (apqns) { if (*nr_apqns < _nr_apqns) @@ -961,9 +967,9 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype, int minhwtype = ZCRYPT_CEX3C; if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) - cur_mkvp = *((u64 *) cur_mkvp); + cur_mkvp = *((u64 *)cur_mkvp); if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) - old_mkvp = *((u64 *) alt_mkvp); + old_mkvp = *((u64 *)alt_mkvp); if (ktype == PKEY_TYPE_CCA_CIPHER) minhwtype = ZCRYPT_CEX6; rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, @@ -975,9 +981,9 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype, u64 cur_mkvp = 0, old_mkvp = 0; if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) - cur_mkvp = *((u64 *) cur_mkvp); + cur_mkvp = *((u64 *)cur_mkvp); if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) - old_mkvp = *((u64 *) alt_mkvp); + old_mkvp = *((u64 *)alt_mkvp); rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, ZCRYPT_CEX7, APKA_MK_SET, cur_mkvp, old_mkvp, 1); @@ -996,8 +1002,9 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype, if (rc) goto out; - } else + } else { return -EINVAL; + } if (apqns) { if (*nr_apqns < _nr_apqns) @@ -1026,21 +1033,21 @@ static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns, if (keylen < sizeof(struct keytoken_header)) return -EINVAL; - if (hdr->type == TOKTYPE_NON_CCA - && hdr->version == TOKVER_EP11_AES_WITH_HEADER - && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { + if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_AES_WITH_HEADER && + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { /* EP11 AES key blob with header */ if (ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1)) return -EINVAL; - } else if (hdr->type == TOKTYPE_NON_CCA - && hdr->version == TOKVER_EP11_ECC_WITH_HEADER - && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { + } else if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_ECC_WITH_HEADER && + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { /* EP11 ECC key blob with header */ if (ep11_check_ecc_key_with_hdr(debug_info, 3, key, keylen, 1)) return -EINVAL; - } else if (hdr->type == TOKTYPE_NON_CCA - && hdr->version == TOKVER_EP11_AES - && is_ep11_keyblob(key)) { + } else if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_AES && + is_ep11_keyblob(key)) { /* EP11 AES key blob with header in session field */ if (ep11_check_aes_key(debug_info, 3, key, keylen, 1)) return -EINVAL; @@ -1088,15 +1095,15 @@ static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns, for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { card = apqns[i].card; dom = apqns[i].domain; - if (hdr->type == TOKTYPE_NON_CCA - && (hdr->version == TOKVER_EP11_AES_WITH_HEADER - || hdr->version == TOKVER_EP11_ECC_WITH_HEADER) - && 
is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) + if (hdr->type == TOKTYPE_NON_CCA && + (hdr->version == TOKVER_EP11_AES_WITH_HEADER || + hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) rc = ep11_kblob2protkey(card, dom, key, hdr->len, protkey, protkeylen, protkeytype); - else if (hdr->type == TOKTYPE_NON_CCA - && hdr->version == TOKVER_EP11_AES - && is_ep11_keyblob(key)) + else if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_AES && + is_ep11_keyblob(key)) rc = ep11_kblob2protkey(card, dom, key, hdr->len, protkey, protkeylen, protkeytype); else if (hdr->type == TOKTYPE_CCA_INTERNAL && @@ -1144,7 +1151,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, switch (cmd) { case PKEY_GENSECK: { - struct pkey_genseck __user *ugs = (void __user *) arg; + struct pkey_genseck __user *ugs = (void __user *)arg; struct pkey_genseck kgs; if (copy_from_user(&kgs, ugs, sizeof(kgs))) @@ -1159,7 +1166,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, break; } case PKEY_CLR2SECK: { - struct pkey_clr2seck __user *ucs = (void __user *) arg; + struct pkey_clr2seck __user *ucs = (void __user *)arg; struct pkey_clr2seck kcs; if (copy_from_user(&kcs, ucs, sizeof(kcs))) @@ -1175,7 +1182,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, break; } case PKEY_SEC2PROTK: { - struct pkey_sec2protk __user *usp = (void __user *) arg; + struct pkey_sec2protk __user *usp = (void __user *)arg; struct pkey_sec2protk ksp; if (copy_from_user(&ksp, usp, sizeof(ksp))) @@ -1191,7 +1198,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, break; } case PKEY_CLR2PROTK: { - struct pkey_clr2protk __user *ucp = (void __user *) arg; + struct pkey_clr2protk __user *ucp = (void __user *)arg; struct pkey_clr2protk kcp; if (copy_from_user(&kcp, ucp, sizeof(kcp))) @@ -1207,7 +1214,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, break; } case PKEY_FINDCARD: { - struct pkey_findcard __user *ufc = (void __user *) arg; + struct pkey_findcard __user *ufc = (void __user *)arg; struct pkey_findcard kfc; if (copy_from_user(&kfc, ufc, sizeof(kfc))) @@ -1222,7 +1229,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, break; } case PKEY_SKEY2PKEY: { - struct pkey_skey2pkey __user *usp = (void __user *) arg; + struct pkey_skey2pkey __user *usp = (void __user *)arg; struct pkey_skey2pkey ksp; if (copy_from_user(&ksp, usp, sizeof(ksp))) @@ -1236,7 +1243,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, break; } case PKEY_VERIFYKEY: { - struct pkey_verifykey __user *uvk = (void __user *) arg; + struct pkey_verifykey __user *uvk = (void __user *)arg; struct pkey_verifykey kvk; if (copy_from_user(&kvk, uvk, sizeof(kvk))) @@ -1251,7 +1258,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, break; } case PKEY_GENPROTK: { - struct pkey_genprotk __user *ugp = (void __user *) arg; + struct pkey_genprotk __user *ugp = (void __user *)arg; struct pkey_genprotk kgp; if (copy_from_user(&kgp, ugp, sizeof(kgp))) @@ -1265,7 +1272,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, break; } case PKEY_VERIFYPROTK: { - struct pkey_verifyprotk __user *uvp = (void __user *) arg; + struct pkey_verifyprotk __user *uvp = (void __user *)arg; struct pkey_verifyprotk kvp; if (copy_from_user(&kvp, uvp, sizeof(kvp))) @@ -1275,7 +1282,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int 
cmd, break; } case PKEY_KBLOB2PROTK: { - struct pkey_kblob2pkey __user *utp = (void __user *) arg; + struct pkey_kblob2pkey __user *utp = (void __user *)arg; struct pkey_kblob2pkey ktp; u8 *kkey; @@ -1294,7 +1301,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, break; } case PKEY_GENSECK2: { - struct pkey_genseck2 __user *ugs = (void __user *) arg; + struct pkey_genseck2 __user *ugs = (void __user *)arg; struct pkey_genseck2 kgs; struct pkey_apqn *apqns; size_t klen = KEYBLOBBUFSIZE; @@ -1336,7 +1343,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, break; } case PKEY_CLR2SECK2: { - struct pkey_clr2seck2 __user *ucs = (void __user *) arg; + struct pkey_clr2seck2 __user *ucs = (void __user *)arg; struct pkey_clr2seck2 kcs; struct pkey_apqn *apqns; size_t klen = KEYBLOBBUFSIZE; @@ -1379,7 +1386,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, break; } case PKEY_VERIFYKEY2: { - struct pkey_verifykey2 __user *uvk = (void __user *) arg; + struct pkey_verifykey2 __user *uvk = (void __user *)arg; struct pkey_verifykey2 kvk; u8 *kkey; @@ -1400,7 +1407,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, break; } case PKEY_KBLOB2PROTK2: { - struct pkey_kblob2pkey2 __user *utp = (void __user *) arg; + struct pkey_kblob2pkey2 __user *utp = (void __user *)arg; struct pkey_kblob2pkey2 ktp; struct pkey_apqn *apqns = NULL; u8 *kkey; @@ -1427,7 +1434,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, break; } case PKEY_APQNS4K: { - struct pkey_apqns4key __user *uak = (void __user *) arg; + struct pkey_apqns4key __user *uak = (void __user *)arg; struct pkey_apqns4key kak; struct pkey_apqn *apqns = NULL; size_t nr_apqns, len; @@ -1476,7 +1483,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, break; } case PKEY_APQNS4KT: { - struct pkey_apqns4keytype __user *uat = (void __user *) arg; + struct pkey_apqns4keytype __user *uat = (void __user *)arg; struct pkey_apqns4keytype kat; struct pkey_apqn *apqns = NULL; size_t nr_apqns, len; @@ -1518,7 +1525,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, break; } case PKEY_KBLOB2PROTK3: { - struct pkey_kblob2pkey3 __user *utp = (void __user *) arg; + struct pkey_kblob2pkey3 __user *utp = (void __user *)arg; struct pkey_kblob2pkey3 ktp; struct pkey_apqn *apqns = NULL; u32 protkeylen = PROTKEYBLOBBUFSIZE; @@ -1708,7 +1715,7 @@ static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf, loff_t off, size_t count) { int rc; - struct pkey_seckey *seckey = (struct pkey_seckey *) buf; + struct pkey_seckey *seckey = (struct pkey_seckey *)buf; if (off != 0 || count < sizeof(struct secaeskeytoken)) return -EINVAL; diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index aa6dc3c0c353..f94b43ce9a65 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -104,7 +104,7 @@ struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant) struct zcrypt_ops *zops; list_for_each_entry(zops, &zcrypt_ops_list, list) - if ((zops->variant == variant) && + if (zops->variant == variant && (!strncmp(zops->name, name, sizeof(zops->name)))) return zops; return NULL; @@ -438,8 +438,8 @@ static int zcdn_create(const char *name) strncpy(nodename, name, sizeof(nodename)); else snprintf(nodename, sizeof(nodename), - ZCRYPT_NAME "_%d", (int) MINOR(devt)); - nodename[sizeof(nodename)-1] = '\0'; + ZCRYPT_NAME "_%d", (int)MINOR(devt)); + 
nodename[sizeof(nodename) - 1] = '\0'; if (dev_set_name(&zcdndev->device, nodename)) { rc = -EINVAL; goto unlockout; @@ -519,7 +519,7 @@ static ssize_t zcrypt_read(struct file *filp, char __user *buf, /* * zcrypt_write(): Not allowed. * - * Write is is not allowed + * Write is not allowed */ static ssize_t zcrypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos) @@ -549,7 +549,7 @@ static int zcrypt_open(struct inode *inode, struct file *filp) perms = &zcdndev->perms; } #endif - filp->private_data = (void *) perms; + filp->private_data = (void *)perms; atomic_inc(&zcrypt_open_count); return stream_open(inode, filp); @@ -713,7 +713,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, pref_zq = NULL; spin_lock(&zcrypt_list_lock); for_each_zcrypt_card(zc) { - /* Check for useable accelarator or CCA card */ + /* Check for usable accelarator or CCA card */ if (!zc->online || !zc->card->config || zc->card->chkstop || !(zc->card->functions & 0x18000000)) continue; @@ -733,7 +733,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) continue; for_each_zcrypt_queue(zq, zc) { - /* check if device is useable and eligible */ + /* check if device is usable and eligible */ if (!zq->online || !zq->ops->rsa_modexpo || !zq->queue->config || zq->queue->chkstop) continue; @@ -823,7 +823,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, pref_zq = NULL; spin_lock(&zcrypt_list_lock); for_each_zcrypt_card(zc) { - /* Check for useable accelarator or CCA card */ + /* Check for usable accelarator or CCA card */ if (!zc->online || !zc->card->config || zc->card->chkstop || !(zc->card->functions & 0x18000000)) continue; @@ -843,7 +843,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) continue; for_each_zcrypt_queue(zq, zc) { - /* check if device is useable and eligible */ + /* check if device is usable and eligible */ if (!zq->online || !zq->ops->rsa_modexpo_crt || !zq->queue->config || zq->queue->chkstop) continue; @@ -893,7 +893,7 @@ out: static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, struct zcrypt_track *tr, - struct ica_xcRB *xcRB) + struct ica_xcRB *xcrb) { struct zcrypt_card *zc, *pref_zc; struct zcrypt_queue *zq, *pref_zq; @@ -904,9 +904,9 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, int cpen, qpen, qid = 0, rc = -ENODEV; struct module *mod; - trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB); + trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB); - xcRB->status = 0; + xcrb->status = 0; ap_init_message(&ap_msg); #ifdef CONFIG_ZCRYPT_DEBUG @@ -915,11 +915,11 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, if (tr && tr->fi.action == AP_FI_ACTION_CCA_AGENT_FF) { ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid agent_ID 'FF'\n", __func__, tr->fi.cmd); - xcRB->agent_ID = 0x4646; + xcrb->agent_ID = 0x4646; } #endif - rc = prep_cca_ap_msg(userspace, xcRB, &ap_msg, &func_code, &domain); + rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain); if (rc) goto out; @@ -948,13 +948,13 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, pref_zq = NULL; spin_lock(&zcrypt_list_lock); for_each_zcrypt_card(zc) { - /* Check for useable CCA card */ + /* Check for usable CCA card */ if (!zc->online || !zc->card->config || zc->card->chkstop || !(zc->card->functions & 0x10000000)) continue; /* Check for user selected CCA card */ - if (xcRB->user_defined != 
AUTOSELECT && - xcRB->user_defined != zc->card->id) + if (xcrb->user_defined != AUTOSELECT && + xcrb->user_defined != zc->card->id) continue; /* check if request size exceeds card max msg size */ if (ap_msg.len > zc->card->maxmsgsize) @@ -971,7 +971,7 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) continue; for_each_zcrypt_queue(zq, zc) { - /* check for device useable and eligible */ + /* check for device usable and eligible */ if (!zq->online || !zq->ops->send_cprb || !zq->queue->config || zq->queue->chkstop || (tdom != AUTOSEL_DOM && @@ -998,7 +998,7 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, if (!pref_zq) { ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n", - __func__, xcRB->user_defined, *domain); + __func__, xcrb->user_defined, *domain); rc = -ENODEV; goto out; } @@ -1016,7 +1016,7 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, } #endif - rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcRB, &ap_msg); + rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg); spin_lock(&zcrypt_list_lock); zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); @@ -1028,14 +1028,14 @@ out: tr->last_rc = rc; tr->last_qid = qid; } - trace_s390_zcrypt_rep(xcRB, func_code, rc, + trace_s390_zcrypt_rep(xcrb, func_code, rc, AP_QID_CARD(qid), AP_QID_QUEUE(qid)); return rc; } -long zcrypt_send_cprb(struct ica_xcRB *xcRB) +long zcrypt_send_cprb(struct ica_xcRB *xcrb) { - return _zcrypt_send_cprb(false, &ap_perms, NULL, xcRB); + return _zcrypt_send_cprb(false, &ap_perms, NULL, xcrb); } EXPORT_SYMBOL(zcrypt_send_cprb); @@ -1089,7 +1089,7 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, ap_msg.fi.cmd = tr->fi.cmd; #endif - target_num = (unsigned short) xcrb->targets_num; + target_num = (unsigned short)xcrb->targets_num; /* empty list indicates autoselect (all available targets) */ targets = NULL; @@ -1103,9 +1103,9 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, goto out; } - uptr = (struct ep11_target_dev __force __user *) xcrb->targets; + uptr = (struct ep11_target_dev __force __user *)xcrb->targets; if (z_copy_from_user(userspace, targets, uptr, - target_num * sizeof(*targets))) { + target_num * sizeof(*targets))) { func_code = 0; rc = -EFAULT; goto out_free; @@ -1132,7 +1132,7 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, pref_zq = NULL; spin_lock(&zcrypt_list_lock); for_each_zcrypt_card(zc) { - /* Check for useable EP11 card */ + /* Check for usable EP11 card */ if (!zc->online || !zc->card->config || zc->card->chkstop || !(zc->card->functions & 0x04000000)) continue; @@ -1155,7 +1155,7 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) continue; for_each_zcrypt_queue(zq, zc) { - /* check if device is useable and eligible */ + /* check if device is usable and eligible */ if (!zq->online || !zq->ops->send_ep11_cprb || !zq->queue->config || zq->queue->chkstop || (targets && @@ -1184,11 +1184,11 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, if (!pref_zq) { if (targets && target_num == 1) { ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n", - __func__, (int) targets->ap_id, - (int) targets->dom_id); + __func__, (int)targets->ap_id, + (int)targets->dom_id); } else if (targets) { ZCRYPT_DBF_DBG("%s no match for %d target addrs => ENODEV\n", - 
__func__, (int) target_num); + __func__, (int)target_num); } else { ZCRYPT_DBF_DBG("%s no match for address ff.ffff => ENODEV\n", __func__); @@ -1245,7 +1245,7 @@ static long zcrypt_rng(char *buffer) pref_zq = NULL; spin_lock(&zcrypt_list_lock); for_each_zcrypt_card(zc) { - /* Check for useable CCA card */ + /* Check for usable CCA card */ if (!zc->online || !zc->card->config || zc->card->chkstop || !(zc->card->functions & 0x10000000)) continue; @@ -1254,7 +1254,7 @@ static long zcrypt_rng(char *buffer) if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt)) continue; for_each_zcrypt_queue(zq, zc) { - /* check if device is useable and eligible */ + /* check if device is usable and eligible */ if (!zq->online || !zq->ops->rng || !zq->queue->config || zq->queue->chkstop) continue; @@ -1270,7 +1270,7 @@ static long zcrypt_rng(char *buffer) if (!pref_zq) { ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n", - __func__); + __func__); rc = -ENODEV; goto out; } @@ -1381,8 +1381,8 @@ static void zcrypt_status_mask(char status[], size_t max_adapters) for_each_zcrypt_card(zc) { for_each_zcrypt_queue(zq, zc) { card = AP_QID_CARD(zq->queue->qid); - if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index - || card >= max_adapters) + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index || + card >= max_adapters) continue; status[card] = zc->online ? zc->user_space_type : 0x0d; } @@ -1402,8 +1402,8 @@ static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters) for_each_zcrypt_card(zc) { for_each_zcrypt_queue(zq, zc) { card = AP_QID_CARD(zq->queue->qid); - if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index - || card >= max_adapters) + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index || + card >= max_adapters) continue; spin_lock(&zq->queue->lock); qdepth[card] = @@ -1429,13 +1429,13 @@ static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters) for_each_zcrypt_card(zc) { for_each_zcrypt_queue(zq, zc) { card = AP_QID_CARD(zq->queue->qid); - if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index - || card >= max_adapters) + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index || + card >= max_adapters) continue; spin_lock(&zq->queue->lock); cnt = zq->queue->total_request_count; spin_unlock(&zq->queue->lock); - reqcnt[card] = (cnt < UINT_MAX) ? (u32) cnt : UINT_MAX; + reqcnt[card] = (cnt < UINT_MAX) ? 
(u32)cnt : UINT_MAX; } } local_bh_enable(); @@ -1493,7 +1493,7 @@ static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg) int rc; struct zcrypt_track tr; struct ica_rsa_modexpo mex; - struct ica_rsa_modexpo __user *umex = (void __user *) arg; + struct ica_rsa_modexpo __user *umex = (void __user *)arg; memset(&tr, 0, sizeof(tr)); if (copy_from_user(&mex, umex, sizeof(mex))) @@ -1538,7 +1538,7 @@ static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg) int rc; struct zcrypt_track tr; struct ica_rsa_modexpo_crt crt; - struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg; + struct ica_rsa_modexpo_crt __user *ucrt = (void __user *)arg; memset(&tr, 0, sizeof(tr)); if (copy_from_user(&crt, ucrt, sizeof(crt))) @@ -1581,25 +1581,25 @@ static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg) static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg) { int rc; - struct ica_xcRB xcRB; + struct ica_xcRB xcrb; struct zcrypt_track tr; - struct ica_xcRB __user *uxcRB = (void __user *) arg; + struct ica_xcRB __user *uxcrb = (void __user *)arg; memset(&tr, 0, sizeof(tr)); - if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB))) + if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb))) return -EFAULT; #ifdef CONFIG_ZCRYPT_DEBUG - if ((xcRB.status & 0x8000FFFF) == 0x80004649 /* 'FI' */) { + if ((xcrb.status & 0x8000FFFF) == 0x80004649 /* 'FI' */) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; - tr.fi.cmd = (u16)(xcRB.status >> 16); + tr.fi.cmd = (u16)(xcrb.status >> 16); } - xcRB.status = 0; + xcrb.status = 0; #endif do { - rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB); + rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); if (rc == -EAGAIN) tr.again_counter++; #ifdef CONFIG_ZCRYPT_DEBUG @@ -1610,7 +1610,7 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg) /* on failure: retry once again after a requested rescan */ if ((rc == -ENODEV) && (zcrypt_process_rescan())) do { - rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB); + rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); if (rc == -EAGAIN) tr.again_counter++; } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); @@ -1618,8 +1618,8 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg) rc = -EIO; if (rc) ZCRYPT_DBF_DBG("ioctl ZSENDCPRB rc=%d status=0x%x\n", - rc, xcRB.status); - if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB))) + rc, xcrb.status); + if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) return -EFAULT; return rc; } @@ -1674,7 +1674,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, { int rc; struct ap_perms *perms = - (struct ap_perms *) filp->private_data; + (struct ap_perms *)filp->private_data; rc = zcrypt_check_ioctl(perms, cmd); if (rc) @@ -1698,7 +1698,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, if (!device_status) return -ENOMEM; zcrypt_device_status_mask_ext(device_status); - if (copy_to_user((char __user *) arg, device_status, + if (copy_to_user((char __user *)arg, device_status, total_size)) rc = -EFAULT; kfree(device_status); @@ -1708,7 +1708,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, char status[AP_DEVICES]; zcrypt_status_mask(status, AP_DEVICES); - if (copy_to_user((char __user *) arg, status, sizeof(status))) + if (copy_to_user((char __user *)arg, status, sizeof(status))) return -EFAULT; return 0; } @@ -1716,7 +1716,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, char qdepth[AP_DEVICES]; zcrypt_qdepth_mask(qdepth, 
AP_DEVICES); - if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth))) + if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth))) return -EFAULT; return 0; } @@ -1727,21 +1727,21 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, if (!reqcnt) return -ENOMEM; zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES); - if (copy_to_user((int __user *) arg, reqcnt, + if (copy_to_user((int __user *)arg, reqcnt, sizeof(u32) * AP_DEVICES)) rc = -EFAULT; kfree(reqcnt); return rc; } case Z90STAT_REQUESTQ_COUNT: - return put_user(zcrypt_requestq_count(), (int __user *) arg); + return put_user(zcrypt_requestq_count(), (int __user *)arg); case Z90STAT_PENDINGQ_COUNT: - return put_user(zcrypt_pendingq_count(), (int __user *) arg); + return put_user(zcrypt_pendingq_count(), (int __user *)arg); case Z90STAT_TOTALOPEN_COUNT: return put_user(atomic_read(&zcrypt_open_count), - (int __user *) arg); + (int __user *)arg); case Z90STAT_DOMAIN_INDEX: - return put_user(ap_domain_index, (int __user *) arg); + return put_user(ap_domain_index, (int __user *)arg); /* * Deprecated ioctls */ @@ -1755,7 +1755,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, if (!device_status) return -ENOMEM; zcrypt_device_status_mask(device_status); - if (copy_to_user((char __user *) arg, device_status, + if (copy_to_user((char __user *)arg, device_status, total_size)) rc = -EFAULT; kfree(device_status); @@ -1766,7 +1766,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, char status[MAX_ZDEV_CARDIDS]; zcrypt_status_mask(status, MAX_ZDEV_CARDIDS); - if (copy_to_user((char __user *) arg, status, sizeof(status))) + if (copy_to_user((char __user *)arg, status, sizeof(status))) return -EFAULT; return 0; } @@ -1775,7 +1775,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, char qdepth[MAX_ZDEV_CARDIDS]; zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS); - if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth))) + if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth))) return -EFAULT; return 0; } @@ -1784,7 +1784,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, u32 reqcnt[MAX_ZDEV_CARDIDS]; zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS); - if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt))) + if (copy_to_user((int __user *)arg, reqcnt, sizeof(reqcnt))) return -EFAULT; return 0; } @@ -1899,7 +1899,7 @@ static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp, &ucrt32->outputdatalength); } -struct compat_ica_xcRB { +struct compat_ica_xcrb { unsigned short agent_ID; unsigned int user_defined; unsigned short request_ID; @@ -1919,66 +1919,66 @@ struct compat_ica_xcRB { unsigned int status; } __packed; -static long trans_xcRB32(struct ap_perms *perms, struct file *filp, +static long trans_xcrb32(struct ap_perms *perms, struct file *filp, unsigned int cmd, unsigned long arg) { - struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg); - struct compat_ica_xcRB xcRB32; + struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg); + struct compat_ica_xcrb xcrb32; struct zcrypt_track tr; - struct ica_xcRB xcRB64; + struct ica_xcRB xcrb64; long rc; memset(&tr, 0, sizeof(tr)); - if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32))) + if (copy_from_user(&xcrb32, uxcrb32, sizeof(xcrb32))) return -EFAULT; - xcRB64.agent_ID = xcRB32.agent_ID; - xcRB64.user_defined = xcRB32.user_defined; - xcRB64.request_ID = xcRB32.request_ID; - xcRB64.request_control_blk_length = - xcRB32.request_control_blk_length; - 
xcRB64.request_control_blk_addr = - compat_ptr(xcRB32.request_control_blk_addr); - xcRB64.request_data_length = - xcRB32.request_data_length; - xcRB64.request_data_address = - compat_ptr(xcRB32.request_data_address); - xcRB64.reply_control_blk_length = - xcRB32.reply_control_blk_length; - xcRB64.reply_control_blk_addr = - compat_ptr(xcRB32.reply_control_blk_addr); - xcRB64.reply_data_length = xcRB32.reply_data_length; - xcRB64.reply_data_addr = - compat_ptr(xcRB32.reply_data_addr); - xcRB64.priority_window = xcRB32.priority_window; - xcRB64.status = xcRB32.status; + xcrb64.agent_ID = xcrb32.agent_ID; + xcrb64.user_defined = xcrb32.user_defined; + xcrb64.request_ID = xcrb32.request_ID; + xcrb64.request_control_blk_length = + xcrb32.request_control_blk_length; + xcrb64.request_control_blk_addr = + compat_ptr(xcrb32.request_control_blk_addr); + xcrb64.request_data_length = + xcrb32.request_data_length; + xcrb64.request_data_address = + compat_ptr(xcrb32.request_data_address); + xcrb64.reply_control_blk_length = + xcrb32.reply_control_blk_length; + xcrb64.reply_control_blk_addr = + compat_ptr(xcrb32.reply_control_blk_addr); + xcrb64.reply_data_length = xcrb32.reply_data_length; + xcrb64.reply_data_addr = + compat_ptr(xcrb32.reply_data_addr); + xcrb64.priority_window = xcrb32.priority_window; + xcrb64.status = xcrb32.status; do { - rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64); + rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64); if (rc == -EAGAIN) tr.again_counter++; } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); /* on failure: retry once again after a requested rescan */ if ((rc == -ENODEV) && (zcrypt_process_rescan())) do { - rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64); + rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64); if (rc == -EAGAIN) tr.again_counter++; } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; - xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length; - xcRB32.reply_data_length = xcRB64.reply_data_length; - xcRB32.status = xcRB64.status; - if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32))) + xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length; + xcrb32.reply_data_length = xcrb64.reply_data_length; + xcrb32.status = xcrb64.status; + if (copy_to_user(uxcrb32, &xcrb32, sizeof(xcrb32))) return -EFAULT; return rc; } static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, - unsigned long arg) + unsigned long arg) { int rc; struct ap_perms *perms = - (struct ap_perms *) filp->private_data; + (struct ap_perms *)filp->private_data; rc = zcrypt_check_ioctl(perms, cmd); if (rc) @@ -1989,7 +1989,7 @@ static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, if (cmd == ICARSACRT) return trans_modexpo_crt32(perms, filp, cmd, arg); if (cmd == ZSECSENDCPRB) - return trans_xcRB32(perms, filp, cmd, arg); + return trans_xcrb32(perms, filp, cmd, arg); return zcrypt_unlocked_ioctl(filp, cmd, arg); } #endif @@ -2033,10 +2033,10 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data) * read method calls. 
*/ if (zcrypt_rng_buffer_index == 0) { - rc = zcrypt_rng((char *) zcrypt_rng_buffer); + rc = zcrypt_rng((char *)zcrypt_rng_buffer); /* on failure: retry once again after a requested rescan */ if ((rc == -ENODEV) && (zcrypt_process_rescan())) - rc = zcrypt_rng((char *) zcrypt_rng_buffer); + rc = zcrypt_rng((char *)zcrypt_rng_buffer); if (rc < 0) return -EIO; zcrypt_rng_buffer_index = rc / sizeof(*data); @@ -2057,7 +2057,7 @@ int zcrypt_rng_device_add(void) mutex_lock(&zcrypt_rng_mutex); if (zcrypt_rng_device_count == 0) { - zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL); + zcrypt_rng_buffer = (u32 *)get_zeroed_page(GFP_KERNEL); if (!zcrypt_rng_buffer) { rc = -ENOMEM; goto out; @@ -2069,13 +2069,14 @@ int zcrypt_rng_device_add(void) if (rc) goto out_free; zcrypt_rng_device_count = 1; - } else + } else { zcrypt_rng_device_count++; + } mutex_unlock(&zcrypt_rng_mutex); return 0; out_free: - free_page((unsigned long) zcrypt_rng_buffer); + free_page((unsigned long)zcrypt_rng_buffer); out: mutex_unlock(&zcrypt_rng_mutex); return rc; @@ -2087,7 +2088,7 @@ void zcrypt_rng_device_remove(void) zcrypt_rng_device_count--; if (zcrypt_rng_device_count == 0) { hwrng_unregister(&zcrypt_rng_dev); - free_page((unsigned long) zcrypt_rng_buffer); + free_page((unsigned long)zcrypt_rng_buffer); } mutex_unlock(&zcrypt_rng_mutex); } diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h index 93e77e83ad14..f299deb8b8c7 100644 --- a/drivers/s390/crypto/zcrypt_api.h +++ b/drivers/s390/crypto/zcrypt_api.h @@ -170,7 +170,7 @@ static inline unsigned long z_copy_from_user(bool userspace, { if (likely(userspace)) return copy_from_user(to, from, n); - memcpy(to, (void __force *) from, n); + memcpy(to, (void __force *)from, n); return 0; } @@ -181,7 +181,7 @@ static inline unsigned long z_copy_to_user(bool userspace, { if (likely(userspace)) return copy_to_user(to, from, n); - memcpy((void __force *) to, from, n); + memcpy((void __force *)to, from, n); return 0; } diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c index fcbd537530e8..6ca675042416 100644 --- a/drivers/s390/crypto/zcrypt_card.c +++ b/drivers/s390/crypto/zcrypt_card.c @@ -138,7 +138,7 @@ struct zcrypt_card *zcrypt_card_alloc(void) { struct zcrypt_card *zc; - zc = kzalloc(sizeof(struct zcrypt_card), GFP_KERNEL); + zc = kzalloc(sizeof(*zc), GFP_KERNEL); if (!zc) return NULL; INIT_LIST_HEAD(&zc->list); diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h index f09bb850763b..6229ba9c56d9 100644 --- a/drivers/s390/crypto/zcrypt_cca_key.h +++ b/drivers/s390/crypto/zcrypt_cca_key.h @@ -11,7 +11,7 @@ #ifndef _ZCRYPT_CCA_KEY_H_ #define _ZCRYPT_CCA_KEY_H_ -struct T6_keyBlock_hdr { +struct t6_keyblock_hdr { unsigned short blen; unsigned short ulen; unsigned short flags; @@ -63,7 +63,7 @@ struct cca_public_sec { * complement of the residue modulo 8 of the sum of * (p_len + q_len + dp_len + dq_len + u_len). 
*/ -struct cca_pvt_ext_CRT_sec { +struct cca_pvt_ext_crt_sec { unsigned char section_identifier; unsigned char version; unsigned short section_length; @@ -108,9 +108,9 @@ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p) .section_identifier = 0x04, }; struct { - struct T6_keyBlock_hdr t6_hdr; - struct cca_token_hdr pubHdr; - struct cca_public_sec pubSec; + struct t6_keyblock_hdr t6_hdr; + struct cca_token_hdr pubhdr; + struct cca_public_sec pubsec; char exponent[0]; } __packed *key = p; unsigned char *temp; @@ -127,8 +127,8 @@ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p) memset(key, 0, sizeof(*key)); - key->pubHdr = static_pub_hdr; - key->pubSec = static_pub_sec; + key->pubhdr = static_pub_hdr; + key->pubsec = static_pub_sec; /* key parameter block */ temp = key->exponent; @@ -146,16 +146,16 @@ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p) if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength)) return -EFAULT; - key->pubSec.modulus_bit_len = 8 * mex->inputdatalength; - key->pubSec.modulus_byte_len = mex->inputdatalength; - key->pubSec.exponent_len = mex->inputdatalength - i; - key->pubSec.section_length = sizeof(key->pubSec) + - 2*mex->inputdatalength - i; - key->pubHdr.token_length = - key->pubSec.section_length + sizeof(key->pubHdr); - key->t6_hdr.ulen = key->pubHdr.token_length + 4; - key->t6_hdr.blen = key->pubHdr.token_length + 6; - return sizeof(*key) + 2*mex->inputdatalength - i; + key->pubsec.modulus_bit_len = 8 * mex->inputdatalength; + key->pubsec.modulus_byte_len = mex->inputdatalength; + key->pubsec.exponent_len = mex->inputdatalength - i; + key->pubsec.section_length = sizeof(key->pubsec) + + 2 * mex->inputdatalength - i; + key->pubhdr.token_length = + key->pubsec.section_length + sizeof(key->pubhdr); + key->t6_hdr.ulen = key->pubhdr.token_length + 4; + key->t6_hdr.blen = key->pubhdr.token_length + 6; + return sizeof(*key) + 2 * mex->inputdatalength - i; } /** @@ -177,9 +177,9 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p) }; static char pk_exponent[3] = { 0x01, 0x00, 0x01 }; struct { - struct T6_keyBlock_hdr t6_hdr; + struct t6_keyblock_hdr t6_hdr; struct cca_token_hdr token; - struct cca_pvt_ext_CRT_sec pvt; + struct cca_pvt_ext_crt_sec pvt; char key_parts[0]; } __packed *key = p; struct cca_public_sec *pub; @@ -198,8 +198,8 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p) short_len = (crt->inputdatalength + 1) / 2; long_len = short_len + 8; - pad_len = -(3*long_len + 2*short_len) & 7; - key_len = 3*long_len + 2*short_len + pad_len + crt->inputdatalength; + pad_len = -(3 * long_len + 2 * short_len) & 7; + key_len = 3 * long_len + 2 * short_len + pad_len + crt->inputdatalength; size = sizeof(*key) + key_len + sizeof(*pub) + 3; /* parameter block.key block */ @@ -223,15 +223,15 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p) /* key parts */ if (copy_from_user(key->key_parts, crt->np_prime, long_len) || copy_from_user(key->key_parts + long_len, - crt->nq_prime, short_len) || + crt->nq_prime, short_len) || copy_from_user(key->key_parts + long_len + short_len, - crt->bp_key, long_len) || - copy_from_user(key->key_parts + 2*long_len + short_len, - crt->bq_key, short_len) || - copy_from_user(key->key_parts + 2*long_len + 2*short_len, - crt->u_mult_inv, long_len)) + crt->bp_key, long_len) || + copy_from_user(key->key_parts + 2 * long_len + short_len, + crt->bq_key, 
short_len) || + copy_from_user(key->key_parts + 2 * long_len + 2 * short_len, + crt->u_mult_inv, long_len)) return -EFAULT; - memset(key->key_parts + 3*long_len + 2*short_len + pad_len, + memset(key->key_parts + 3 * long_len + 2 * short_len + pad_len, 0xff, crt->inputdatalength); pub = (struct cca_public_sec *)(key->key_parts + key_len); *pub = static_cca_pub_sec; @@ -241,7 +241,7 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p) * section. So, an arbitrary public exponent of 0x010001 will be * used. */ - memcpy((char *) (pub + 1), pk_exponent, 3); + memcpy((char *)(pub + 1), pk_exponent, 3); return size; } diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c index a507cafff3c5..60ba20a133be 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.c +++ b/drivers/s390/crypto/zcrypt_ccamisc.c @@ -53,26 +53,26 @@ static DEFINE_SPINLOCK(cca_info_list_lock); int cca_check_secaeskeytoken(debug_info_t *dbg, int dbflvl, const u8 *token, int keybitsize) { - struct secaeskeytoken *t = (struct secaeskeytoken *) token; + struct secaeskeytoken *t = (struct secaeskeytoken *)token; #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) if (t->type != TOKTYPE_CCA_INTERNAL) { if (dbg) DBF("%s token check failed, type 0x%02x != 0x%02x\n", - __func__, (int) t->type, TOKTYPE_CCA_INTERNAL); + __func__, (int)t->type, TOKTYPE_CCA_INTERNAL); return -EINVAL; } if (t->version != TOKVER_CCA_AES) { if (dbg) DBF("%s token check failed, version 0x%02x != 0x%02x\n", - __func__, (int) t->version, TOKVER_CCA_AES); + __func__, (int)t->version, TOKVER_CCA_AES); return -EINVAL; } if (keybitsize > 0 && t->bitsize != keybitsize) { if (dbg) DBF("%s token check failed, bitsize %d != %d\n", - __func__, (int) t->bitsize, keybitsize); + __func__, (int)t->bitsize, keybitsize); return -EINVAL; } @@ -93,7 +93,7 @@ int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl, const u8 *token, int keybitsize, int checkcpacfexport) { - struct cipherkeytoken *t = (struct cipherkeytoken *) token; + struct cipherkeytoken *t = (struct cipherkeytoken *)token; bool keybitsizeok = true; #define DBF(...) 
debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) @@ -101,37 +101,37 @@ int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl, if (t->type != TOKTYPE_CCA_INTERNAL) { if (dbg) DBF("%s token check failed, type 0x%02x != 0x%02x\n", - __func__, (int) t->type, TOKTYPE_CCA_INTERNAL); + __func__, (int)t->type, TOKTYPE_CCA_INTERNAL); return -EINVAL; } if (t->version != TOKVER_CCA_VLSC) { if (dbg) DBF("%s token check failed, version 0x%02x != 0x%02x\n", - __func__, (int) t->version, TOKVER_CCA_VLSC); + __func__, (int)t->version, TOKVER_CCA_VLSC); return -EINVAL; } if (t->algtype != 0x02) { if (dbg) DBF("%s token check failed, algtype 0x%02x != 0x02\n", - __func__, (int) t->algtype); + __func__, (int)t->algtype); return -EINVAL; } if (t->keytype != 0x0001) { if (dbg) DBF("%s token check failed, keytype 0x%04x != 0x0001\n", - __func__, (int) t->keytype); + __func__, (int)t->keytype); return -EINVAL; } if (t->plfver != 0x00 && t->plfver != 0x01) { if (dbg) DBF("%s token check failed, unknown plfver 0x%02x\n", - __func__, (int) t->plfver); + __func__, (int)t->plfver); return -EINVAL; } if (t->wpllen != 512 && t->wpllen != 576 && t->wpllen != 640) { if (dbg) DBF("%s token check failed, unknown wpllen %d\n", - __func__, (int) t->wpllen); + __func__, (int)t->wpllen); return -EINVAL; } if (keybitsize > 0) { @@ -180,26 +180,26 @@ int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl, const u8 *token, size_t keysize, int checkcpacfexport) { - struct eccprivkeytoken *t = (struct eccprivkeytoken *) token; + struct eccprivkeytoken *t = (struct eccprivkeytoken *)token; #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) if (t->type != TOKTYPE_CCA_INTERNAL_PKA) { if (dbg) DBF("%s token check failed, type 0x%02x != 0x%02x\n", - __func__, (int) t->type, TOKTYPE_CCA_INTERNAL_PKA); + __func__, (int)t->type, TOKTYPE_CCA_INTERNAL_PKA); return -EINVAL; } if (t->len > keysize) { if (dbg) DBF("%s token check failed, len %d > keysize %zu\n", - __func__, (int) t->len, keysize); + __func__, (int)t->len, keysize); return -EINVAL; } if (t->secid != 0x20) { if (dbg) DBF("%s token check failed, secid 0x%02x != 0x20\n", - __func__, (int) t->secid); + __func__, (int)t->secid); return -EINVAL; } if (checkcpacfexport && !(t->kutc & 0x01)) { @@ -222,9 +222,9 @@ EXPORT_SYMBOL(cca_check_sececckeytoken); * on failure. 
*/ static int alloc_and_prep_cprbmem(size_t paramblen, - u8 **pcprbmem, - struct CPRBX **preqCPRB, - struct CPRBX **prepCPRB) + u8 **p_cprb_mem, + struct CPRBX **p_req_cprb, + struct CPRBX **p_rep_cprb) { u8 *cprbmem; size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen; @@ -238,8 +238,8 @@ static int alloc_and_prep_cprbmem(size_t paramblen, if (!cprbmem) return -ENOMEM; - preqcblk = (struct CPRBX *) cprbmem; - prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen); + preqcblk = (struct CPRBX *)cprbmem; + prepcblk = (struct CPRBX *)(cprbmem + cprbplusparamblen); /* fill request cprb struct */ preqcblk->cprb_len = sizeof(struct CPRBX); @@ -248,14 +248,14 @@ static int alloc_and_prep_cprbmem(size_t paramblen, preqcblk->rpl_msgbl = cprbplusparamblen; if (paramblen) { preqcblk->req_parmb = - ((u8 __user *) preqcblk) + sizeof(struct CPRBX); + ((u8 __user *)preqcblk) + sizeof(struct CPRBX); preqcblk->rpl_parmb = - ((u8 __user *) prepcblk) + sizeof(struct CPRBX); + ((u8 __user *)prepcblk) + sizeof(struct CPRBX); } - *pcprbmem = cprbmem; - *preqCPRB = preqcblk; - *prepCPRB = prepcblk; + *p_cprb_mem = cprbmem; + *p_req_cprb = preqcblk; + *p_rep_cprb = prepcblk; return 0; } @@ -286,9 +286,9 @@ static inline void prep_xcrb(struct ica_xcRB *pxcrb, pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr); pxcrb->request_control_blk_length = preqcblk->cprb_len + preqcblk->req_parml; - pxcrb->request_control_blk_addr = (void __user *) preqcblk; + pxcrb->request_control_blk_addr = (void __user *)preqcblk; pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl; - pxcrb->reply_control_blk_addr = (void __user *) prepcblk; + pxcrb->reply_control_blk_addr = (void __user *)prepcblk; } /* @@ -345,7 +345,7 @@ int cca_genseckey(u16 cardnr, u16 domain, preqcblk->domain = domain; /* fill request cprb param block with KG request */ - preqparm = (struct kgreqparm __force *) preqcblk->req_parmb; + preqparm = (struct kgreqparm __force *)preqcblk->req_parmb; memcpy(preqparm->subfunc_code, "KG", 2); preqparm->rule_array_len = sizeof(preqparm->rule_array_len); preqparm->lv1.len = sizeof(struct lv1); @@ -387,7 +387,7 @@ int cca_genseckey(u16 cardnr, u16 domain, rc = zcrypt_send_cprb(&xcrb); if (rc) { DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n", - __func__, (int) cardnr, (int) domain, rc); + __func__, (int)cardnr, (int)domain, rc); goto out; } @@ -395,16 +395,16 @@ int cca_genseckey(u16 cardnr, u16 domain, if (prepcblk->ccp_rtcode != 0) { DEBUG_ERR("%s secure key generate failure, card response %d/%d\n", __func__, - (int) prepcblk->ccp_rtcode, - (int) prepcblk->ccp_rscode); + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); rc = -EIO; goto out; } /* process response cprb param block */ - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); - prepcblk->rpl_parmb = (u8 __user *) ptr; - prepparm = (struct kgrepparm *) ptr; + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); + prepcblk->rpl_parmb = (u8 __user *)ptr; + prepparm = (struct kgrepparm *)ptr; /* check length of the returned secure key token */ seckeysize = prepparm->lv3.keyblock.toklen @@ -419,7 +419,7 @@ int cca_genseckey(u16 cardnr, u16 domain, /* check secure key token */ rc = cca_check_secaeskeytoken(zcrypt_dbf_info, DBF_ERR, - prepparm->lv3.keyblock.tok, 8*keysize); + prepparm->lv3.keyblock.tok, 8 * keysize); if (rc) { rc = -EIO; goto out; @@ -486,7 +486,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, preqcblk->domain = domain; /* fill request cprb param block with CM request */ - preqparm = (struct 
cmreqparm __force *) preqcblk->req_parmb; + preqparm = (struct cmreqparm __force *)preqcblk->req_parmb; memcpy(preqparm->subfunc_code, "CM", 2); memcpy(preqparm->rule_array, "AES ", 8); preqparm->rule_array_len = @@ -512,7 +512,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, } preqparm->lv1.len = sizeof(struct lv1) + keysize; memcpy(preqparm->lv1.clrkey, clrkey, keysize); - plv2 = (struct lv2 *) (((u8 *) &preqparm->lv2) + keysize); + plv2 = (struct lv2 *)(((u8 *)&preqparm->lv2) + keysize); plv2->len = sizeof(struct lv2); plv2->keyid.len = sizeof(struct keyid); plv2->keyid.attr = 0x30; @@ -525,7 +525,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, rc = zcrypt_send_cprb(&xcrb); if (rc) { DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", - __func__, (int) cardnr, (int) domain, rc); + __func__, (int)cardnr, (int)domain, rc); goto out; } @@ -533,16 +533,16 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, if (prepcblk->ccp_rtcode != 0) { DEBUG_ERR("%s clear key import failure, card response %d/%d\n", __func__, - (int) prepcblk->ccp_rtcode, - (int) prepcblk->ccp_rscode); + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); rc = -EIO; goto out; } /* process response cprb param block */ - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); - prepcblk->rpl_parmb = (u8 __user *) ptr; - prepparm = (struct cmrepparm *) ptr; + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); + prepcblk->rpl_parmb = (u8 __user *)ptr; + prepparm = (struct cmrepparm *)ptr; /* check length of the returned secure key token */ seckeysize = prepparm->lv3.keyblock.toklen @@ -557,7 +557,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, /* check secure key token */ rc = cca_check_secaeskeytoken(zcrypt_dbf_info, DBF_ERR, - prepparm->lv3.keyblock.tok, 8*keysize); + prepparm->lv3.keyblock.tok, 8 * keysize); if (rc) { rc = -EIO; goto out; @@ -632,7 +632,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain, preqcblk->domain = domain; /* fill request cprb param block with USK request */ - preqparm = (struct uskreqparm __force *) preqcblk->req_parmb; + preqparm = (struct uskreqparm __force *)preqcblk->req_parmb; memcpy(preqparm->subfunc_code, "US", 2); preqparm->rule_array_len = sizeof(preqparm->rule_array_len); preqparm->lv1.len = sizeof(struct lv1); @@ -652,7 +652,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain, rc = zcrypt_send_cprb(&xcrb); if (rc) { DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", - __func__, (int) cardnr, (int) domain, rc); + __func__, (int)cardnr, (int)domain, rc); goto out; } @@ -660,8 +660,8 @@ int cca_sec2protkey(u16 cardnr, u16 domain, if (prepcblk->ccp_rtcode != 0) { DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n", __func__, - (int) prepcblk->ccp_rtcode, - (int) prepcblk->ccp_rscode); + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290) rc = -EAGAIN; else @@ -671,37 +671,37 @@ int cca_sec2protkey(u16 cardnr, u16 domain, if (prepcblk->ccp_rscode != 0) { DEBUG_WARN("%s unwrap secure key warning, card response %d/%d\n", __func__, - (int) prepcblk->ccp_rtcode, - (int) prepcblk->ccp_rscode); + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); } /* process response cprb param block */ - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); - prepcblk->rpl_parmb = (u8 __user *) ptr; - prepparm = (struct uskrepparm *) ptr; + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); + prepcblk->rpl_parmb = (u8 __user *)ptr; + prepparm = (struct 
uskrepparm *)ptr; /* check the returned keyblock */ if (prepparm->lv3.ckb.version != 0x01 && prepparm->lv3.ckb.version != 0x02) { DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n", - __func__, (int) prepparm->lv3.ckb.version); + __func__, (int)prepparm->lv3.ckb.version); rc = -EIO; goto out; } /* copy the tanslated protected key */ switch (prepparm->lv3.ckb.len) { - case 16+32: + case 16 + 32: /* AES 128 protected key */ if (protkeytype) *protkeytype = PKEY_KEYTYPE_AES_128; break; - case 24+32: + case 24 + 32: /* AES 192 protected key */ if (protkeytype) *protkeytype = PKEY_KEYTYPE_AES_192; break; - case 32+32: + case 32 + 32: /* AES 256 protected key */ if (protkeytype) *protkeytype = PKEY_KEYTYPE_AES_256; @@ -751,7 +751,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, struct gkreqparm { u8 subfunc_code[2]; u16 rule_array_len; - char rule_array[2*8]; + char rule_array[2 * 8]; struct { u16 len; u8 key_type_1[8]; @@ -827,10 +827,10 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, preqcblk->req_parml = sizeof(struct gkreqparm); /* prepare request param block with GK request */ - preqparm = (struct gkreqparm __force *) preqcblk->req_parmb; + preqparm = (struct gkreqparm __force *)preqcblk->req_parmb; memcpy(preqparm->subfunc_code, "GK", 2); preqparm->rule_array_len = sizeof(uint16_t) + 2 * 8; - memcpy(preqparm->rule_array, "AES OP ", 2*8); + memcpy(preqparm->rule_array, "AES OP ", 2 * 8); /* prepare vud block */ preqparm->vud.len = sizeof(preqparm->vud); @@ -869,9 +869,9 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, /* patch the skeleton key token export flags inside the kb block */ if (keygenflags) { - t = (struct cipherkeytoken *) preqparm->kb.tlv3.gen_key_id_1; - t->kmf1 |= (u16) (keygenflags & 0x0000FF00); - t->kmf1 &= (u16) ~(keygenflags & 0x000000FF); + t = (struct cipherkeytoken *)preqparm->kb.tlv3.gen_key_id_1; + t->kmf1 |= (u16)(keygenflags & 0x0000FF00); + t->kmf1 &= (u16)~(keygenflags & 0x000000FF); } /* prepare xcrb struct */ @@ -882,7 +882,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, if (rc) { DEBUG_ERR( "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", - __func__, (int) cardnr, (int) domain, rc); + __func__, (int)cardnr, (int)domain, rc); goto out; } @@ -891,16 +891,16 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, DEBUG_ERR( "%s cipher key generate failure, card response %d/%d\n", __func__, - (int) prepcblk->ccp_rtcode, - (int) prepcblk->ccp_rscode); + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); rc = -EIO; goto out; } /* process response cprb param block */ - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); - prepcblk->rpl_parmb = (u8 __user *) ptr; - prepparm = (struct gkrepparm *) ptr; + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); + prepcblk->rpl_parmb = (u8 __user *)ptr; + prepparm = (struct gkrepparm *)ptr; /* do some plausibility checks on the key block */ if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) || @@ -921,7 +921,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, } /* copy the generated vlsc key token */ - t = (struct cipherkeytoken *) prepparm->kb.tlv1.gen_key; + t = (struct cipherkeytoken *)prepparm->kb.tlv1.gen_key; if (keybuf) { if (*keybufsize >= t->len) memcpy(keybuf, t, t->len); @@ -1006,7 +1006,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, preqcblk->req_parml = 0; /* prepare request param block with IP 
request */ - preq_ra_block = (struct rule_array_block __force *) preqcblk->req_parmb; + preq_ra_block = (struct rule_array_block __force *)preqcblk->req_parmb; memcpy(preq_ra_block->subfunc_code, "IP", 2); preq_ra_block->rule_array_len = sizeof(uint16_t) + 2 * 8; memcpy(preq_ra_block->rule_array, rule_array_1, 8); @@ -1050,7 +1050,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, if (rc) { DEBUG_ERR( "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", - __func__, (int) cardnr, (int) domain, rc); + __func__, (int)cardnr, (int)domain, rc); goto out; } @@ -1059,16 +1059,16 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, DEBUG_ERR( "%s CSNBKPI2 failure, card response %d/%d\n", __func__, - (int) prepcblk->ccp_rtcode, - (int) prepcblk->ccp_rscode); + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); rc = -EIO; goto out; } /* process response cprb param block */ - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); - prepcblk->rpl_parmb = (u8 __user *) ptr; - prepparm = (struct iprepparm *) ptr; + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); + prepcblk->rpl_parmb = (u8 __user *)ptr; + prepparm = (struct iprepparm *)ptr; /* do some plausibility checks on the key block */ if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) || @@ -1082,7 +1082,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, /* do not check the key here, it may be incomplete */ /* copy the vlsc key token back */ - t = (struct cipherkeytoken *) prepparm->kb.tlv1.key_token; + t = (struct cipherkeytoken *)prepparm->kb.tlv1.key_token; memcpy(key_token, t, t->len); *key_token_size = t->len; @@ -1117,9 +1117,9 @@ int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags, /* patch the skeleton key token export flags */ if (keygenflags) { - t = (struct cipherkeytoken *) token; - t->kmf1 |= (u16) (keygenflags & 0x0000FF00); - t->kmf1 &= (u16) ~(keygenflags & 0x000000FF); + t = (struct cipherkeytoken *)token; + t->kmf1 |= (u16)(keygenflags & 0x0000FF00); + t->kmf1 &= (u16)~(keygenflags & 0x000000FF); } /* @@ -1241,7 +1241,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, preqcblk->domain = domain; /* fill request cprb param block with AU request */ - preqparm = (struct aureqparm __force *) preqcblk->req_parmb; + preqparm = (struct aureqparm __force *)preqcblk->req_parmb; memcpy(preqparm->subfunc_code, "AU", 2); preqparm->rule_array_len = sizeof(preqparm->rule_array_len) @@ -1267,7 +1267,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, if (rc) { DEBUG_ERR( "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", - __func__, (int) cardnr, (int) domain, rc); + __func__, (int)cardnr, (int)domain, rc); goto out; } @@ -1276,8 +1276,8 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, DEBUG_ERR( "%s unwrap secure key failure, card response %d/%d\n", __func__, - (int) prepcblk->ccp_rtcode, - (int) prepcblk->ccp_rscode); + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290) rc = -EAGAIN; else @@ -1288,44 +1288,44 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, DEBUG_WARN( "%s unwrap secure key warning, card response %d/%d\n", __func__, - (int) prepcblk->ccp_rtcode, - (int) prepcblk->ccp_rscode); + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); } /* process response cprb param block */ - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); - prepcblk->rpl_parmb = (u8 __user *) ptr; - prepparm = (struct aurepparm *) ptr; + ptr = ((u8 *)prepcblk) + sizeof(struct 
CPRBX); + prepcblk->rpl_parmb = (u8 __user *)ptr; + prepparm = (struct aurepparm *)ptr; /* check the returned keyblock */ if (prepparm->vud.ckb.version != 0x01 && prepparm->vud.ckb.version != 0x02) { DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n", - __func__, (int) prepparm->vud.ckb.version); + __func__, (int)prepparm->vud.ckb.version); rc = -EIO; goto out; } if (prepparm->vud.ckb.algo != 0x02) { DEBUG_ERR( "%s reply param keyblock algo mismatch 0x%02x != 0x02\n", - __func__, (int) prepparm->vud.ckb.algo); + __func__, (int)prepparm->vud.ckb.algo); rc = -EIO; goto out; } /* copy the translated protected key */ switch (prepparm->vud.ckb.keylen) { - case 16+32: + case 16 + 32: /* AES 128 protected key */ if (protkeytype) *protkeytype = PKEY_KEYTYPE_AES_128; break; - case 24+32: + case 24 + 32: /* AES 192 protected key */ if (protkeytype) *protkeytype = PKEY_KEYTYPE_AES_192; break; - case 32+32: + case 32 + 32: /* AES 256 protected key */ if (protkeytype) *protkeytype = PKEY_KEYTYPE_AES_256; @@ -1410,7 +1410,7 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, preqcblk->domain = domain; /* fill request cprb param block with AU request */ - preqparm = (struct aureqparm __force *) preqcblk->req_parmb; + preqparm = (struct aureqparm __force *)preqcblk->req_parmb; memcpy(preqparm->subfunc_code, "AU", 2); preqparm->rule_array_len = sizeof(preqparm->rule_array_len) @@ -1436,7 +1436,7 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, if (rc) { DEBUG_ERR( "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", - __func__, (int) cardnr, (int) domain, rc); + __func__, (int)cardnr, (int)domain, rc); goto out; } @@ -1445,8 +1445,8 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, DEBUG_ERR( "%s unwrap secure key failure, card response %d/%d\n", __func__, - (int) prepcblk->ccp_rtcode, - (int) prepcblk->ccp_rscode); + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290) rc = -EAGAIN; else @@ -1457,26 +1457,26 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, DEBUG_WARN( "%s unwrap secure key warning, card response %d/%d\n", __func__, - (int) prepcblk->ccp_rtcode, - (int) prepcblk->ccp_rscode); + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); } /* process response cprb param block */ - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); - prepcblk->rpl_parmb = (u8 __user *) ptr; - prepparm = (struct aurepparm *) ptr; + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); + prepcblk->rpl_parmb = (u8 __user *)ptr; + prepparm = (struct aurepparm *)ptr; /* check the returned keyblock */ if (prepparm->vud.ckb.version != 0x02) { DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x02\n", - __func__, (int) prepparm->vud.ckb.version); + __func__, (int)prepparm->vud.ckb.version); rc = -EIO; goto out; } if (prepparm->vud.ckb.algo != 0x81) { DEBUG_ERR( "%s reply param keyblock algo mismatch 0x%02x != 0x81\n", - __func__, (int) prepparm->vud.ckb.algo); + __func__, (int)prepparm->vud.ckb.algo); rc = -EIO; goto out; } @@ -1537,7 +1537,7 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain, preqcblk->domain = domain; /* fill request cprb param block with FQ request */ - preqparm = (struct fqreqparm __force *) preqcblk->req_parmb; + preqparm = (struct fqreqparm __force *)preqcblk->req_parmb; memcpy(preqparm->subfunc_code, "FQ", 2); memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array)); preqparm->rule_array_len = @@ -1553,7 +1553,7 @@ int 
cca_query_crypto_facility(u16 cardnr, u16 domain, rc = zcrypt_send_cprb(&xcrb); if (rc) { DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", - __func__, (int) cardnr, (int) domain, rc); + __func__, (int)cardnr, (int)domain, rc); goto out; } @@ -1561,20 +1561,20 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain, if (prepcblk->ccp_rtcode != 0) { DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n", __func__, - (int) prepcblk->ccp_rtcode, - (int) prepcblk->ccp_rscode); + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); rc = -EIO; goto out; } /* process response cprb param block */ - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); - prepcblk->rpl_parmb = (u8 __user *) ptr; - prepparm = (struct fqrepparm *) ptr; + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); + prepcblk->rpl_parmb = (u8 __user *)ptr; + prepparm = (struct fqrepparm *)ptr; ptr = prepparm->lvdata; /* check and possibly copy reply rule array */ - len = *((u16 *) ptr); + len = *((u16 *)ptr); if (len > sizeof(u16)) { ptr += sizeof(u16); len -= sizeof(u16); @@ -1585,7 +1585,7 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain, ptr += len; } /* check and possible copy reply var array */ - len = *((u16 *) ptr); + len = *((u16 *)ptr); if (len > sizeof(u16)) { ptr += sizeof(u16); len -= sizeof(u16); @@ -1696,30 +1696,30 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) ci->hwtype = devstat.hwtype; /* prep page for rule array and var array use */ - pg = (u8 *) __get_free_page(GFP_KERNEL); + pg = (u8 *)__get_free_page(GFP_KERNEL); if (!pg) return -ENOMEM; rarray = pg; - varray = pg + PAGE_SIZE/2; - rlen = vlen = PAGE_SIZE/2; + varray = pg + PAGE_SIZE / 2; + rlen = vlen = PAGE_SIZE / 2; /* QF for this card/domain */ rc = cca_query_crypto_facility(cardnr, domain, "STATICSA", rarray, &rlen, varray, &vlen); - if (rc == 0 && rlen >= 10*8 && vlen >= 204) { + if (rc == 0 && rlen >= 10 * 8 && vlen >= 204) { memcpy(ci->serial, rarray, 8); - ci->new_asym_mk_state = (char) rarray[4*8]; - ci->cur_asym_mk_state = (char) rarray[5*8]; - ci->old_asym_mk_state = (char) rarray[6*8]; + ci->new_asym_mk_state = (char)rarray[4 * 8]; + ci->cur_asym_mk_state = (char)rarray[5 * 8]; + ci->old_asym_mk_state = (char)rarray[6 * 8]; if (ci->old_asym_mk_state == '2') memcpy(ci->old_asym_mkvp, varray + 64, 16); if (ci->cur_asym_mk_state == '2') memcpy(ci->cur_asym_mkvp, varray + 84, 16); if (ci->new_asym_mk_state == '3') memcpy(ci->new_asym_mkvp, varray + 104, 16); - ci->new_aes_mk_state = (char) rarray[7*8]; - ci->cur_aes_mk_state = (char) rarray[8*8]; - ci->old_aes_mk_state = (char) rarray[9*8]; + ci->new_aes_mk_state = (char)rarray[7 * 8]; + ci->cur_aes_mk_state = (char)rarray[8 * 8]; + ci->old_aes_mk_state = (char)rarray[9 * 8]; if (ci->old_aes_mk_state == '2') memcpy(&ci->old_aes_mkvp, varray + 172, 8); if (ci->cur_aes_mk_state == '2') @@ -1730,13 +1730,13 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) } if (!found) goto out; - rlen = vlen = PAGE_SIZE/2; + rlen = vlen = PAGE_SIZE / 2; rc = cca_query_crypto_facility(cardnr, domain, "STATICSB", rarray, &rlen, varray, &vlen); - if (rc == 0 && rlen >= 13*8 && vlen >= 240) { - ci->new_apka_mk_state = (char) rarray[10*8]; - ci->cur_apka_mk_state = (char) rarray[11*8]; - ci->old_apka_mk_state = (char) rarray[12*8]; + if (rc == 0 && rlen >= 13 * 8 && vlen >= 240) { + ci->new_apka_mk_state = (char)rarray[10 * 8]; + ci->cur_apka_mk_state = (char)rarray[11 * 8]; + ci->old_apka_mk_state = (char)rarray[12 * 8]; if 
(ci->old_apka_mk_state == '2') memcpy(&ci->old_apka_mkvp, varray + 208, 8); if (ci->cur_apka_mk_state == '2') @@ -1747,7 +1747,7 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) } out: - free_page((unsigned long) pg); + free_page((unsigned long)pg); return found == 2 ? 0 : -ENOENT; } @@ -1855,8 +1855,9 @@ static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain, if (pdomain) *pdomain = dom; rc = (i < MAX_ZDEV_ENTRIES_EXT ? 0 : 1); - } else + } else { rc = -ENODEV; + } kvfree(device_status); return rc; @@ -1870,7 +1871,7 @@ int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify) { u64 mkvp; int minhwtype = 0; - const struct keytoken_header *hdr = (struct keytoken_header *) key; + const struct keytoken_header *hdr = (struct keytoken_header *)key; if (hdr->type != TOKTYPE_CCA_INTERNAL) return -EINVAL; @@ -1963,7 +1964,7 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, } /* apqn passed all filtering criterons, add to the array */ if (_nr_apqns < 256) - _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom); + _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); } /* nothing found ? */ diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index 2bd49950ba81..83f692c9c197 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c @@ -34,10 +34,11 @@ #define CEX3A_MAX_RESPONSE_SIZE 0x210 /* 512 bit modulus * (max outputdatalength) + - * type80_hdr*/ + * type80_hdr + */ #define CEX3A_MAX_MESSAGE_SIZE sizeof(struct type50_crb3_msg) -#define CEX2A_CLEANUP_TIME (15*HZ) +#define CEX2A_CLEANUP_TIME (15 * HZ) #define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME MODULE_AUTHOR("IBM Corporation"); @@ -117,9 +118,8 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev) zc->online = 1; rc = zcrypt_card_register(zc); - if (rc) { + if (rc) zcrypt_card_free(zc); - } return rc; } @@ -176,9 +176,8 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev) aq->request_timeout = CEX2A_CLEANUP_TIME; dev_set_drvdata(&ap_dev->device, zq); rc = zcrypt_queue_register(zq); - if (rc) { + if (rc) zcrypt_queue_free(zq); - } return rc; } diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c index 6360fdd06160..cb7849defce3 100644 --- a/drivers/s390/crypto/zcrypt_cex2c.c +++ b/drivers/s390/crypto/zcrypt_cex2c.c @@ -31,8 +31,8 @@ #define CEX2C_MAX_MOD_SIZE 256 /* 2048 bits */ #define CEX3C_MIN_MOD_SIZE 16 /* 128 bits */ #define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */ -#define CEX2C_MAX_XCRB_MESSAGE_SIZE (12*1024) -#define CEX2C_CLEANUP_TIME (15*HZ) +#define CEX2C_MAX_XCRB_MESSAGE_SIZE (12 * 1024) +#define CEX2C_CLEANUP_TIME (15 * HZ) MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("CEX2C/CEX3C Cryptographic Coprocessor device driver, " \ @@ -200,11 +200,11 @@ static int zcrypt_cex2c_rng_supported(struct ap_queue *aq) int rc, i; ap_init_message(&ap_msg); - ap_msg.msg = (void *) get_zeroed_page(GFP_KERNEL); + ap_msg.msg = (void *)get_zeroed_page(GFP_KERNEL); if (!ap_msg.msg) return -ENOMEM; - rng_type6CPRB_msgX(&ap_msg, 4, &domain); + rng_type6cprb_msgx(&ap_msg, 4, &domain); msg = ap_msg.msg; msg->cprbx.domain = AP_QID_QUEUE(aq->qid); @@ -233,7 +233,7 @@ static int zcrypt_cex2c_rng_supported(struct ap_queue *aq) else rc = 0; out_free: - free_page((unsigned long) ap_msg.msg); + free_page((unsigned long)ap_msg.msg); return rc; } diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c index f4319d072016..b03916b7538b 100644 --- 
a/drivers/s390/crypto/zcrypt_cex4.c +++ b/drivers/s390/crypto/zcrypt_cex4.c @@ -33,7 +33,7 @@ * But the maximum time limit managed by the stomper code is set to 60sec. * Hence we have to wait at least that time period. */ -#define CEX4_CLEANUP_TIME (900*HZ) +#define CEX4_CLEANUP_TIME (900 * HZ) MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("CEX[45678] Cryptographic Card device driver, " \ @@ -364,8 +364,9 @@ static ssize_t ep11_mkvps_show(struct device *dev, bin2hex(buf + n, di.cur_wkvp, sizeof(di.cur_wkvp)); n += 2 * sizeof(di.cur_wkvp); n += scnprintf(buf + n, PAGE_SIZE - n, "\n"); - } else + } else { n = scnprintf(buf, PAGE_SIZE, "WK CUR: - -\n"); + } if (di.new_wk_state == '0') { n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s -\n", @@ -376,8 +377,9 @@ static ssize_t ep11_mkvps_show(struct device *dev, bin2hex(buf + n, di.new_wkvp, sizeof(di.new_wkvp)); n += 2 * sizeof(di.new_wkvp); n += scnprintf(buf + n, PAGE_SIZE - n, "\n"); - } else + } else { n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: - -\n"); + } return n; } diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c index 98d33f932b0b..b1c29017be5b 100644 --- a/drivers/s390/crypto/zcrypt_ep11misc.c +++ b/drivers/s390/crypto/zcrypt_ep11misc.c @@ -119,8 +119,8 @@ static void __exit card_cache_free(void) int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl, const u8 *key, size_t keylen, int checkcpacfexp) { - struct ep11kblob_header *hdr = (struct ep11kblob_header *) key; - struct ep11keyblob *kb = (struct ep11keyblob *) (key + sizeof(*hdr)); + struct ep11kblob_header *hdr = (struct ep11kblob_header *)key; + struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr)); #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) @@ -133,38 +133,38 @@ int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl, if (hdr->type != TOKTYPE_NON_CCA) { if (dbg) DBF("%s key check failed, type 0x%02x != 0x%02x\n", - __func__, (int) hdr->type, TOKTYPE_NON_CCA); + __func__, (int)hdr->type, TOKTYPE_NON_CCA); return -EINVAL; } if (hdr->hver != 0x00) { if (dbg) DBF("%s key check failed, header version 0x%02x != 0x00\n", - __func__, (int) hdr->hver); + __func__, (int)hdr->hver); return -EINVAL; } if (hdr->version != TOKVER_EP11_AES_WITH_HEADER) { if (dbg) DBF("%s key check failed, version 0x%02x != 0x%02x\n", - __func__, (int) hdr->version, TOKVER_EP11_AES_WITH_HEADER); + __func__, (int)hdr->version, TOKVER_EP11_AES_WITH_HEADER); return -EINVAL; } if (hdr->len > keylen) { if (dbg) DBF("%s key check failed, header len %d keylen %zu mismatch\n", - __func__, (int) hdr->len, keylen); + __func__, (int)hdr->len, keylen); return -EINVAL; } if (hdr->len < sizeof(*hdr) + sizeof(*kb)) { if (dbg) DBF("%s key check failed, header len %d < %zu\n", - __func__, (int) hdr->len, sizeof(*hdr) + sizeof(*kb)); + __func__, (int)hdr->len, sizeof(*hdr) + sizeof(*kb)); return -EINVAL; } if (kb->version != EP11_STRUCT_MAGIC) { if (dbg) DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n", - __func__, (int) kb->version, EP11_STRUCT_MAGIC); + __func__, (int)kb->version, EP11_STRUCT_MAGIC); return -EINVAL; } if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) { @@ -186,8 +186,8 @@ EXPORT_SYMBOL(ep11_check_aes_key_with_hdr); int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl, const u8 *key, size_t keylen, int checkcpacfexp) { - struct ep11kblob_header *hdr = (struct ep11kblob_header *) key; - struct ep11keyblob *kb = (struct ep11keyblob *) (key + sizeof(*hdr)); + struct 
ep11kblob_header *hdr = (struct ep11kblob_header *)key; + struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr)); #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) @@ -200,38 +200,38 @@ int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl, if (hdr->type != TOKTYPE_NON_CCA) { if (dbg) DBF("%s key check failed, type 0x%02x != 0x%02x\n", - __func__, (int) hdr->type, TOKTYPE_NON_CCA); + __func__, (int)hdr->type, TOKTYPE_NON_CCA); return -EINVAL; } if (hdr->hver != 0x00) { if (dbg) DBF("%s key check failed, header version 0x%02x != 0x00\n", - __func__, (int) hdr->hver); + __func__, (int)hdr->hver); return -EINVAL; } if (hdr->version != TOKVER_EP11_ECC_WITH_HEADER) { if (dbg) DBF("%s key check failed, version 0x%02x != 0x%02x\n", - __func__, (int) hdr->version, TOKVER_EP11_ECC_WITH_HEADER); + __func__, (int)hdr->version, TOKVER_EP11_ECC_WITH_HEADER); return -EINVAL; } if (hdr->len > keylen) { if (dbg) DBF("%s key check failed, header len %d keylen %zu mismatch\n", - __func__, (int) hdr->len, keylen); + __func__, (int)hdr->len, keylen); return -EINVAL; } if (hdr->len < sizeof(*hdr) + sizeof(*kb)) { if (dbg) DBF("%s key check failed, header len %d < %zu\n", - __func__, (int) hdr->len, sizeof(*hdr) + sizeof(*kb)); + __func__, (int)hdr->len, sizeof(*hdr) + sizeof(*kb)); return -EINVAL; } if (kb->version != EP11_STRUCT_MAGIC) { if (dbg) DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n", - __func__, (int) kb->version, EP11_STRUCT_MAGIC); + __func__, (int)kb->version, EP11_STRUCT_MAGIC); return -EINVAL; } if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) { @@ -254,7 +254,7 @@ EXPORT_SYMBOL(ep11_check_ecc_key_with_hdr); int ep11_check_aes_key(debug_info_t *dbg, int dbflvl, const u8 *key, size_t keylen, int checkcpacfexp) { - struct ep11keyblob *kb = (struct ep11keyblob *) key; + struct ep11keyblob *kb = (struct ep11keyblob *)key; #define DBF(...) 
debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) @@ -267,32 +267,32 @@ int ep11_check_aes_key(debug_info_t *dbg, int dbflvl, if (kb->head.type != TOKTYPE_NON_CCA) { if (dbg) DBF("%s key check failed, type 0x%02x != 0x%02x\n", - __func__, (int) kb->head.type, TOKTYPE_NON_CCA); + __func__, (int)kb->head.type, TOKTYPE_NON_CCA); return -EINVAL; } if (kb->head.version != TOKVER_EP11_AES) { if (dbg) DBF("%s key check failed, version 0x%02x != 0x%02x\n", - __func__, (int) kb->head.version, TOKVER_EP11_AES); + __func__, (int)kb->head.version, TOKVER_EP11_AES); return -EINVAL; } if (kb->head.len > keylen) { if (dbg) DBF("%s key check failed, header len %d keylen %zu mismatch\n", - __func__, (int) kb->head.len, keylen); + __func__, (int)kb->head.len, keylen); return -EINVAL; } if (kb->head.len < sizeof(*kb)) { if (dbg) DBF("%s key check failed, header len %d < %zu\n", - __func__, (int) kb->head.len, sizeof(*kb)); + __func__, (int)kb->head.len, sizeof(*kb)); return -EINVAL; } if (kb->version != EP11_STRUCT_MAGIC) { if (dbg) DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n", - __func__, (int) kb->version, EP11_STRUCT_MAGIC); + __func__, (int)kb->version, EP11_STRUCT_MAGIC); return -EINVAL; } if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) { @@ -347,11 +347,11 @@ static int asn1tag_write(u8 *ptr, u8 tag, const u8 *pvalue, u16 valuelen) } if (valuelen > 127) { ptr[1] = 0x81; - ptr[2] = (u8) valuelen; + ptr[2] = (u8)valuelen; memcpy(ptr + 3, pvalue, valuelen); return 3 + valuelen; } - ptr[1] = (u8) valuelen; + ptr[1] = (u8)valuelen; memcpy(ptr + 2, pvalue, valuelen); return 2 + valuelen; } @@ -389,11 +389,11 @@ static inline void prep_urb(struct ep11_urb *u, struct ep11_cprb *req, size_t req_len, struct ep11_cprb *rep, size_t rep_len) { - u->targets = (u8 __user *) t; + u->targets = (u8 __user *)t; u->targets_num = nt; - u->req = (u8 __user *) req; + u->req = (u8 __user *)req; u->req_len = req_len; - u->resp = (u8 __user *) rep; + u->resp = (u8 __user *)rep; u->resp_len = rep_len; } @@ -462,7 +462,6 @@ static int check_reply_pl(const u8 *pl, const char *func) return 0; } - /* * Helper function which does an ep11 query with given query type. 
*/ @@ -496,7 +495,7 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, req = alloc_cprb(sizeof(struct ep11_info_req_pl)); if (!req) goto out; - req_pl = (struct ep11_info_req_pl *) (((u8 *) req) + sizeof(*req)); + req_pl = (struct ep11_info_req_pl *)(((u8 *)req) + sizeof(*req)); prep_head(&req_pl->head, sizeof(*req_pl), api, 38); /* get xcp info */ req_pl->query_type_tag = 0x04; req_pl->query_type_len = sizeof(u32); @@ -508,10 +507,10 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, rep = alloc_cprb(sizeof(struct ep11_info_rep_pl) + buflen); if (!rep) goto out; - rep_pl = (struct ep11_info_rep_pl *) (((u8 *) rep) + sizeof(*rep)); + rep_pl = (struct ep11_info_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL); + urb = kmalloc(sizeof(*urb), GFP_KERNEL); if (!urb) goto out; target.ap_id = cardnr; @@ -524,7 +523,7 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, if (rc) { DEBUG_ERR( "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", - __func__, (int) cardnr, (int) domain, rc); + __func__, (int)cardnr, (int)domain, rc); goto out; } @@ -543,7 +542,7 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, goto out; } - memcpy(buf, ((u8 *) rep_pl) + sizeof(*rep_pl), rep_pl->data_len); + memcpy(buf, ((u8 *)rep_pl) + sizeof(*rep_pl), rep_pl->data_len); out: kfree(req); @@ -592,7 +591,7 @@ int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify) return -ENOMEM; rc = ep11_query_info(card, AUTOSEL_DOM, 0x01 /* module info query */, - sizeof(*pmqi), (u8 *) pmqi); + sizeof(*pmqi), (u8 *)pmqi); if (rc) { if (rc == -ENODEV) card_cache_scrub(card); @@ -632,7 +631,7 @@ int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info) return -ENOMEM; rc = ep11_query_info(card, domain, 0x03 /* domain info query */, - sizeof(*p_dom_info), (u8 *) p_dom_info); + sizeof(*p_dom_info), (u8 *)p_dom_info); if (rc) goto out; @@ -644,8 +643,8 @@ int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info) info->cur_wk_state = '1'; memcpy(info->cur_wkvp, p_dom_info->cur_WK_VP, 32); } - if (p_dom_info->dom_flags & 0x04 /* new wk present */ - || p_dom_info->dom_flags & 0x08 /* new wk committed */) { + if (p_dom_info->dom_flags & 0x04 || /* new wk present */ + p_dom_info->dom_flags & 0x08 /* new wk committed */) { info->new_wk_state = p_dom_info->dom_flags & 0x08 ? '2' : '1'; memcpy(info->new_wkvp, p_dom_info->new_WK_VP, 32); @@ -722,7 +721,7 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, req = alloc_cprb(sizeof(struct keygen_req_pl)); if (!req) goto out; - req_pl = (struct keygen_req_pl *) (((u8 *) req) + sizeof(*req)); + req_pl = (struct keygen_req_pl *)(((u8 *)req) + sizeof(*req)); api = (!keygenflags || keygenflags & 0x00200000) ? 
4 : 1; prep_head(&req_pl->head, sizeof(*req_pl), api, 21); /* GenerateKey */ req_pl->var_tag = 0x04; @@ -746,10 +745,10 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, rep = alloc_cprb(sizeof(struct keygen_rep_pl)); if (!rep) goto out; - rep_pl = (struct keygen_rep_pl *) (((u8 *) rep) + sizeof(*rep)); + rep_pl = (struct keygen_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL); + urb = kmalloc(sizeof(*urb), GFP_KERNEL); if (!urb) goto out; target.ap_id = card; @@ -762,7 +761,7 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, if (rc) { DEBUG_ERR( "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", - __func__, (int) card, (int) domain, rc); + __func__, (int)card, (int)domain, rc); goto out; } @@ -784,7 +783,7 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, /* copy key blob and set header values */ memcpy(keybuf, rep_pl->data, rep_pl->data_len); *keybufsize = rep_pl->data_len; - kb = (struct ep11keyblob *) keybuf; + kb = (struct ep11keyblob *)keybuf; kb->head.type = TOKTYPE_NON_CCA; kb->head.len = rep_pl->data_len; kb->head.version = TOKVER_EP11_AES; @@ -844,7 +843,7 @@ static int ep11_cryptsingle(u16 card, u16 domain, req = alloc_cprb(req_pl_size); if (!req) goto out; - req_pl = (struct crypt_req_pl *) (((u8 *) req) + sizeof(*req)); + req_pl = (struct crypt_req_pl *)(((u8 *)req) + sizeof(*req)); prep_head(&req_pl->head, req_pl_size, api, (mode ? 20 : 19)); req_pl->var_tag = 0x04; req_pl->var_len = sizeof(u32); @@ -852,7 +851,7 @@ static int ep11_cryptsingle(u16 card, u16 domain, req_pl->mech_tag = 0x04; req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0); req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */ - p = ((u8 *) req_pl) + sizeof(*req_pl); + p = ((u8 *)req_pl) + sizeof(*req_pl); if (iv) { memcpy(p, iv, 16); p += 16; @@ -866,10 +865,10 @@ static int ep11_cryptsingle(u16 card, u16 domain, rep = alloc_cprb(rep_pl_size); if (!rep) goto out; - rep_pl = (struct crypt_rep_pl *) (((u8 *) rep) + sizeof(*rep)); + rep_pl = (struct crypt_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL); + urb = kmalloc(sizeof(*urb), GFP_KERNEL); if (!urb) goto out; target.ap_id = card; @@ -882,7 +881,7 @@ static int ep11_cryptsingle(u16 card, u16 domain, if (rc) { DEBUG_ERR( "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", - __func__, (int) card, (int) domain, rc); + __func__, (int)card, (int)domain, rc); goto out; } @@ -894,13 +893,13 @@ static int ep11_cryptsingle(u16 card, u16 domain, rc = -EIO; goto out; } - p = ((u8 *) rep_pl) + sizeof(*rep_pl); - if (rep_pl->data_lenfmt <= 127) + p = ((u8 *)rep_pl) + sizeof(*rep_pl); + if (rep_pl->data_lenfmt <= 127) { n = rep_pl->data_lenfmt; - else if (rep_pl->data_lenfmt == 0x81) + } else if (rep_pl->data_lenfmt == 0x81) { n = *p++; - else if (rep_pl->data_lenfmt == 0x82) { - n = *((u16 *) p); + } else if (rep_pl->data_lenfmt == 0x82) { + n = *((u16 *)p); p += 2; } else { DEBUG_ERR("%s unknown reply data length format 0x%02hhx\n", @@ -978,7 +977,7 @@ static int ep11_unwrapkey(u16 card, u16 domain, req = alloc_cprb(req_pl_size); if (!req) goto out; - req_pl = (struct uw_req_pl *) (((u8 *) req) + sizeof(*req)); + req_pl = (struct uw_req_pl *)(((u8 *)req) + sizeof(*req)); api = (!keygenflags || keygenflags & 0x00200000) ? 
4 : 1; prep_head(&req_pl->head, req_pl_size, api, 34); /* UnwrapKey */ req_pl->attr_tag = 0x04; @@ -994,7 +993,7 @@ static int ep11_unwrapkey(u16 card, u16 domain, req_pl->mech_tag = 0x04; req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0); req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */ - p = ((u8 *) req_pl) + sizeof(*req_pl); + p = ((u8 *)req_pl) + sizeof(*req_pl); if (iv) { memcpy(p, iv, 16); p += 16; @@ -1014,10 +1013,10 @@ static int ep11_unwrapkey(u16 card, u16 domain, rep = alloc_cprb(sizeof(struct uw_rep_pl)); if (!rep) goto out; - rep_pl = (struct uw_rep_pl *) (((u8 *) rep) + sizeof(*rep)); + rep_pl = (struct uw_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL); + urb = kmalloc(sizeof(*urb), GFP_KERNEL); if (!urb) goto out; target.ap_id = card; @@ -1030,7 +1029,7 @@ static int ep11_unwrapkey(u16 card, u16 domain, if (rc) { DEBUG_ERR( "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", - __func__, (int) card, (int) domain, rc); + __func__, (int)card, (int)domain, rc); goto out; } @@ -1052,7 +1051,7 @@ static int ep11_unwrapkey(u16 card, u16 domain, /* copy key blob and set header values */ memcpy(keybuf, rep_pl->data, rep_pl->data_len); *keybufsize = rep_pl->data_len; - kb = (struct ep11keyblob *) keybuf; + kb = (struct ep11keyblob *)keybuf; kb->head.type = TOKTYPE_NON_CCA; kb->head.len = rep_pl->data_len; kb->head.version = TOKVER_EP11_AES; @@ -1105,7 +1104,7 @@ static int ep11_wrapkey(u16 card, u16 domain, u8 *p; /* maybe the session field holds a header with key info */ - kb = (struct ep11keyblob *) key; + kb = (struct ep11keyblob *)key; if (kb->head.type == TOKTYPE_NON_CCA && kb->head.version == TOKVER_EP11_AES) { has_header = true; @@ -1120,7 +1119,7 @@ static int ep11_wrapkey(u16 card, u16 domain, goto out; if (!mech || mech == 0x80060001) req->flags |= 0x20; /* CPACF_WRAP needs special bit */ - req_pl = (struct wk_req_pl *) (((u8 *) req) + sizeof(*req)); + req_pl = (struct wk_req_pl *)(((u8 *)req) + sizeof(*req)); api = (!mech || mech == 0x80060001) ? 4 : 1; /* CKM_IBM_CPACF_WRAP */ prep_head(&req_pl->head, req_pl_size, api, 33); /* WrapKey */ req_pl->var_tag = 0x04; @@ -1129,7 +1128,7 @@ static int ep11_wrapkey(u16 card, u16 domain, req_pl->mech_tag = 0x04; req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0); req_pl->mech = (mech ? 
mech : 0x80060001); /* CKM_IBM_CPACF_WRAP */ - p = ((u8 *) req_pl) + sizeof(*req_pl); + p = ((u8 *)req_pl) + sizeof(*req_pl); if (iv) { memcpy(p, iv, 16); p += 16; @@ -1152,10 +1151,10 @@ static int ep11_wrapkey(u16 card, u16 domain, rep = alloc_cprb(sizeof(struct wk_rep_pl)); if (!rep) goto out; - rep_pl = (struct wk_rep_pl *) (((u8 *) rep) + sizeof(*rep)); + rep_pl = (struct wk_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL); + urb = kmalloc(sizeof(*urb), GFP_KERNEL); if (!urb) goto out; target.ap_id = card; @@ -1168,7 +1167,7 @@ static int ep11_wrapkey(u16 card, u16 domain, if (rc) { DEBUG_ERR( "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", - __func__, (int) card, (int) domain, rc); + __func__, (int)card, (int)domain, rc); goto out; } @@ -1206,9 +1205,9 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, u8 encbuf[64], *kek = NULL; size_t clrkeylen, keklen, encbuflen = sizeof(encbuf); - if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256) + if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256) { clrkeylen = keybitsize / 8; - else { + } else { DEBUG_ERR( "%s unknown/unsupported keybitsize %d\n", __func__, keybitsize); @@ -1233,7 +1232,7 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, __func__, rc); goto out; } - kb = (struct ep11keyblob *) kek; + kb = (struct ep11keyblob *)kek; memset(&kb->head, 0, sizeof(kb->head)); /* Step 2: encrypt clear key value with the kek key */ @@ -1282,17 +1281,17 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen, struct ep11kblob_header *hdr; /* key with or without header ? */ - hdr = (struct ep11kblob_header *) keyblob; - if (hdr->type == TOKTYPE_NON_CCA - && (hdr->version == TOKVER_EP11_AES_WITH_HEADER - || hdr->version == TOKVER_EP11_ECC_WITH_HEADER) - && is_ep11_keyblob(keyblob + sizeof(struct ep11kblob_header))) { + hdr = (struct ep11kblob_header *)keyblob; + if (hdr->type == TOKTYPE_NON_CCA && + (hdr->version == TOKVER_EP11_AES_WITH_HEADER || + hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && + is_ep11_keyblob(keyblob + sizeof(struct ep11kblob_header))) { /* EP11 AES or ECC key with header */ key = keyblob + sizeof(struct ep11kblob_header); keylen = hdr->len - sizeof(struct ep11kblob_header); - } else if (hdr->type == TOKTYPE_NON_CCA - && hdr->version == TOKVER_EP11_AES - && is_ep11_keyblob(keyblob)) { + } else if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_AES && + is_ep11_keyblob(keyblob)) { /* EP11 AES key (old style) */ key = keyblob; keylen = hdr->len; @@ -1300,8 +1299,9 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen, /* raw EP11 key blob */ key = keyblob; keylen = keybloblen; - } else + } else { return -EINVAL; + } /* alloc temp working buffer */ wkbuflen = (keylen + AES_BLOCK_SIZE) & (~(AES_BLOCK_SIZE - 1)); @@ -1318,12 +1318,12 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen, __func__, rc); goto out; } - wki = (struct wk_info *) wkbuf; + wki = (struct wk_info *)wkbuf; /* check struct version and pkey type */ if (wki->version != 1 || wki->pkeytype < 1 || wki->pkeytype > 5) { DEBUG_ERR("%s wk info version %d or pkeytype %d mismatch.\n", - __func__, (int) wki->version, (int) wki->pkeytype); + __func__, (int)wki->version, (int)wki->pkeytype); rc = -EIO; goto out; } @@ -1332,24 +1332,24 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen, switch 
(wki->pkeytype) { case 1: /* AES */ switch (wki->pkeysize) { - case 16+32: + case 16 + 32: /* AES 128 protected key */ if (protkeytype) *protkeytype = PKEY_KEYTYPE_AES_128; break; - case 24+32: + case 24 + 32: /* AES 192 protected key */ if (protkeytype) *protkeytype = PKEY_KEYTYPE_AES_192; break; - case 32+32: + case 32 + 32: /* AES 256 protected key */ if (protkeytype) *protkeytype = PKEY_KEYTYPE_AES_256; break; default: DEBUG_ERR("%s unknown/unsupported AES pkeysize %d\n", - __func__, (int) wki->pkeysize); + __func__, (int)wki->pkeysize); rc = -EIO; goto out; } @@ -1363,7 +1363,7 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen, case 2: /* TDES */ default: DEBUG_ERR("%s unknown/unsupported key type %d\n", - __func__, (int) wki->pkeytype); + __func__, (int)wki->pkeytype); rc = -EIO; goto out; } @@ -1445,7 +1445,7 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, } /* apqn passed all filtering criterons, add to the array */ if (_nr_apqns < 256) - _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom); + _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); } /* nothing found ? */ diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h index 1e02b197c003..07445041869f 100644 --- a/drivers/s390/crypto/zcrypt_ep11misc.h +++ b/drivers/s390/crypto/zcrypt_ep11misc.h @@ -50,7 +50,7 @@ struct ep11keyblob { /* check ep11 key magic to find out if this is an ep11 key blob */ static inline bool is_ep11_keyblob(const u8 *key) { - struct ep11keyblob *kb = (struct ep11keyblob *) key; + struct ep11keyblob *kb = (struct ep11keyblob *)key; return (kb->version == EP11_STRUCT_MAGIC); } diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h index 8b0ce600b749..d36177e65a3d 100644 --- a/drivers/s390/crypto/zcrypt_error.h +++ b/drivers/s390/crypto/zcrypt_error.h @@ -121,10 +121,11 @@ static inline int convert_error(struct zcrypt_queue *zq, ZCRYPT_DBF_WARN( "%s dev=%02x.%04x RY=0x%02x apfs=0x%x => bus rescan, rc=EAGAIN\n", __func__, card, queue, ehdr->reply_code, apfs); - } else + } else { ZCRYPT_DBF_WARN("%s dev=%02x.%04x RY=0x%02x => bus rescan, rc=EAGAIN\n", __func__, card, queue, ehdr->reply_code); + } return -EAGAIN; default: /* Assume request is valid and a retry will be worth it */ diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c index 259145aa393f..7d245645fdd5 100644 --- a/drivers/s390/crypto/zcrypt_msgtype50.c +++ b/drivers/s390/crypto/zcrypt_msgtype50.c @@ -158,7 +158,6 @@ struct type80_hdr { int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode) { - if (!mex->inputdatalength) return -EINVAL; @@ -174,7 +173,6 @@ int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode) int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode) { - if (!crt->inputdatalength) return -EINVAL; @@ -239,8 +237,9 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq, mod = meb3->modulus + sizeof(meb3->modulus) - mod_len; exp = meb3->exponent + sizeof(meb3->exponent) - mod_len; inp = meb3->message + sizeof(meb3->message) - mod_len; - } else + } else { return -EINVAL; + } if (copy_from_user(mod, mex->n_modulus, mod_len) || copy_from_user(exp, mex->b_key, mod_len) || @@ -323,8 +322,9 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq, dq = crb3->dq + sizeof(crb3->dq) - short_len; u = crb3->u + sizeof(crb3->u) - short_len; inp = crb3->message + sizeof(crb3->message) - mod_len; - } else + } else { return -EINVAL; + } /* * 
correct the offset of p, bp and mult_inv according zcrypt.h @@ -392,7 +392,7 @@ static int convert_response_cex2a(struct zcrypt_queue *zq, unsigned int outputdatalength) { /* Response type byte is the second byte in the response. */ - unsigned char rtype = ((unsigned char *) reply->msg)[1]; + unsigned char rtype = ((unsigned char *)reply->msg)[1]; switch (rtype) { case TYPE82_RSP_CODE: @@ -406,11 +406,11 @@ static int convert_response_cex2a(struct zcrypt_queue *zq, pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), - (int) rtype); + (int)rtype); ZCRYPT_DBF_ERR( "%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", __func__, AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid), (int) rtype); + AP_QID_QUEUE(zq->queue->qid), (int)rtype); ap_send_online_uevent(&zq->queue->ap_dev, zq->online); return -EAGAIN; } @@ -447,10 +447,11 @@ static void zcrypt_cex2a_receive(struct ap_queue *aq, memcpy(msg->msg, reply->msg, len); msg->len = len; } - } else + } else { memcpy(msg->msg, reply->msg, sizeof(error_reply)); + } out: - complete((struct completion *) msg->private); + complete((struct completion *)msg->private); } static atomic_t zcrypt_step = ATOMIC_INIT(0); @@ -475,7 +476,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq, if (!ap_msg->msg) return -ENOMEM; ap_msg->receive = zcrypt_cex2a_receive; - ap_msg->psmid = (((unsigned long long) current->pid) << 32) + + ap_msg->psmid = (((unsigned long long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); ap_msg->private = &work; rc = ICAMEX_msg_to_type50MEX_msg(zq, ap_msg, mex); @@ -492,9 +493,11 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq, rc = convert_response_cex2a(zq, ap_msg, mex->outputdata, mex->outputdatalength); - } else + } else { /* Signal pending. */ ap_cancel_message(zq->queue, ap_msg); + } + out: ap_msg->private = NULL; if (rc) @@ -524,7 +527,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq, if (!ap_msg->msg) return -ENOMEM; ap_msg->receive = zcrypt_cex2a_receive; - ap_msg->psmid = (((unsigned long long) current->pid) << 32) + + ap_msg->psmid = (((unsigned long long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); ap_msg->private = &work; rc = ICACRT_msg_to_type50CRT_msg(zq, ap_msg, crt); @@ -541,9 +544,11 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq, rc = convert_response_cex2a(zq, ap_msg, crt->outputdata, crt->outputdatalength); - } else + } else { /* Signal pending. */ ap_cancel_message(zq->queue, ap_msg); + } + out: ap_msg->private = NULL; if (rc) diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index 494451cf0588..8fb34b8eeb18 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -29,12 +29,13 @@ #define CEXXC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */ -#define CEIL4(x) ((((x)+3)/4)*4) +#define CEIL4(x) ((((x) + 3) / 4) * 4) struct response_type { struct completion work; int type; }; + #define CEXXC_RESPONSE_TYPE_ICA 0 #define CEXXC_RESPONSE_TYPE_XCRB 1 #define CEXXC_RESPONSE_TYPE_EP11 2 @@ -178,7 +179,6 @@ int speed_idx_ep11(int req_type) } } - /* * Convert a ICAMEX message to a type6 MEX message. * @@ -188,7 +188,7 @@ int speed_idx_ep11(int req_type) * * Returns 0 on success or negative errno value. 
*/ -static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq, +static int icamex_msg_to_type6mex_msgx(struct zcrypt_queue *zq, struct ap_message *ap_msg, struct ica_rsa_modexpo *mex) { @@ -226,19 +226,19 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq, return -EFAULT; /* Set up key which is located after the variable length text. */ - size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength); + size = zcrypt_type6_mex_key_en(mex, msg->text + mex->inputdatalength); if (size < 0) return size; size += sizeof(*msg) + mex->inputdatalength; /* message header, cprbx and f&r */ msg->hdr = static_type6_hdrX; - msg->hdr.ToCardLen1 = size - sizeof(msg->hdr); - msg->hdr.FromCardLen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); + msg->hdr.tocardlen1 = size - sizeof(msg->hdr); + msg->hdr.fromcardlen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); msg->cprbx = static_cprbx; msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); - msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1; + msg->cprbx.rpl_msgbl = msg->hdr.fromcardlen1; msg->fr = static_pke_fnr; @@ -257,7 +257,7 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq, * * Returns 0 on success or negative errno value. */ -static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq, +static int icacrt_msg_to_type6crt_msgx(struct zcrypt_queue *zq, struct ap_message *ap_msg, struct ica_rsa_modexpo_crt *crt) { @@ -303,8 +303,8 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq, /* message header, cprbx and f&r */ msg->hdr = static_type6_hdrX; - msg->hdr.ToCardLen1 = size - sizeof(msg->hdr); - msg->hdr.FromCardLen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); + msg->hdr.tocardlen1 = size - sizeof(msg->hdr); + msg->hdr.fromcardlen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); msg->cprbx = static_cprbx; msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); @@ -331,8 +331,8 @@ struct type86_fmt2_msg { struct type86_fmt2_ext fmt2; } __packed; -static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg, - struct ica_xcRB *xcRB, +static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg, + struct ica_xcRB *xcrb, unsigned int *fcode, unsigned short **dom) { @@ -345,19 +345,19 @@ static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg, struct CPRBX cprbx; } __packed * msg = ap_msg->msg; - int rcblen = CEIL4(xcRB->request_control_blk_length); + int rcblen = CEIL4(xcrb->request_control_blk_length); int req_sumlen, resp_sumlen; char *req_data = ap_msg->msg + sizeof(struct type6_hdr) + rcblen; char *function_code; - if (CEIL4(xcRB->request_control_blk_length) < - xcRB->request_control_blk_length) + if (CEIL4(xcrb->request_control_blk_length) < + xcrb->request_control_blk_length) return -EINVAL; /* overflow after alignment*/ /* length checks */ ap_msg->len = sizeof(struct type6_hdr) + - CEIL4(xcRB->request_control_blk_length) + - xcRB->request_data_length; + CEIL4(xcrb->request_control_blk_length) + + xcrb->request_data_length; if (ap_msg->len > ap_msg->bufsize) return -EINVAL; @@ -365,48 +365,49 @@ static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg, * Overflow check * sum must be greater (or equal) than the largest operand */ - req_sumlen = CEIL4(xcRB->request_control_blk_length) + - xcRB->request_data_length; - if ((CEIL4(xcRB->request_control_blk_length) <= - xcRB->request_data_length) ? 
- (req_sumlen < xcRB->request_data_length) : - (req_sumlen < CEIL4(xcRB->request_control_blk_length))) { + req_sumlen = CEIL4(xcrb->request_control_blk_length) + + xcrb->request_data_length; + if ((CEIL4(xcrb->request_control_blk_length) <= + xcrb->request_data_length) ? + req_sumlen < xcrb->request_data_length : + req_sumlen < CEIL4(xcrb->request_control_blk_length)) { return -EINVAL; } - if (CEIL4(xcRB->reply_control_blk_length) < - xcRB->reply_control_blk_length) + if (CEIL4(xcrb->reply_control_blk_length) < + xcrb->reply_control_blk_length) return -EINVAL; /* overflow after alignment*/ /* * Overflow check * sum must be greater (or equal) than the largest operand */ - resp_sumlen = CEIL4(xcRB->reply_control_blk_length) + - xcRB->reply_data_length; - if ((CEIL4(xcRB->reply_control_blk_length) <= xcRB->reply_data_length) ? - (resp_sumlen < xcRB->reply_data_length) : - (resp_sumlen < CEIL4(xcRB->reply_control_blk_length))) { + resp_sumlen = CEIL4(xcrb->reply_control_blk_length) + + xcrb->reply_data_length; + if ((CEIL4(xcrb->reply_control_blk_length) <= + xcrb->reply_data_length) ? + resp_sumlen < xcrb->reply_data_length : + resp_sumlen < CEIL4(xcrb->reply_control_blk_length)) { return -EINVAL; } /* prepare type6 header */ msg->hdr = static_type6_hdrX; - memcpy(msg->hdr.agent_id, &(xcRB->agent_ID), sizeof(xcRB->agent_ID)); - msg->hdr.ToCardLen1 = xcRB->request_control_blk_length; - if (xcRB->request_data_length) { + memcpy(msg->hdr.agent_id, &xcrb->agent_ID, sizeof(xcrb->agent_ID)); + msg->hdr.tocardlen1 = xcrb->request_control_blk_length; + if (xcrb->request_data_length) { msg->hdr.offset2 = msg->hdr.offset1 + rcblen; - msg->hdr.ToCardLen2 = xcRB->request_data_length; + msg->hdr.tocardlen2 = xcrb->request_data_length; } - msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length; - msg->hdr.FromCardLen2 = xcRB->reply_data_length; + msg->hdr.fromcardlen1 = xcrb->reply_control_blk_length; + msg->hdr.fromcardlen2 = xcrb->reply_data_length; /* prepare CPRB */ - if (z_copy_from_user(userspace, &(msg->cprbx), xcRB->request_control_blk_addr, - xcRB->request_control_blk_length)) + if (z_copy_from_user(userspace, &msg->cprbx, xcrb->request_control_blk_addr, + xcrb->request_control_blk_length)) return -EFAULT; if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) > - xcRB->request_control_blk_length) + xcrb->request_control_blk_length) return -EINVAL; function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len; memcpy(msg->hdr.function_code, function_code, @@ -416,8 +417,8 @@ static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg, *dom = (unsigned short *)&msg->cprbx.domain; /* check subfunction, US and AU need special flag with NQAP */ - if (memcmp(function_code, "US", 2) == 0 - || memcmp(function_code, "AU", 2) == 0) + if (memcmp(function_code, "US", 2) == 0 || + memcmp(function_code, "AU", 2) == 0) ap_msg->flags |= AP_MSG_FLAG_SPECIAL; #ifdef CONFIG_ZCRYPT_DEBUG @@ -443,16 +444,16 @@ static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg, } /* copy data block */ - if (xcRB->request_data_length && - z_copy_from_user(userspace, req_data, xcRB->request_data_address, - xcRB->request_data_length)) + if (xcrb->request_data_length && + z_copy_from_user(userspace, req_data, xcrb->request_data_address, + xcrb->request_data_length)) return -EFAULT; return 0; } static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap_msg, - struct ep11_urb *xcRB, + struct ep11_urb *xcrb, unsigned int *fcode, unsigned int *domain) { @@ 
-482,25 +483,25 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap unsigned int dom_val; /* domain id */ } __packed * payload_hdr = NULL; - if (CEIL4(xcRB->req_len) < xcRB->req_len) + if (CEIL4(xcrb->req_len) < xcrb->req_len) return -EINVAL; /* overflow after alignment*/ /* length checks */ - ap_msg->len = sizeof(struct type6_hdr) + CEIL4(xcRB->req_len); + ap_msg->len = sizeof(struct type6_hdr) + CEIL4(xcrb->req_len); if (ap_msg->len > ap_msg->bufsize) return -EINVAL; - if (CEIL4(xcRB->resp_len) < xcRB->resp_len) + if (CEIL4(xcrb->resp_len) < xcrb->resp_len) return -EINVAL; /* overflow after alignment*/ /* prepare type6 header */ msg->hdr = static_type6_ep11_hdr; - msg->hdr.ToCardLen1 = xcRB->req_len; - msg->hdr.FromCardLen1 = xcRB->resp_len; + msg->hdr.tocardlen1 = xcrb->req_len; + msg->hdr.fromcardlen1 = xcrb->resp_len; /* Import CPRB data from the ioctl input parameter */ - if (z_copy_from_user(userspace, &(msg->cprbx.cprb_len), - (char __force __user *)xcRB->req, xcRB->req_len)) { + if (z_copy_from_user(userspace, &msg->cprbx.cprb_len, + (char __force __user *)xcrb->req, xcrb->req_len)) { return -EFAULT; } @@ -518,7 +519,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap } else { lfmt = 1; /* length format #1 */ } - payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt); + payload_hdr = (struct pld_hdr *)((&msg->pld_lenfmt) + lfmt); *fcode = payload_hdr->func_val & 0xFFFF; /* enable special processing based on the cprbs flags special bit */ @@ -567,9 +568,9 @@ struct type86_ep11_reply { } __packed; static int convert_type86_ica(struct zcrypt_queue *zq, - struct ap_message *reply, - char __user *outputdata, - unsigned int outputdatalength) + struct ap_message *reply, + char __user *outputdata, + unsigned int outputdatalength) { static unsigned char static_pad[] = { 0x00, 0x02, @@ -622,18 +623,18 @@ static int convert_type86_ica(struct zcrypt_queue *zq, ZCRYPT_DBF_WARN("%s dev=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n", __func__, AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), - (int) service_rc, (int) service_rs); + (int)service_rc, (int)service_rs); return -EINVAL; } zq->online = 0; pr_err("Crypto dev=%02x.%04x rc/rs=%d/%d online=0 rc=EAGAIN\n", AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), - (int) service_rc, (int) service_rs); + (int)service_rc, (int)service_rs); ZCRYPT_DBF_ERR("%s dev=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n", __func__, AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), - (int) service_rc, (int) service_rs); + (int)service_rc, (int)service_rs); ap_send_online_uevent(&zq->queue->ap_dev, zq->online); return -EAGAIN; } @@ -672,42 +673,42 @@ static int convert_type86_ica(struct zcrypt_queue *zq, * * @zq: crypto device pointer * @reply: reply AP message. - * @xcRB: pointer to XCRB + * @xcrb: pointer to XCRB * * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. 
*/ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq, struct ap_message *reply, - struct ica_xcRB *xcRB) + struct ica_xcRB *xcrb) { struct type86_fmt2_msg *msg = reply->msg; char *data = reply->msg; /* Copy CPRB to user */ - if (xcRB->reply_control_blk_length < msg->fmt2.count1) { + if (xcrb->reply_control_blk_length < msg->fmt2.count1) { ZCRYPT_DBF_DBG("%s reply_control_blk_length %u < required %u => EMSGSIZE\n", - __func__, xcRB->reply_control_blk_length, + __func__, xcrb->reply_control_blk_length, msg->fmt2.count1); return -EMSGSIZE; } - if (z_copy_to_user(userspace, xcRB->reply_control_blk_addr, + if (z_copy_to_user(userspace, xcrb->reply_control_blk_addr, data + msg->fmt2.offset1, msg->fmt2.count1)) return -EFAULT; - xcRB->reply_control_blk_length = msg->fmt2.count1; + xcrb->reply_control_blk_length = msg->fmt2.count1; /* Copy data buffer to user */ if (msg->fmt2.count2) { - if (xcRB->reply_data_length < msg->fmt2.count2) { + if (xcrb->reply_data_length < msg->fmt2.count2) { ZCRYPT_DBF_DBG("%s reply_data_length %u < required %u => EMSGSIZE\n", - __func__, xcRB->reply_data_length, + __func__, xcrb->reply_data_length, msg->fmt2.count2); return -EMSGSIZE; } - if (z_copy_to_user(userspace, xcRB->reply_data_addr, + if (z_copy_to_user(userspace, xcrb->reply_data_addr, data + msg->fmt2.offset2, msg->fmt2.count2)) return -EFAULT; } - xcRB->reply_data_length = msg->fmt2.count2; + xcrb->reply_data_length = msg->fmt2.count2; return 0; } @@ -717,35 +718,35 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq, * * @zq: crypto device pointer * @reply: reply AP message. - * @xcRB: pointer to EP11 user request block + * @xcrb: pointer to EP11 user request block * * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. 
*/ static int convert_type86_ep11_xcrb(bool userspace, struct zcrypt_queue *zq, struct ap_message *reply, - struct ep11_urb *xcRB) + struct ep11_urb *xcrb) { struct type86_fmt2_msg *msg = reply->msg; char *data = reply->msg; - if (xcRB->resp_len < msg->fmt2.count1) { + if (xcrb->resp_len < msg->fmt2.count1) { ZCRYPT_DBF_DBG("%s resp_len %u < required %u => EMSGSIZE\n", - __func__, (unsigned int)xcRB->resp_len, + __func__, (unsigned int)xcrb->resp_len, msg->fmt2.count1); return -EMSGSIZE; } /* Copy response CPRB to user */ - if (z_copy_to_user(userspace, (char __force __user *)xcRB->resp, + if (z_copy_to_user(userspace, (char __force __user *)xcrb->resp, data + msg->fmt2.offset1, msg->fmt2.count1)) return -EFAULT; - xcRB->resp_len = msg->fmt2.count1; + xcrb->resp_len = msg->fmt2.count1; return 0; } static int convert_type86_rng(struct zcrypt_queue *zq, - struct ap_message *reply, - char *buffer) + struct ap_message *reply, + char *buffer) { struct { struct type86_hdr hdr; @@ -761,9 +762,9 @@ static int convert_type86_rng(struct zcrypt_queue *zq, } static int convert_response_ica(struct zcrypt_queue *zq, - struct ap_message *reply, - char __user *outputdata, - unsigned int outputdatalength) + struct ap_message *reply, + char __user *outputdata, + unsigned int outputdatalength) { struct type86x_reply *msg = reply->msg; @@ -773,13 +774,14 @@ static int convert_response_ica(struct zcrypt_queue *zq, return convert_error(zq, reply); case TYPE86_RSP_CODE: if (msg->cprbx.ccp_rtcode && - (msg->cprbx.ccp_rscode == 0x14f) && - (outputdatalength > 256)) { + msg->cprbx.ccp_rscode == 0x14f && + outputdatalength > 256) { if (zq->zcard->max_exp_bit_length <= 17) { zq->zcard->max_exp_bit_length = 17; return -EAGAIN; - } else + } else { return -EINVAL; + } } if (msg->hdr.reply_code) return convert_error(zq, reply); @@ -793,11 +795,11 @@ static int convert_response_ica(struct zcrypt_queue *zq, pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), - (int) msg->hdr.type); + (int)msg->hdr.type); ZCRYPT_DBF_ERR( "%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", __func__, AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type); + AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type); ap_send_online_uevent(&zq->queue->ap_dev, zq->online); return -EAGAIN; } @@ -805,41 +807,41 @@ static int convert_response_ica(struct zcrypt_queue *zq, static int convert_response_xcrb(bool userspace, struct zcrypt_queue *zq, struct ap_message *reply, - struct ica_xcRB *xcRB) + struct ica_xcRB *xcrb) { struct type86x_reply *msg = reply->msg; switch (msg->hdr.type) { case TYPE82_RSP_CODE: case TYPE88_RSP_CODE: - xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ + xcrb->status = 0x0008044DL; /* HDD_InvalidParm */ return convert_error(zq, reply); case TYPE86_RSP_CODE: if (msg->hdr.reply_code) { - memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32)); + memcpy(&xcrb->status, msg->fmt2.apfs, sizeof(u32)); return convert_error(zq, reply); } if (msg->cprbx.cprb_ver_id == 0x02) - return convert_type86_xcrb(userspace, zq, reply, xcRB); + return convert_type86_xcrb(userspace, zq, reply, xcrb); fallthrough; /* wrong cprb version is an unknown response */ default: /* Unknown response type, this should NEVER EVER happen */ - xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ + xcrb->status = 0x0008044DL; /* HDD_InvalidParm */ zq->online = 0; pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 
rc=EAGAIN\n", AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), - (int) msg->hdr.type); + (int)msg->hdr.type); ZCRYPT_DBF_ERR( "%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", __func__, AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type); + AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type); ap_send_online_uevent(&zq->queue->ap_dev, zq->online); return -EAGAIN; } } static int convert_response_ep11_xcrb(bool userspace, struct zcrypt_queue *zq, - struct ap_message *reply, struct ep11_urb *xcRB) + struct ap_message *reply, struct ep11_urb *xcrb) { struct type86_ep11_reply *msg = reply->msg; @@ -851,26 +853,26 @@ static int convert_response_ep11_xcrb(bool userspace, struct zcrypt_queue *zq, if (msg->hdr.reply_code) return convert_error(zq, reply); if (msg->cprbx.cprb_ver_id == 0x04) - return convert_type86_ep11_xcrb(userspace, zq, reply, xcRB); + return convert_type86_ep11_xcrb(userspace, zq, reply, xcrb); fallthrough; /* wrong cprb version is an unknown resp */ default: /* Unknown response type, this should NEVER EVER happen */ zq->online = 0; pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), - (int) msg->hdr.type); + (int)msg->hdr.type); ZCRYPT_DBF_ERR( "%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", __func__, AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type); + AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type); ap_send_online_uevent(&zq->queue->ap_dev, zq->online); return -EAGAIN; } } static int convert_response_rng(struct zcrypt_queue *zq, - struct ap_message *reply, - char *data) + struct ap_message *reply, + char *data) { struct type86x_reply *msg = reply->msg; @@ -889,11 +891,11 @@ static int convert_response_rng(struct zcrypt_queue *zq, pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), - (int) msg->hdr.type); + (int)msg->hdr.type); ZCRYPT_DBF_ERR( "%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", __func__, AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type); + AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type); ap_send_online_uevent(&zq->queue->ap_dev, zq->online); return -EAGAIN; } @@ -908,15 +910,15 @@ static int convert_response_rng(struct zcrypt_queue *zq, * @reply: pointer to the AP reply message */ static void zcrypt_msgtype6_receive(struct ap_queue *aq, - struct ap_message *msg, - struct ap_message *reply) + struct ap_message *msg, + struct ap_message *reply) { static struct error_hdr error_reply = { .type = TYPE82_RSP_CODE, .reply_code = REP82_ERROR_MACHINE_FAILURE, }; struct response_type *resp_type = - (struct response_type *) msg->private; + (struct response_type *)msg->private; struct type86x_reply *t86r; int len; @@ -925,7 +927,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq, goto out; /* ap_msg->rc indicates the error */ t86r = reply->msg; if (t86r->hdr.type == TYPE86_RSP_CODE && - t86r->cprbx.cprb_ver_id == 0x02) { + t86r->cprbx.cprb_ver_id == 0x02) { switch (resp_type->type) { case CEXXC_RESPONSE_TYPE_ICA: len = sizeof(struct type86x_reply) + t86r->length - 2; @@ -948,10 +950,11 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq, default: memcpy(msg->msg, &error_reply, sizeof(error_reply)); } - } else + } else { memcpy(msg->msg, reply->msg, sizeof(error_reply)); + } out: - 
complete(&(resp_type->work)); + complete(&resp_type->work); } /* @@ -998,7 +1001,7 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq, memcpy(msg->msg, reply->msg, sizeof(error_reply)); } out: - complete(&(resp_type->work)); + complete(&resp_type->work); } static atomic_t zcrypt_step = ATOMIC_INIT(0); @@ -1019,15 +1022,15 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq, }; int rc; - ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL); + ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL); if (!ap_msg->msg) return -ENOMEM; ap_msg->bufsize = PAGE_SIZE; ap_msg->receive = zcrypt_msgtype6_receive; - ap_msg->psmid = (((unsigned long long) current->pid) << 32) + + ap_msg->psmid = (((unsigned long long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); ap_msg->private = &resp_type; - rc = ICAMEX_msg_to_type6MEX_msgX(zq, ap_msg, mex); + rc = icamex_msg_to_type6mex_msgx(zq, ap_msg, mex); if (rc) goto out_free; init_completion(&resp_type.work); @@ -1041,11 +1044,13 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq, rc = convert_response_ica(zq, ap_msg, mex->outputdata, mex->outputdatalength); - } else + } else { /* Signal pending. */ ap_cancel_message(zq->queue, ap_msg); + } + out_free: - free_page((unsigned long) ap_msg->msg); + free_page((unsigned long)ap_msg->msg); ap_msg->private = NULL; ap_msg->msg = NULL; return rc; @@ -1067,15 +1072,15 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq, }; int rc; - ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL); + ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL); if (!ap_msg->msg) return -ENOMEM; ap_msg->bufsize = PAGE_SIZE; ap_msg->receive = zcrypt_msgtype6_receive; - ap_msg->psmid = (((unsigned long long) current->pid) << 32) + + ap_msg->psmid = (((unsigned long long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); ap_msg->private = &resp_type; - rc = ICACRT_msg_to_type6CRT_msgX(zq, ap_msg, crt); + rc = icacrt_msg_to_type6crt_msgx(zq, ap_msg, crt); if (rc) goto out_free; init_completion(&resp_type.work); @@ -1093,8 +1098,9 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq, /* Signal pending. */ ap_cancel_message(zq->queue, ap_msg); } + out_free: - free_page((unsigned long) ap_msg->msg); + free_page((unsigned long)ap_msg->msg); ap_msg->private = NULL; ap_msg->msg = NULL; return rc; @@ -1109,7 +1115,7 @@ out_free: * by the caller with ap_init_message(). Also the caller has to * make sure ap_release_message() is always called even on failure. */ -int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcRB, +int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcrb, struct ap_message *ap_msg, unsigned int *func_code, unsigned short **dom) { @@ -1122,12 +1128,12 @@ int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcRB, if (!ap_msg->msg) return -ENOMEM; ap_msg->receive = zcrypt_msgtype6_receive; - ap_msg->psmid = (((unsigned long long) current->pid) << 32) + + ap_msg->psmid = (((unsigned long long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); if (!ap_msg->private) return -ENOMEM; - return XCRB_msg_to_type6CPRB_msgX(userspace, ap_msg, xcRB, func_code, dom); + return xcrb_msg_to_type6cprb_msgx(userspace, ap_msg, xcrb, func_code, dom); } /* @@ -1135,10 +1141,10 @@ int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcRB, * device to handle a send_cprb request. 
* @zq: pointer to zcrypt_queue structure that identifies the * CEXxC device to the request distributor - * @xcRB: pointer to the send_cprb request buffer + * @xcrb: pointer to the send_cprb request buffer */ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq, - struct ica_xcRB *xcRB, + struct ica_xcRB *xcrb, struct ap_message *ap_msg) { int rc; @@ -1153,11 +1159,11 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq, * Set the queue's reply buffer length minus 128 byte padding * as reply limit for the card firmware. */ - msg->hdr.FromCardLen1 = min_t(unsigned int, msg->hdr.FromCardLen1, + msg->hdr.fromcardlen1 = min_t(unsigned int, msg->hdr.fromcardlen1, zq->reply.bufsize - 128); - if (msg->hdr.FromCardLen2) - msg->hdr.FromCardLen2 = - zq->reply.bufsize - msg->hdr.FromCardLen1 - 128; + if (msg->hdr.fromcardlen2) + msg->hdr.fromcardlen2 = + zq->reply.bufsize - msg->hdr.fromcardlen1 - 128; init_completion(&rtype->work); rc = ap_queue_message(zq->queue, ap_msg); @@ -1167,10 +1173,12 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq, if (rc == 0) { rc = ap_msg->rc; if (rc == 0) - rc = convert_response_xcrb(userspace, zq, ap_msg, xcRB); - } else + rc = convert_response_xcrb(userspace, zq, ap_msg, xcrb); + } else { /* Signal pending. */ ap_cancel_message(zq->queue, ap_msg); + } + out: if (rc) ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n", @@ -1201,7 +1209,7 @@ int prep_ep11_ap_msg(bool userspace, struct ep11_urb *xcrb, if (!ap_msg->msg) return -ENOMEM; ap_msg->receive = zcrypt_msgtype6_receive_ep11; - ap_msg->psmid = (((unsigned long long) current->pid) << 32) + + ap_msg->psmid = (((unsigned long long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); if (!ap_msg->private) @@ -1215,7 +1223,7 @@ int prep_ep11_ap_msg(bool userspace, struct ep11_urb *xcrb, * device to handle a send_ep11_cprb request. * @zq: pointer to zcrypt_queue structure that identifies the * CEX4P device to the request distributor - * @xcRB: pointer to the ep11 user request block + * @xcrb: pointer to the ep11 user request block */ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *zq, struct ep11_urb *xcrb, @@ -1265,7 +1273,7 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue * } else { lfmt = 1; /* length format #1 */ } - payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt); + payload_hdr = (struct pld_hdr *)((&msg->pld_lenfmt) + lfmt); payload_hdr->dom_val = (unsigned int) AP_QID_QUEUE(zq->queue->qid); } @@ -1274,7 +1282,7 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue * * Set the queue's reply buffer length minus the two prepend headers * as reply limit for the card firmware. */ - msg->hdr.FromCardLen1 = zq->reply.bufsize - + msg->hdr.fromcardlen1 = zq->reply.bufsize - sizeof(struct type86_hdr) - sizeof(struct type86_fmt2_ext); init_completion(&rtype->work); @@ -1286,9 +1294,11 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue * rc = ap_msg->rc; if (rc == 0) rc = convert_response_ep11_xcrb(userspace, zq, ap_msg, xcrb); - } else + } else { /* Signal pending. 
*/ ap_cancel_message(zq->queue, ap_msg); + } + out: if (rc) ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n", @@ -1309,13 +1319,13 @@ int prep_rng_ap_msg(struct ap_message *ap_msg, int *func_code, if (!ap_msg->msg) return -ENOMEM; ap_msg->receive = zcrypt_msgtype6_receive; - ap_msg->psmid = (((unsigned long long) current->pid) << 32) + + ap_msg->psmid = (((unsigned long long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); if (!ap_msg->private) return -ENOMEM; - rng_type6CPRB_msgX(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain); + rng_type6cprb_msgx(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain); *func_code = HWRNG; return 0; @@ -1354,9 +1364,10 @@ static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq, rc = ap_msg->rc; if (rc == 0) rc = convert_response_rng(zq, ap_msg, buffer); - } else + } else { /* Signal pending. */ ap_cancel_message(zq->queue, ap_msg); + } out: return rc; } diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h index 9da4f4175c44..6f5ced8d6cda 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.h +++ b/drivers/s390/crypto/zcrypt_msgtype6.h @@ -45,14 +45,14 @@ struct type6_hdr { unsigned char reserved5[2]; /* 0x0000 */ unsigned char function_code[2]; /* for PKD, 0x5044 (ascii 'PD') */ unsigned char reserved6[2]; /* 0x0000 */ - unsigned int ToCardLen1; /* (request CPRB len + 3) & -4 */ - unsigned int ToCardLen2; /* db len 0x00000000 for PKD */ - unsigned int ToCardLen3; /* 0x00000000 */ - unsigned int ToCardLen4; /* 0x00000000 */ - unsigned int FromCardLen1; /* response buffer length */ - unsigned int FromCardLen2; /* db len 0x00000000 for PKD */ - unsigned int FromCardLen3; /* 0x00000000 */ - unsigned int FromCardLen4; /* 0x00000000 */ + unsigned int tocardlen1; /* (request CPRB len + 3) & -4 */ + unsigned int tocardlen2; /* db len 0x00000000 for PKD */ + unsigned int tocardlen3; /* 0x00000000 */ + unsigned int tocardlen4; /* 0x00000000 */ + unsigned int fromcardlen1; /* response buffer length */ + unsigned int fromcardlen2; /* db len 0x00000000 for PKD */ + unsigned int fromcardlen3; /* 0x00000000 */ + unsigned int fromcardlen4; /* 0x00000000 */ } __packed; /** @@ -116,7 +116,7 @@ int speed_idx_ep11(int); * @ap_dev: AP device pointer * @ap_msg: pointer to AP message */ -static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg, +static inline void rng_type6cprb_msgx(struct ap_message *ap_msg, unsigned int random_number_length, unsigned int *domain) { @@ -134,8 +134,8 @@ static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg, .offset1 = 0x00000058, .agent_id = {'C', 'A'}, .function_code = {'R', 'L'}, - .ToCardLen1 = sizeof(*msg) - sizeof(msg->hdr), - .FromCardLen1 = sizeof(*msg) - sizeof(msg->hdr), + .tocardlen1 = sizeof(*msg) - sizeof(msg->hdr), + .fromcardlen1 = sizeof(*msg) - sizeof(msg->hdr), }; static struct CPRBX local_cprbx = { .cprb_len = 0x00dc, @@ -147,9 +147,9 @@ static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg, }; msg->hdr = static_type6_hdrX; - msg->hdr.FromCardLen2 = random_number_length, + msg->hdr.fromcardlen2 = random_number_length; msg->cprbx = local_cprbx; - msg->cprbx.rpl_datal = random_number_length, + msg->cprbx.rpl_datal = random_number_length; memcpy(msg->function_code, msg->hdr.function_code, 0x02); msg->rule_length = 0x0a; memcpy(msg->rule, "RANDOM ", 8); diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c index 1552a850a52e..cdc5a4b2c019 100644 --- 
a/drivers/s390/crypto/zcrypt_queue.c +++ b/drivers/s390/crypto/zcrypt_queue.c @@ -114,7 +114,7 @@ struct zcrypt_queue *zcrypt_queue_alloc(size_t reply_buf_size) { struct zcrypt_queue *zq; - zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL); + zq = kzalloc(sizeof(*zq), GFP_KERNEL); if (!zq) return NULL; zq->reply.msg = kmalloc(reply_buf_size, GFP_KERNEL); -- cgit v1.2.3 From f2f47d0ef72c30622e62471903ea19446ea79ee2 Mon Sep 17 00:00:00 2001 From: Sven Schnelle Date: Wed, 6 Apr 2022 08:01:24 +0200 Subject: s390/mmap: increase stack/mmap gap to 128MB This basically reverts commit 9e78a13bfb16 ("[S390] reduce miminum gap between stack and mmap_base"). 32MB is not enough space between the stack and the mmap area for some programs. Given that compat tasks aren't common these days, let's revert to 128MB. Signed-off-by: Sven Schnelle Reviewed-by: Heiko Carstens Signed-off-by: Heiko Carstens --- arch/s390/mm/mmap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index e54f928503c5..d545f5c39f7e 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -58,9 +58,9 @@ static inline unsigned long mmap_base(unsigned long rnd, /* * Top of mmap area (just below the process stack). - * Leave at least a ~32 MB hole. + * Leave at least a ~128 MB hole. */ - gap_min = 32 * 1024 * 1024UL; + gap_min = SZ_128M; gap_max = (STACK_TOP / 6) * 5; if (gap < gap_min) -- cgit v1.2.3 From 57761da4dc5cd60bed2c81ba0edb7495c3c740b8 Mon Sep 17 00:00:00 2001 From: Sven Schnelle Date: Wed, 6 Apr 2022 08:35:26 +0200 Subject: s390/vdso: move vdso mapping to its own function This is a preparation patch for adding vdso randomization to s390. It adds a function vdso_size(), which will be used later in calculating the STACK_TOP value. It also moves the vdso mapping into a new function map_vdso() to keep the code similar to other architectures. Signed-off-by: Sven Schnelle Reviewed-by: Heiko Carstens Signed-off-by: Heiko Carstens --- arch/s390/include/asm/processor.h | 1 + arch/s390/kernel/vdso.c | 24 +++++++++++++++++++----- 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index ff1e25d515a8..a3ab8cbcc5e4 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -83,6 +83,7 @@ void cpu_detect_mhz_feature(void); extern const struct seq_operations cpuinfo_op; extern void execve_tail(void); extern void __bpon(void); +unsigned long vdso_size(void); /* * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. 
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 99694260cac9..22cb727d5821 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -160,10 +160,9 @@ int vdso_getcpu_init(void) } early_initcall(vdso_getcpu_init); /* Must be called before SMP init */ -int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) +static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len) { - unsigned long vdso_text_len, vdso_mapping_len; - unsigned long vvar_start, vdso_text_start; + unsigned long vvar_start, vdso_text_start, vdso_text_len; struct vm_special_mapping *vdso_mapping; struct mm_struct *mm = current->mm; struct vm_area_struct *vma; @@ -180,8 +179,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) vdso_text_len = vdso64_end - vdso64_start; vdso_mapping = &vdso64_mapping; } - vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE; - vvar_start = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0); + vvar_start = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0); rc = vvar_start; if (IS_ERR_VALUE(vvar_start)) goto out; @@ -210,6 +208,22 @@ out: return rc; } +unsigned long vdso_size(void) +{ + unsigned long size = VVAR_NR_PAGES * PAGE_SIZE; + + if (is_compat_task()) + size += vdso32_end - vdso32_start; + else + size += vdso64_end - vdso64_start; + return PAGE_ALIGN(size); +} + +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) +{ + return map_vdso(0, vdso_size()); +} + static struct page ** __init vdso_setup_pages(void *start, void *end) { int pages = (end - start) >> PAGE_SHIFT; -- cgit v1.2.3 From 9e37a2e8546f9e48ea76c839116fa5174d14e033 Mon Sep 17 00:00:00 2001 From: Sven Schnelle Date: Wed, 6 Apr 2022 08:44:49 +0200 Subject: s390/vdso: map vdso above stack In the current code vdso is mapped below the stack. This is problematic when programs mapped to the top of the address space are allocating a lot of memory, because the heap will clash with the vdso. To avoid this map the vdso above the stack and move STACK_TOP so that it all fits into three level paging. Signed-off-by: Sven Schnelle Reviewed-by: Heiko Carstens Signed-off-by: Heiko Carstens --- arch/s390/include/asm/processor.h | 7 ++++--- arch/s390/kernel/vdso.c | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index a3ab8cbcc5e4..add764a2be8c 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -95,9 +95,10 @@ unsigned long vdso_size(void); (_REGION3_SIZE >> 1) : (_REGION2_SIZE >> 1)) #define TASK_SIZE_MAX (-PAGE_SIZE) -#define STACK_TOP (test_thread_flag(TIF_31BIT) ? \ - _REGION3_SIZE : _REGION2_SIZE) -#define STACK_TOP_MAX _REGION2_SIZE +#define VDSO_BASE (STACK_TOP + PAGE_SIZE) +#define VDSO_LIMIT (test_thread_flag(TIF_31BIT) ? 
_REGION3_SIZE : _REGION2_SIZE) +#define STACK_TOP (VDSO_LIMIT - vdso_size() - PAGE_SIZE) +#define STACK_TOP_MAX (_REGION2_SIZE - vdso_size() - PAGE_SIZE) #define HAVE_ARCH_PICK_MMAP_LAYOUT diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 22cb727d5821..7ba84a88ea2a 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -221,7 +221,7 @@ unsigned long vdso_size(void) int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { - return map_vdso(0, vdso_size()); + return map_vdso(VDSO_BASE, vdso_size()); } static struct page ** __init vdso_setup_pages(void *start, void *end) -- cgit v1.2.3 From 41cd81abafdc4e58a93fcb677712a76885e3ca25 Mon Sep 17 00:00:00 2001 From: Sven Schnelle Date: Wed, 6 Apr 2022 09:17:21 +0200 Subject: s390/vdso: add vdso randomization Randomize the address of vdso if randomize_va_space is enabled. Note that this keeps the vdso address on the same PMD as the stack to avoid allocating an extra page table just for vdso. Signed-off-by: Sven Schnelle Reviewed-by: Heiko Carstens Signed-off-by: Heiko Carstens --- arch/s390/kernel/vdso.c | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 7ba84a88ea2a..5075cde77b29 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -208,6 +209,31 @@ out: return rc; } +static unsigned long vdso_addr(unsigned long start, unsigned long len) +{ + unsigned long addr, end, offset; + + /* + * Round up the start address. It can start out unaligned as a result + * of stack start randomization. + */ + start = PAGE_ALIGN(start); + + /* Round the lowest possible end address up to a PMD boundary. */ + end = (start + len + PMD_SIZE - 1) & PMD_MASK; + if (end >= VDSO_BASE) + end = VDSO_BASE; + end -= len; + + if (end > start) { + offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1); + addr = start + (offset << PAGE_SHIFT); + } else { + addr = start; + } + return addr; +} + unsigned long vdso_size(void) { unsigned long size = VVAR_NR_PAGES * PAGE_SIZE; @@ -221,7 +247,12 @@ unsigned long vdso_size(void) int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { - return map_vdso(VDSO_BASE, vdso_size()); + unsigned long addr = VDSO_BASE; + unsigned long size = vdso_size(); + + if (current->flags & PF_RANDOMIZE) + addr = vdso_addr(current->mm->start_stack + PAGE_SIZE, size); + return map_vdso(addr, size); } static struct page ** __init vdso_setup_pages(void *start, void *end) -- cgit v1.2.3 From d4b2945dc9c05a1b24282f4c6b2f41a2a52d18dd Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Wed, 13 Apr 2022 11:44:16 +0200 Subject: s390/vfio-ap: remove superfluous MODULE_DEVICE_TABLE declaration The vfio_ap module tries to register for the vfio_ap bus - but that's the interface that it provides itself, so this does not make much sense, thus let's simply drop this statement now. 
Signed-off-by: Thomas Huth Reviewed-by: Tony Krowiak Link: https://lore.kernel.org/r/20220413094416.412114-1-thuth@redhat.com Signed-off-by: Heiko Carstens --- drivers/s390/crypto/vfio_ap_drv.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c index 29ebd54f8919..4ac9c6521ec1 100644 --- a/drivers/s390/crypto/vfio_ap_drv.c +++ b/drivers/s390/crypto/vfio_ap_drv.c @@ -46,8 +46,6 @@ static struct ap_device_id ap_queue_ids[] = { { /* end of sibling */ }, }; -MODULE_DEVICE_TABLE(vfio_ap, ap_queue_ids); - static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q) { struct ap_matrix_mdev *matrix_mdev; -- cgit v1.2.3 From 52c79e636a58da6650cec632e5e6d46467feefcb Mon Sep 17 00:00:00 2001 From: Niklas Schnelle Date: Fri, 18 Mar 2022 16:25:31 +0100 Subject: s390/pci: make better use of zpci_dbg() levels While the zpci_dbg() macro offers a level parameter, it is currently largely unused. The only message logged with higher importance than level 3 is the debug message about a change in UID checking; it is not actually more important, since the UID uniqueness guarantee is already exposed in sysfs, so it should be level 3 as well. On the other hand, the "add ..." message, which shows what devices are visible, is essential during problem determination. By setting its level to 1, lowering the debug level can act as a filter that shows only the available functions. On the error side the default level is set to 6 while all existing messages are printed at level 0. This is inconsistent and leaves no room for making messages invisible at the default level, so instead set the default level for errors to 3 as well, matching the default for debug messages. Reviewed-by: Matthew Rosato Reviewed-by: Pierre Morel Signed-off-by: Niklas Schnelle Signed-off-by: Heiko Carstens --- arch/s390/pci/pci.c | 2 +- arch/s390/pci/pci_clp.c | 2 +- arch/s390/pci/pci_debug.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index e563cb65c0c4..bc980fd313d5 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -799,7 +799,7 @@ struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state) struct zpci_dev *zdev; int rc; - zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, state); + zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", fid, fh, state); zdev = kzalloc(sizeof(*zdev), GFP_KERNEL); if (!zdev) return ERR_PTR(-ENOMEM); diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index 1057d7af4a55..375e0a5120bc 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c @@ -30,7 +30,7 @@ bool zpci_unique_uid; void update_uid_checking(bool new) { if (zpci_unique_uid != new) - zpci_dbg(1, "uid checking:%d\n", new); + zpci_dbg(3, "uid checking:%d\n", new); zpci_unique_uid = new; } diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c index 3408c0df3ebf..ca6bd98eec13 100644 --- a/arch/s390/pci/pci_debug.c +++ b/arch/s390/pci/pci_debug.c @@ -196,7 +196,7 @@ int __init zpci_debug_init(void) if (!pci_debug_err_id) return -EINVAL; debug_register_view(pci_debug_err_id, &debug_hex_ascii_view); - debug_set_level(pci_debug_err_id, 6); + debug_set_level(pci_debug_err_id, 3); debugfs_root = debugfs_create_dir("pci", NULL); return 0; -- cgit v1.2.3 From 723b5a9d2bb0f759c41c3cc41f7fd89a0d8278e5 Mon Sep 17 00:00:00 2001 From: Niklas Schnelle Date: Fri, 1 Apr 2022 14:04:14 +0200 Subject: s390/pci: don't log availability events as errors Availability events are logged 
in s390dbf/pci_error/hex_ascii even though they don't indicate an error condition. They have also become redundant, as commit 6526a597a2e85 ("s390/pci: add simpler s390dbf traces for events") added an s390dbf/pci_msg/sprintf log entry for availability events, which contains all non-reserved fields of struct zpci_ccdf_avail. On the other hand, the availability entries in the error log make it easy to miss actual errors and may even overwrite error entries if the message buffer wraps. Thus simply remove the availability events from the error log, thereby establishing the rule that any content in s390dbf/pci_error indicates some kind of error. Reviewed-by: Matthew Rosato Reviewed-by: Pierre Morel Signed-off-by: Niklas Schnelle Signed-off-by: Heiko Carstens --- arch/s390/pci/pci_event.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index ea9db5cea64e..b9324ca2eb94 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -321,9 +321,6 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) zpci_dbg(3, "avl fid:%x, fh:%x, pec:%x\n", ccdf->fid, ccdf->fh, ccdf->pec); - zpci_err("avail CCDF:\n"); - zpci_err_hex(ccdf, sizeof(*ccdf)); - switch (ccdf->pec) { case 0x0301: /* Reserved|Standby -> Configured */ if (!zdev) { -- cgit v1.2.3 From cde8833e40dddd6e0f067f43b2734a1ad4495065 Mon Sep 17 00:00:00 2001 From: Niklas Schnelle Date: Thu, 24 Feb 2022 15:45:33 +0100 Subject: s390/pci: add PCI access type and length to error records Currently when a PCI instruction returns a non-zero condition code it can be very hard to tell from the s390dbf logs what kind of instruction was executed. In case of PCI memory I/O (MIO) instructions it is even impossible to tell if we attempted a load, store or block store, or how large the access was, because only the address is logged. Improve this by adding an indicator byte for the instruction type to the error record, and also store the length of the access for MIO instructions, where this cannot be deduced from the request. 
We use the following indicator values: - 'l': PCI load - 's': PCI store - 'b': PCI store block - 'L': PCI load (MIO) - 'S': PCI store (MIO) - 'B': PCI store block (MIO) - 'M': MPCIFC - 'R': RPCIT Reviewed-by: Matthew Rosato Reviewed-by: Pierre Morel Signed-off-by: Niklas Schnelle Signed-off-by: Heiko Carstens --- arch/s390/pci/pci_insn.c | 54 ++++++++++++++++++++++++++++++++++-------------- 1 file changed, 39 insertions(+), 15 deletions(-) diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c index 1710d006ee93..c49a93424812 100644 --- a/arch/s390/pci/pci_insn.c +++ b/arch/s390/pci/pci_insn.c @@ -18,14 +18,38 @@ #define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */ -static inline void zpci_err_insn(u8 cc, u8 status, u64 req, u64 offset) +struct zpci_err_insn_data { + u8 insn; + u8 cc; + u8 status; + union { + struct { + u64 req; + u64 offset; + }; + struct { + u64 addr; + u64 len; + }; + }; +} __packed; + +static inline void zpci_err_insn_req(u8 insn, u8 cc, u8 status, + u64 req, u64 offset) +{ + struct zpci_err_insn_data data = { + .insn = insn, .cc = cc, .status = status, + .req = req, .offset = offset}; + + zpci_err_hex(&data, sizeof(data)); +} + +static inline void zpci_err_insn_addr(u8 insn, u8 cc, u8 status, + u64 addr, u64 len) { - struct { - u64 req; - u64 offset; - u8 cc; - u8 status; - } __packed data = {req, offset, cc, status}; + struct zpci_err_insn_data data = { + .insn = insn, .cc = cc, .status = status, + .addr = addr, .len = len}; zpci_err_hex(&data, sizeof(data)); } @@ -56,7 +80,7 @@ u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status) } while (cc == 2); if (cc) - zpci_err_insn(cc, *status, req, 0); + zpci_err_insn_req('M', cc, *status, req, 0); return cc; } @@ -89,7 +113,7 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range) } while (cc == 2); if (cc) - zpci_err_insn(cc, status, addr, range); + zpci_err_insn_addr('R', cc, status, addr, range); if (cc == 1 && (status == 4 || status == 16)) return -ENOMEM; @@ -154,7 +178,7 @@ int __zpci_load(u64 *data, u64 req, u64 offset) } while (cc == 2); if (cc) - zpci_err_insn(cc, status, req, offset); + zpci_err_insn_req('l', cc, status, req, offset); return (cc > 0) ? -EIO : cc; } @@ -198,7 +222,7 @@ int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len) cc = __pcilg_mio(data, (__force u64) addr, len, &status); if (cc) - zpci_err_insn(cc, status, 0, (__force u64) addr); + zpci_err_insn_addr('L', cc, status, (__force u64) addr, len); return (cc > 0) ? -EIO : cc; } @@ -235,7 +259,7 @@ int __zpci_store(u64 data, u64 req, u64 offset) } while (cc == 2); if (cc) - zpci_err_insn(cc, status, req, offset); + zpci_err_insn_req('s', cc, status, req, offset); return (cc > 0) ? -EIO : cc; } @@ -278,7 +302,7 @@ int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len) cc = __pcistg_mio(data, (__force u64) addr, len, &status); if (cc) - zpci_err_insn(cc, status, 0, (__force u64) addr); + zpci_err_insn_addr('S', cc, status, (__force u64) addr, len); return (cc > 0) ? -EIO : cc; } @@ -314,7 +338,7 @@ int __zpci_store_block(const u64 *data, u64 req, u64 offset) } while (cc == 2); if (cc) - zpci_err_insn(cc, status, req, offset); + zpci_err_insn_req('b', cc, status, req, offset); return (cc > 0) ? -EIO : cc; } @@ -358,7 +382,7 @@ int zpci_write_block(volatile void __iomem *dst, cc = __pcistb_mio(src, (__force u64) dst, len, &status); if (cc) - zpci_err_insn(cc, status, 0, (__force u64) dst); + zpci_err_insn_addr('B', cc, status, (__force u64) dst, len); return (cc > 0) ? 
-EIO : cc; } -- cgit v1.2.3 From 34fb0e703480a65754e1f8289d754dfc953ba8d4 Mon Sep 17 00:00:00 2001 From: Niklas Schnelle Date: Fri, 25 Feb 2022 09:45:24 +0100 Subject: s390/pci: add error record for CC 2 retries Currently it is not detectable from within Linux when PCI instructions are retried because of a busy condition. Detecting such conditions and especially how long they lasted can however be quite useful in problem determination. This patch enables this by adding an s390dbf error log when a CC 2 is first encountered as well as after the retried instruction. Despite being unlikely it may be possible that these added debug messages drown out important other messages so allow setting the debug level in zpci_err_insn*() and set their level to 1 so they can be filtered out if need be. Reviewed-by: Matthew Rosato Reviewed-by: Pierre Morel Signed-off-by: Niklas Schnelle Signed-off-by: Heiko Carstens --- arch/s390/include/asm/pci_debug.h | 7 +++- arch/s390/pci/pci_insn.c | 74 ++++++++++++++++++++++++++++++--------- 2 files changed, 63 insertions(+), 18 deletions(-) diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h index 5dfe47588277..3bb4e7e33a0e 100644 --- a/arch/s390/include/asm/pci_debug.h +++ b/arch/s390/include/asm/pci_debug.h @@ -17,9 +17,14 @@ extern debug_info_t *pci_debug_err_id; debug_text_event(pci_debug_err_id, 0, debug_buffer); \ } while (0) +static inline void zpci_err_hex_level(int level, void *addr, int len) +{ + debug_event(pci_debug_err_id, level, addr, len); +} + static inline void zpci_err_hex(void *addr, int len) { - debug_event(pci_debug_err_id, 0, addr, len); + zpci_err_hex_level(0, addr, len); } #endif diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c index c49a93424812..1a822b7799f8 100644 --- a/arch/s390/pci/pci_insn.c +++ b/arch/s390/pci/pci_insn.c @@ -34,24 +34,24 @@ struct zpci_err_insn_data { }; } __packed; -static inline void zpci_err_insn_req(u8 insn, u8 cc, u8 status, +static inline void zpci_err_insn_req(int lvl, u8 insn, u8 cc, u8 status, u64 req, u64 offset) { struct zpci_err_insn_data data = { .insn = insn, .cc = cc, .status = status, .req = req, .offset = offset}; - zpci_err_hex(&data, sizeof(data)); + zpci_err_hex_level(lvl, &data, sizeof(data)); } -static inline void zpci_err_insn_addr(u8 insn, u8 cc, u8 status, +static inline void zpci_err_insn_addr(int lvl, u8 insn, u8 cc, u8 status, u64 addr, u64 len) { struct zpci_err_insn_data data = { .insn = insn, .cc = cc, .status = status, .addr = addr, .len = len}; - zpci_err_hex(&data, sizeof(data)); + zpci_err_hex_level(lvl, &data, sizeof(data)); } /* Modify PCI Function Controls */ @@ -71,16 +71,24 @@ static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status) u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status) { + bool retried = false; u8 cc; do { cc = __mpcifc(req, fib, status); - if (cc == 2) + if (cc == 2) { msleep(ZPCI_INSN_BUSY_DELAY); + if (!retried) { + zpci_err_insn_req(1, 'M', cc, *status, req, 0); + retried = true; + } + } } while (cc == 2); if (cc) - zpci_err_insn_req('M', cc, *status, req, 0); + zpci_err_insn_req(0, 'M', cc, *status, req, 0); + else if (retried) + zpci_err_insn_req(1, 'M', cc, *status, req, 0); return cc; } @@ -104,16 +112,24 @@ static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status) int zpci_refresh_trans(u64 fn, u64 addr, u64 range) { + bool retried = false; u8 cc, status; do { cc = __rpcit(fn, addr, range, &status); - if (cc == 2) + if (cc == 2) { udelay(ZPCI_INSN_BUSY_DELAY); + if (!retried) { + 
zpci_err_insn_addr(1, 'R', cc, status, addr, range); + retried = true; + } + } } while (cc == 2); if (cc) - zpci_err_insn_addr('R', cc, status, addr, range); + zpci_err_insn_addr(0, 'R', cc, status, addr, range); + else if (retried) + zpci_err_insn_addr(1, 'R', cc, status, addr, range); if (cc == 1 && (status == 4 || status == 16)) return -ENOMEM; @@ -168,17 +184,25 @@ static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status) int __zpci_load(u64 *data, u64 req, u64 offset) { + bool retried = false; u8 status; int cc; do { cc = __pcilg(data, req, offset, &status); - if (cc == 2) + if (cc == 2) { udelay(ZPCI_INSN_BUSY_DELAY); + if (!retried) { + zpci_err_insn_req(1, 'l', cc, status, req, offset); + retried = true; + } + } } while (cc == 2); if (cc) - zpci_err_insn_req('l', cc, status, req, offset); + zpci_err_insn_req(0, 'l', cc, status, req, offset); + else if (retried) + zpci_err_insn_req(1, 'l', cc, status, req, offset); return (cc > 0) ? -EIO : cc; } @@ -222,7 +246,7 @@ int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len) cc = __pcilg_mio(data, (__force u64) addr, len, &status); if (cc) - zpci_err_insn_addr('L', cc, status, (__force u64) addr, len); + zpci_err_insn_addr(0, 'L', cc, status, (__force u64) addr, len); return (cc > 0) ? -EIO : cc; } @@ -249,17 +273,25 @@ static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status) int __zpci_store(u64 data, u64 req, u64 offset) { + bool retried = false; u8 status; int cc; do { cc = __pcistg(data, req, offset, &status); - if (cc == 2) + if (cc == 2) { udelay(ZPCI_INSN_BUSY_DELAY); + if (!retried) { + zpci_err_insn_req(1, 's', cc, status, req, offset); + retried = true; + } + } } while (cc == 2); if (cc) - zpci_err_insn_req('s', cc, status, req, offset); + zpci_err_insn_req(0, 's', cc, status, req, offset); + else if (retried) + zpci_err_insn_req(1, 's', cc, status, req, offset); return (cc > 0) ? -EIO : cc; } @@ -302,7 +334,7 @@ int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len) cc = __pcistg_mio(data, (__force u64) addr, len, &status); if (cc) - zpci_err_insn_addr('S', cc, status, (__force u64) addr, len); + zpci_err_insn_addr(0, 'S', cc, status, (__force u64) addr, len); return (cc > 0) ? -EIO : cc; } @@ -328,17 +360,25 @@ static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status) int __zpci_store_block(const u64 *data, u64 req, u64 offset) { + bool retried = false; u8 status; int cc; do { cc = __pcistb(data, req, offset, &status); - if (cc == 2) + if (cc == 2) { udelay(ZPCI_INSN_BUSY_DELAY); + if (!retried) { + zpci_err_insn_req(0, 'b', cc, status, req, offset); + retried = true; + } + } } while (cc == 2); if (cc) - zpci_err_insn_req('b', cc, status, req, offset); + zpci_err_insn_req(0, 'b', cc, status, req, offset); + else if (retried) + zpci_err_insn_req(1, 'b', cc, status, req, offset); return (cc > 0) ? -EIO : cc; } @@ -382,7 +422,7 @@ int zpci_write_block(volatile void __iomem *dst, cc = __pcistb_mio(src, (__force u64) dst, len, &status); if (cc) - zpci_err_insn_addr('B', cc, status, (__force u64) dst, len); + zpci_err_insn_addr(0, 'B', cc, status, (__force u64) dst, len); return (cc > 0) ? -EIO : cc; } -- cgit v1.2.3 From 9a07731702d9e5787770f44d2be0c15742f27e39 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 19 Apr 2022 17:40:29 +0200 Subject: s390: add KCSAN instrumentation to barriers and spinlocks test_barrier fails on s390 because of the missing KCSAN instrumentation for several synchronization primitives. 
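For background (a simplified sketch, not the exact upstream code): KCSAN's barrier instrumentation works by pairing each low-level barrier with a kcsan_*() call, and the generic header can only add that pairing when the architecture exposes the double-underscore primitives for it to wrap, roughly like this:

/*
 * Rough illustration of how asm-generic/barrier.h is assumed to wrap the
 * architecture primitives once __mb()/__rmb()/__wmb() are defined; see the
 * real header for the authoritative definitions.
 */
#ifdef __mb
#define mb()	do { kcsan_mb(); __mb(); } while (0)
#endif

#ifdef __rmb
#define rmb()	do { kcsan_rmb(); __rmb(); } while (0)
#endif

As long as s390 defines mb()/rmb()/wmb() directly, this generic wrapping is bypassed and no KCSAN instrumentation is emitted, which is what the change below addresses.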
Add it to barriers by defining __mb(), __rmb(), __wmb(), __dma_rmb() and __dma_wmb(), and letting the common code in asm-generic/barrier.h do the rest. Spinlocks require instrumentation only on the unlock path; notify KCSAN that the CPU cannot move memory accesses outside of the spin lock. In reality it also cannot move stores inside of it, but this is not important and can be omitted. Reported-by: Tobias Huschle Signed-off-by: Ilya Leoshkevich Signed-off-by: Heiko Carstens --- arch/s390/include/asm/barrier.h | 16 ++++++++-------- arch/s390/include/asm/spinlock.h | 1 + 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index 2c057e1f3200..82de2a7c4160 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -26,14 +26,14 @@ static __always_inline void bcr_serialize(void) asm volatile(__ASM_BCR_SERIALIZE : : : "memory"); } -#define mb() bcr_serialize() -#define rmb() barrier() -#define wmb() barrier() -#define dma_rmb() mb() -#define dma_wmb() mb() -#define __smp_mb() mb() -#define __smp_rmb() rmb() -#define __smp_wmb() wmb() +#define __mb() bcr_serialize() +#define __rmb() barrier() +#define __wmb() barrier() +#define __dma_rmb() __mb() +#define __dma_wmb() __mb() +#define __smp_mb() __mb() +#define __smp_rmb() __rmb() +#define __smp_wmb() __wmb() #define __smp_store_release(p, v) \ do { \ diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index 24a54443c865..10a460762e94 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -77,6 +77,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lp) static inline void arch_spin_unlock(arch_spinlock_t *lp) { typecheck(int, lp->lock); + kcsan_release(); asm_inline volatile( ALTERNATIVE("", ".insn rre,0xb2fa0000,7,0", 49) /* NIAI 7 */ " sth %1,%0\n" -- cgit v1.2.3 From 6260f6427c944279f8aca108140db900699a30de Mon Sep 17 00:00:00 2001 From: Pingfan Liu Date: Fri, 22 Apr 2022 18:02:12 +0800 Subject: s390/irq: utilize RCU instead of irq_lock_sparse() in show_msi_interrupt() As demonstrated by commit 74bdf7815dfb ("genirq: Speedup show_interrupts()"), irq_desc can be accessed safely in RCU read section. Hence here resorting to rcu read lock to get rid of irq_lock_sparse(). Signed-off-by: Pingfan Liu Link: https://lore.kernel.org/r/20220422100212.22666-1-kernelfans@gmail.com Signed-off-by: Heiko Carstens --- arch/s390/kernel/irq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index 3033f616e256..45393919fe61 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -205,7 +205,7 @@ static void show_msi_interrupt(struct seq_file *p, int irq) unsigned long flags; int cpu; - irq_lock_sparse(); + rcu_read_lock(); desc = irq_to_desc(irq); if (!desc) goto out; @@ -224,7 +224,7 @@ static void show_msi_interrupt(struct seq_file *p, int irq) seq_putc(p, '\n'); raw_spin_unlock_irqrestore(&desc->lock, flags); out: - irq_unlock_sparse(); + rcu_read_unlock(); } /* -- cgit v1.2.3 From 4ae46db99cd88444fffb4591a477cefaf5330c30 Mon Sep 17 00:00:00 2001 From: "Guilherme G. Piccoli" Date: Wed, 27 Apr 2022 19:49:07 -0300 Subject: s390/consoles: improve panic notifiers reliability Currently many console drivers for s390 rely on panic/reboot notifiers to invoke callbacks on these events. 
The panic() function disables local IRQs, secondary CPUs and preemption, so callbacks invoked on panic are effectively running in atomic context. Most of these s390 console callbacks, however, do not take proper care with regard to atomic context: they take spinlocks that might already be held by another function or CPU and hence can cause a lockup. The goal of this patch is to improve the notifiers' reliability, acting on 4 console drivers, as detailed below: (1) con3215: changed a regular spinlock to the trylock alternative. (2) con3270: also changed a regular spinlock to its trylock counterpart, but here we also have another problem: raw3270_activate_view() takes a different spinlock. So we added a helper to validate whether this other lock is safe to acquire, and if so, raw3270_activate_view() should be safe. Notice though that there is a functional change here: it's now possible to continue the notifier code [reaching con3270_wait_write() and con3270_rebuild_update()] without executing raw3270_activate_view(). (3) sclp: a global lock is used heavily in the functions called from the notifier, so we added a check here - if the lock is already taken, we just bail out, preventing the lockup. (4) sclp_vt220: same as (3), a lock check was added to prevent the potential lockup problem. Besides (1)-(4), we also removed the useless void functions, moving the code called from the notifiers into the notifier bodies themselves, and changed the priority of these notifiers so that they execute late, since they are "heavyweight" for the panic environment; this aims to reduce risk. The return values were also changed to NOTIFY_DONE, the standard value. Signed-off-by: Guilherme G. Piccoli Link: https://lore.kernel.org/r/20220427224924.592546-14-gpiccoli@igalia.com Signed-off-by: Heiko Carstens --- drivers/s390/char/con3215.c | 25 +++++++++++++------------ drivers/s390/char/con3270.c | 31 ++++++++++++++++--------------- drivers/s390/char/raw3270.c | 15 +++++++++++++++ drivers/s390/char/raw3270.h | 1 + drivers/s390/char/sclp_con.c | 26 +++++++++++++++----------- drivers/s390/char/sclp_vt220.c | 42 +++++++++++++++++++++++------------------- 6 files changed, 83 insertions(+), 57 deletions(-) diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index f356607835d8..4ae07c7e2175 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -771,35 +771,36 @@ static struct tty_driver *con3215_device(struct console *c, int *index) } /* - * panic() calls con3215_flush through a panic_notifier - * before the system enters a disabled, endless loop. + * The below function is called as a panic/reboot notifier before the + * system enters a disabled, endless loop. + * + * Notice we must use the spin_trylock() alternative, to prevent lockups + * in atomic context (panic routine runs with secondary CPUs, local IRQs + * and preemption disabled). 
*/ -static void con3215_flush(void) +static int con3215_notify(struct notifier_block *self, + unsigned long event, void *data) { struct raw3215_info *raw; unsigned long flags; raw = raw3215[0]; /* console 3215 is the first one */ - spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); + if (!spin_trylock_irqsave(get_ccwdev_lock(raw->cdev), flags)) + return NOTIFY_DONE; raw3215_make_room(raw, RAW3215_BUFFER_SIZE); spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); -} -static int con3215_notify(struct notifier_block *self, - unsigned long event, void *data) -{ - con3215_flush(); - return NOTIFY_OK; + return NOTIFY_DONE; } static struct notifier_block on_panic_nb = { .notifier_call = con3215_notify, - .priority = 0, + .priority = INT_MIN + 1, /* run the callback late */ }; static struct notifier_block on_reboot_nb = { .notifier_call = con3215_notify, - .priority = 0, + .priority = INT_MIN + 1, /* run the callback late */ }; /* diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index e4592890f20a..10f6a37fb153 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c @@ -535,20 +535,26 @@ con3270_wait_write(struct con3270 *cp) } /* - * panic() calls con3270_flush through a panic_notifier - * before the system enters a disabled, endless loop. + * The below function is called as a panic/reboot notifier before the + * system enters a disabled, endless loop. + * + * Notice we must use the spin_trylock() alternative, to prevent lockups + * in atomic context (panic routine runs with secondary CPUs, local IRQs + * and preemption disabled). */ -static void -con3270_flush(void) +static int con3270_notify(struct notifier_block *self, + unsigned long event, void *data) { struct con3270 *cp; unsigned long flags; cp = condev; if (!cp->view.dev) - return; - raw3270_activate_view(&cp->view); - spin_lock_irqsave(&cp->view.lock, flags); + return NOTIFY_DONE; + if (!raw3270_view_lock_unavailable(&cp->view)) + raw3270_activate_view(&cp->view); + if (!spin_trylock_irqsave(&cp->view.lock, flags)) + return NOTIFY_DONE; con3270_wait_write(cp); cp->nr_up = 0; con3270_rebuild_update(cp); @@ -560,23 +566,18 @@ con3270_flush(void) con3270_wait_write(cp); } spin_unlock_irqrestore(&cp->view.lock, flags); -} -static int con3270_notify(struct notifier_block *self, - unsigned long event, void *data) -{ - con3270_flush(); - return NOTIFY_OK; + return NOTIFY_DONE; } static struct notifier_block on_panic_nb = { .notifier_call = con3270_notify, - .priority = 0, + .priority = INT_MIN + 1, /* run the callback late */ }; static struct notifier_block on_reboot_nb = { .notifier_call = con3270_notify, - .priority = 0, + .priority = INT_MIN + 1, /* run the callback late */ }; /* diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index dfde0d941c3c..4e2b3a1a3b2e 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -830,6 +830,21 @@ raw3270_create_device(struct ccw_device *cdev) return rp; } +/* + * This helper just validates that it is safe to activate a + * view in the panic() context, due to locking restrictions. + */ +int raw3270_view_lock_unavailable(struct raw3270_view *view) +{ + struct raw3270 *rp = view->dev; + + if (!rp) + return -ENODEV; + if (spin_is_locked(get_ccwdev_lock(rp->cdev))) + return -EBUSY; + return 0; +} + /* * Activate a view. 
*/ diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h index c6645167cd2b..4cb6b5ee44ca 100644 --- a/drivers/s390/char/raw3270.h +++ b/drivers/s390/char/raw3270.h @@ -160,6 +160,7 @@ struct raw3270_view { }; int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int, int); +int raw3270_view_lock_unavailable(struct raw3270_view *view); int raw3270_activate_view(struct raw3270_view *); void raw3270_del_view(struct raw3270_view *); void raw3270_deactivate_view(struct raw3270_view *); diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c index fe5ee2646fcf..e5d947c763ea 100644 --- a/drivers/s390/char/sclp_con.c +++ b/drivers/s390/char/sclp_con.c @@ -220,30 +220,34 @@ sclp_console_device(struct console *c, int *index) } /* - * Make sure that all buffers will be flushed to the SCLP. + * This panic/reboot notifier makes sure that all buffers + * will be flushed to the SCLP. */ -static void -sclp_console_flush(void) +static int sclp_console_notify(struct notifier_block *self, + unsigned long event, void *data) { + /* + * Perform the lock check before effectively getting the + * lock on sclp_conbuf_emit() / sclp_console_sync_queue() + * to prevent potential lockups in atomic context. + */ + if (spin_is_locked(&sclp_con_lock)) + return NOTIFY_DONE; + sclp_conbuf_emit(); sclp_console_sync_queue(); -} -static int sclp_console_notify(struct notifier_block *self, - unsigned long event, void *data) -{ - sclp_console_flush(); - return NOTIFY_OK; + return NOTIFY_DONE; } static struct notifier_block on_panic_nb = { .notifier_call = sclp_console_notify, - .priority = 1, + .priority = INT_MIN + 1, /* run the callback late */ }; static struct notifier_block on_reboot_nb = { .notifier_call = sclp_console_notify, - .priority = 1, + .priority = INT_MIN + 1, /* run the callback late */ }; /* diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 3b4e7e5d9b71..a32f34a1c6d2 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c @@ -769,21 +769,6 @@ __initcall(sclp_vt220_tty_init); #ifdef CONFIG_SCLP_VT220_CONSOLE -static void __sclp_vt220_flush_buffer(void) -{ - unsigned long flags; - - sclp_vt220_emit_current(); - spin_lock_irqsave(&sclp_vt220_lock, flags); - del_timer(&sclp_vt220_timer); - while (sclp_vt220_queue_running) { - spin_unlock_irqrestore(&sclp_vt220_lock, flags); - sclp_sync_wait(); - spin_lock_irqsave(&sclp_vt220_lock, flags); - } - spin_unlock_irqrestore(&sclp_vt220_lock, flags); -} - static void sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count) { @@ -797,22 +782,41 @@ sclp_vt220_con_device(struct console *c, int *index) return sclp_vt220_driver; } +/* + * This panic/reboot notifier runs in atomic context, so + * locking restrictions apply to prevent potential lockups. 
+ */ static int sclp_vt220_notify(struct notifier_block *self, unsigned long event, void *data) { - __sclp_vt220_flush_buffer(); - return NOTIFY_OK; + unsigned long flags; + + if (spin_is_locked(&sclp_vt220_lock)) + return NOTIFY_DONE; + + sclp_vt220_emit_current(); + + spin_lock_irqsave(&sclp_vt220_lock, flags); + del_timer(&sclp_vt220_timer); + while (sclp_vt220_queue_running) { + spin_unlock_irqrestore(&sclp_vt220_lock, flags); + sclp_sync_wait(); + spin_lock_irqsave(&sclp_vt220_lock, flags); + } + spin_unlock_irqrestore(&sclp_vt220_lock, flags); + + return NOTIFY_DONE; } static struct notifier_block on_panic_nb = { .notifier_call = sclp_vt220_notify, - .priority = 1, + .priority = INT_MIN + 1, /* run the callback late */ }; static struct notifier_block on_reboot_nb = { .notifier_call = sclp_vt220_notify, - .priority = 1, + .priority = INT_MIN + 1, /* run the callback late */ }; /* Structure needed to register with printk */ -- cgit v1.2.3 From 4b03b3ee60db1d46da7638e535848c377aa5348d Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 30 Apr 2022 21:11:16 +0200 Subject: s390/crypto: fix typos in comments Various spelling mistakes in comments. Detected with the help of Coccinelle. Signed-off-by: Julia Lawall Link: https://lore.kernel.org/r/20220430191122.8667-2-Julia.Lawall@inria.fr Signed-off-by: Heiko Carstens --- arch/s390/crypto/des_s390.c | 2 +- arch/s390/crypto/prng.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c index bfbafd35bcbd..e013088b5115 100644 --- a/arch/s390/crypto/des_s390.c +++ b/arch/s390/crypto/des_s390.c @@ -194,7 +194,7 @@ static struct skcipher_alg cbc_des_alg = { * same as DES. Implementers MUST reject keys that exhibit this * property. * - * In fips mode additinally check for all 3 keys are unique. + * In fips mode additionally check for all 3 keys are unique. * */ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key, diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c index 234d791ca59d..ae382bafc772 100644 --- a/arch/s390/crypto/prng.c +++ b/arch/s390/crypto/prng.c @@ -528,7 +528,7 @@ static ssize_t prng_tdes_read(struct file *file, char __user *ubuf, /* give mutex free before calling schedule() */ mutex_unlock(&prng_data->mutex); schedule(); - /* occopy mutex again */ + /* occupy mutex again */ if (mutex_lock_interruptible(&prng_data->mutex)) { if (ret == 0) ret = -ERESTARTSYS; -- cgit v1.2.3 From 108ab40fc1fe60c226f856a1e5e4cd4600a0092c Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 30 Apr 2022 21:11:19 +0200 Subject: s390/hypfs: fix typos in comments Various spelling mistakes in comments. Detected with the help of Coccinelle. 
Signed-off-by: Julia Lawall Link: https://lore.kernel.org/r/20220430191122.8667-5-Julia.Lawall@inria.fr Signed-off-by: Heiko Carstens --- arch/s390/hypfs/hypfs_vm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c index 3765c2d81df5..a3d881ca0a98 100644 --- a/arch/s390/hypfs/hypfs_vm.c +++ b/arch/s390/hypfs/hypfs_vm.c @@ -190,7 +190,7 @@ int hypfs_vm_create_files(struct dentry *root) if (IS_ERR(data)) return PTR_ERR(data); - /* Hpervisor Info */ + /* Hypervisor Info */ dir = hypfs_mkdir(root, "hyp"); if (IS_ERR(dir)) { rc = PTR_ERR(dir); -- cgit v1.2.3 From f9a3099f794c67b6edbaf3cef67b80bea8923a2c Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Sun, 1 May 2022 20:55:05 +0200 Subject: s390/nospec: prefer local labels in .set directives Use local labels in .set directives to avoid potential compile errors with LTO + clang. See commit 334865b2915c ("x86/extable: Prefer local labels in .set directives") for further details. Since s390 doesn't support LTO currently this doesn't fix a real bug for now, but helps to avoid problems as soon as required pieces have been added to llvm. Signed-off-by: Heiko Carstens --- arch/s390/include/asm/nospec-insn.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h index 2cfcd5ac3a8b..d910d71b5bb5 100644 --- a/arch/s390/include/asm/nospec-insn.h +++ b/arch/s390/include/asm/nospec-insn.h @@ -54,31 +54,31 @@ .endm .macro __DECODE_R expand,reg - .set __decode_fail,1 + .set .L__decode_fail,1 .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 .ifc \reg,%r\r1 \expand \r1 - .set __decode_fail,0 + .set .L__decode_fail,0 .endif .endr - .if __decode_fail == 1 + .if .L__decode_fail == 1 .error "__DECODE_R failed" .endif .endm .macro __DECODE_RR expand,rsave,rtarget - .set __decode_fail,1 + .set .L__decode_fail,1 .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 .ifc \rsave,%r\r1 .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 .ifc \rtarget,%r\r2 \expand \r1,\r2 - .set __decode_fail,0 + .set .L__decode_fail,0 .endif .endr .endif .endr - .if __decode_fail == 1 + .if .L__decode_fail == 1 .error "__DECODE_RR failed" .endif .endm -- cgit v1.2.3 From 68a971acc9484559ae8e1cc80950f34919299eba Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Sun, 1 May 2022 21:05:59 +0200 Subject: s390/extable: prefer local labels in .set directives Use local labels in .set directives to avoid potential compile errors with LTO + clang. See commit 334865b2915c ("x86/extable: Prefer local labels in .set directives") for further details. Since s390 doesn't support LTO currently this doesn't fix a real bug for now, but helps to avoid problems as soon as required pieces have been added to llvm. 
Signed-off-by: Heiko Carstens --- arch/s390/include/asm/asm-extable.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/s390/include/asm/asm-extable.h b/arch/s390/include/asm/asm-extable.h index fb62df5e16a2..f24d9591aaed 100644 --- a/arch/s390/include/asm/asm-extable.h +++ b/arch/s390/include/asm/asm-extable.h @@ -26,16 +26,16 @@ stringify_in_c(.long (_target) - .;) \ stringify_in_c(.short (_type);) \ stringify_in_c(.macro extable_reg reg;) \ - stringify_in_c(.set found, 0;) \ - stringify_in_c(.set regnr, 0;) \ + stringify_in_c(.set .Lfound, 0;) \ + stringify_in_c(.set .Lregnr, 0;) \ stringify_in_c(.irp rs,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15;) \ stringify_in_c(.ifc "\reg", "%%\rs";) \ - stringify_in_c(.set found, 1;) \ - stringify_in_c(.short regnr;) \ + stringify_in_c(.set .Lfound, 1;) \ + stringify_in_c(.short .Lregnr;) \ stringify_in_c(.endif;) \ - stringify_in_c(.set regnr, regnr+1;) \ + stringify_in_c(.set .Lregnr, .Lregnr+1;) \ stringify_in_c(.endr;) \ - stringify_in_c(.ifne (found != 1);) \ + stringify_in_c(.ifne (.Lfound != 1);) \ stringify_in_c(.error "extable_reg: bad register argument";) \ stringify_in_c(.endif;) \ stringify_in_c(.endm;) \ -- cgit v1.2.3 From 964bc5dbe602a62b7bbd67624c4b8f7a4ea692b2 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 3 May 2022 10:48:41 +0200 Subject: s390/vx: remove comments from macros which break LLVM's IAS LLVM's integrated assembler does not like comments within macros: :3:19: error: too many positional arguments GR_NUM b2, 1 /* Base register */ ^ Remove them, since they are obvious anyway. Signed-off-by: Heiko Carstens --- arch/s390/include/asm/vx-insn.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/s390/include/asm/vx-insn.h b/arch/s390/include/asm/vx-insn.h index 87e6cc2aeba4..95480ed9149e 100644 --- a/arch/s390/include/asm/vx-insn.h +++ b/arch/s390/include/asm/vx-insn.h @@ -366,7 +366,7 @@ .macro VLM vfrom, vto, disp, base, hint=3 VX_NUM v1, \vfrom VX_NUM v3, \vto - GR_NUM b2, \base /* Base register */ + GR_NUM b2, \base .word 0xE700 | ((v1&15) << 4) | (v3&15) .word (b2 << 12) | (\disp) MRXBOPC \hint, 0x36, v1, v3 @@ -376,7 +376,7 @@ .macro VST vr1, disp, index="%r0", base VX_NUM v1, \vr1 GR_NUM x2, \index - GR_NUM b2, \base /* Base register */ + GR_NUM b2, \base .word 0xE700 | ((v1&15) << 4) | (x2&15) .word (b2 << 12) | (\disp) MRXBOPC 0, 0x0E, v1 @@ -386,7 +386,7 @@ .macro VSTM vfrom, vto, disp, base, hint=3 VX_NUM v1, \vfrom VX_NUM v3, \vto - GR_NUM b2, \base /* Base register */ + GR_NUM b2, \base .word 0xE700 | ((v1&15) << 4) | (v3&15) .word (b2 << 12) | (\disp) MRXBOPC \hint, 0x3E, v1, v3 -- cgit v1.2.3 From edd4a8667355607345b76d5652adc0f300a28970 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Sat, 23 Apr 2022 21:31:22 +0200 Subject: s390/boot: get rid of startup archive The final kernel image is created by linking decompressor object files with a startup archive. The startup archive file however does not contain only optional code and data which can be discarded if not referenced. It also contains mandatory object data like head.o which must never be discarded, even if not referenced. Move the decompresser code and linker script to the boot directory and get rid of the startup archive so everything is kept during link time. 
Acked-by: Vasily Gorbik Signed-off-by: Heiko Carstens --- arch/s390/boot/.gitignore | 3 + arch/s390/boot/Makefile | 76 +++++++++++++++++++--- arch/s390/boot/clz_ctz.c | 2 + arch/s390/boot/compressed/.gitignore | 4 -- arch/s390/boot/compressed/Makefile | 86 ------------------------ arch/s390/boot/compressed/clz_ctz.c | 2 - arch/s390/boot/compressed/decompressor.c | 85 ------------------------ arch/s390/boot/compressed/decompressor.h | 38 ----------- arch/s390/boot/compressed/vmlinux.lds.S | 108 ------------------------------- arch/s390/boot/decompressor.c | 85 ++++++++++++++++++++++++ arch/s390/boot/decompressor.h | 38 +++++++++++ arch/s390/boot/kaslr.c | 2 +- arch/s390/boot/mem_detect.c | 2 +- arch/s390/boot/startup.c | 2 +- arch/s390/boot/vmlinux.lds.S | 108 +++++++++++++++++++++++++++++++ 15 files changed, 307 insertions(+), 334 deletions(-) create mode 100644 arch/s390/boot/clz_ctz.c delete mode 100644 arch/s390/boot/compressed/.gitignore delete mode 100644 arch/s390/boot/compressed/Makefile delete mode 100644 arch/s390/boot/compressed/clz_ctz.c delete mode 100644 arch/s390/boot/compressed/decompressor.c delete mode 100644 arch/s390/boot/compressed/decompressor.h delete mode 100644 arch/s390/boot/compressed/vmlinux.lds.S create mode 100644 arch/s390/boot/decompressor.c create mode 100644 arch/s390/boot/decompressor.h create mode 100644 arch/s390/boot/vmlinux.lds.S diff --git a/arch/s390/boot/.gitignore b/arch/s390/boot/.gitignore index b265bfede188..f56591bc0897 100644 --- a/arch/s390/boot/.gitignore +++ b/arch/s390/boot/.gitignore @@ -2,3 +2,6 @@ image bzImage section_cmp.* +vmlinux +vmlinux.lds +vmlinux.syms diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile index 0ba646899131..14d66501e4a9 100644 --- a/arch/s390/boot/Makefile +++ b/arch/s390/boot/Makefile @@ -41,10 +41,17 @@ obj-y += version.o pgm_check_info.o ctype.o obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o obj-$(CONFIG_RELOCATABLE) += machine_kexec_reloc.o obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o -targets := bzImage startup.a section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y) -subdir- := compressed +obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o +obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o +obj-all := $(obj-y) piggy.o syms.o + +targets := bzImage section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y) +targets += vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 +targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4 +targets += vmlinux.bin.zst info.bin syms.bin vmlinux.syms $(obj-all) OBJECTS := $(addprefix $(obj)/,$(obj-y)) +OBJECTS_ALL := $(addprefix $(obj)/,$(obj-all)) quiet_cmd_section_cmp = SECTCMP $* define cmd_section_cmp @@ -59,14 +66,67 @@ define cmd_section_cmp touch $@ endef -$(obj)/bzImage: $(obj)/compressed/vmlinux $(obj)/section_cmp.boot.data $(obj)/section_cmp.boot.preserved.data FORCE +$(obj)/bzImage: $(obj)/vmlinux $(obj)/section_cmp.boot.data $(obj)/section_cmp.boot.preserved.data FORCE $(call if_changed,objcopy) -$(obj)/section_cmp%: vmlinux $(obj)/compressed/vmlinux FORCE +$(obj)/section_cmp%: vmlinux $(obj)/vmlinux FORCE $(call if_changed,section_cmp) -$(obj)/compressed/vmlinux: $(obj)/startup.a FORCE - $(Q)$(MAKE) $(build)=$(obj)/compressed $@ +LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup --build-id=sha1 -T +$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS_ALL) FORCE + $(call if_changed,ld) + +LDFLAGS_vmlinux.syms := --oformat $(LD_BFD) -e startup -T +$(obj)/vmlinux.syms: 
$(obj)/vmlinux.lds $(OBJECTS) FORCE + $(call if_changed,ld) + +quiet_cmd_dumpsyms = DUMPSYMS $< +define cmd_dumpsyms + $(NM) -n -S --format=bsd "$<" | sed -nE 's/^0*([0-9a-fA-F]+) 0*([0-9a-fA-F]+) [tT] ([^ ]*)$$/\1 \2 \3/p' | tr '\n' '\0' > "$@" +endef + +$(obj)/syms.bin: $(obj)/vmlinux.syms FORCE + $(call if_changed,dumpsyms) + +OBJCOPYFLAGS_syms.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.decompressor.syms +$(obj)/syms.o: $(obj)/syms.bin FORCE + $(call if_changed,objcopy) + +OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info --set-section-flags .vmlinux.info=load +$(obj)/info.bin: vmlinux FORCE + $(call if_changed,objcopy) + +OBJCOPYFLAGS_info.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.info +$(obj)/info.o: $(obj)/info.bin FORCE + $(call if_changed,objcopy) + +OBJCOPYFLAGS_vmlinux.bin := -O binary --remove-section=.comment --remove-section=.vmlinux.info -S +$(obj)/vmlinux.bin: vmlinux FORCE + $(call if_changed,objcopy) + +suffix-$(CONFIG_KERNEL_GZIP) := .gz +suffix-$(CONFIG_KERNEL_BZIP2) := .bz2 +suffix-$(CONFIG_KERNEL_LZ4) := .lz4 +suffix-$(CONFIG_KERNEL_LZMA) := .lzma +suffix-$(CONFIG_KERNEL_LZO) := .lzo +suffix-$(CONFIG_KERNEL_XZ) := .xz +suffix-$(CONFIG_KERNEL_ZSTD) := .zst -$(obj)/startup.a: $(OBJECTS) FORCE - $(call if_changed,ar) +$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE + $(call if_changed,gzip) +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE + $(call if_changed,bzip2_with_size) +$(obj)/vmlinux.bin.lz4: $(obj)/vmlinux.bin FORCE + $(call if_changed,lz4_with_size) +$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE + $(call if_changed,lzma_with_size) +$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE + $(call if_changed,lzo_with_size) +$(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE + $(call if_changed,xzkern_with_size) +$(obj)/vmlinux.bin.zst: $(obj)/vmlinux.bin FORCE + $(call if_changed,zstd22_with_size) + +OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed +$(obj)/piggy.o: $(obj)/vmlinux.bin$(suffix-y) FORCE + $(call if_changed,objcopy) diff --git a/arch/s390/boot/clz_ctz.c b/arch/s390/boot/clz_ctz.c new file mode 100644 index 000000000000..c3ebf248596b --- /dev/null +++ b/arch/s390/boot/clz_ctz.c @@ -0,0 +1,2 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "../../../../lib/clz_ctz.c" diff --git a/arch/s390/boot/compressed/.gitignore b/arch/s390/boot/compressed/.gitignore deleted file mode 100644 index 01d93832cf4a..000000000000 --- a/arch/s390/boot/compressed/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -vmlinux -vmlinux.lds -vmlinux.syms diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile deleted file mode 100644 index d04e0e7de0b3..000000000000 --- a/arch/s390/boot/compressed/Makefile +++ /dev/null @@ -1,86 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# linux/arch/s390/boot/compressed/Makefile -# -# create a compressed vmlinux image from the original vmlinux -# - -KCOV_INSTRUMENT := n -GCOV_PROFILE := n -UBSAN_SANITIZE := n -KASAN_SANITIZE := n -KCSAN_SANITIZE := n - -obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o -obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o -obj-all := $(obj-y) piggy.o syms.o -targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 -targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4 -targets += vmlinux.bin.zst -targets += info.bin syms.bin vmlinux.syms $(obj-all) - -KBUILD_AFLAGS := 
$(KBUILD_AFLAGS_DECOMPRESSOR) -KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR) -OBJCOPYFLAGS := - -OBJECTS := $(addprefix $(obj)/,$(obj-y)) -OBJECTS_ALL := $(addprefix $(obj)/,$(obj-all)) - -LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup --build-id=sha1 -T -$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS_ALL) FORCE - $(call if_changed,ld) - -LDFLAGS_vmlinux.syms := --oformat $(LD_BFD) -e startup -T -$(obj)/vmlinux.syms: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS) FORCE - $(call if_changed,ld) - -quiet_cmd_dumpsyms = DUMPSYMS $< -define cmd_dumpsyms - $(NM) -n -S --format=bsd "$<" | sed -nE 's/^0*([0-9a-fA-F]+) 0*([0-9a-fA-F]+) [tT] ([^ ]*)$$/\1 \2 \3/p' | tr '\n' '\0' > "$@" -endef - -$(obj)/syms.bin: $(obj)/vmlinux.syms FORCE - $(call if_changed,dumpsyms) - -OBJCOPYFLAGS_syms.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.decompressor.syms -$(obj)/syms.o: $(obj)/syms.bin FORCE - $(call if_changed,objcopy) - -OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info --set-section-flags .vmlinux.info=load -$(obj)/info.bin: vmlinux FORCE - $(call if_changed,objcopy) - -OBJCOPYFLAGS_info.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.info -$(obj)/info.o: $(obj)/info.bin FORCE - $(call if_changed,objcopy) - -OBJCOPYFLAGS_vmlinux.bin := -O binary --remove-section=.comment --remove-section=.vmlinux.info -S -$(obj)/vmlinux.bin: vmlinux FORCE - $(call if_changed,objcopy) - -suffix-$(CONFIG_KERNEL_GZIP) := .gz -suffix-$(CONFIG_KERNEL_BZIP2) := .bz2 -suffix-$(CONFIG_KERNEL_LZ4) := .lz4 -suffix-$(CONFIG_KERNEL_LZMA) := .lzma -suffix-$(CONFIG_KERNEL_LZO) := .lzo -suffix-$(CONFIG_KERNEL_XZ) := .xz -suffix-$(CONFIG_KERNEL_ZSTD) := .zst - -$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE - $(call if_changed,gzip) -$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE - $(call if_changed,bzip2_with_size) -$(obj)/vmlinux.bin.lz4: $(obj)/vmlinux.bin FORCE - $(call if_changed,lz4_with_size) -$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE - $(call if_changed,lzma_with_size) -$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE - $(call if_changed,lzo_with_size) -$(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE - $(call if_changed,xzkern_with_size) -$(obj)/vmlinux.bin.zst: $(obj)/vmlinux.bin FORCE - $(call if_changed,zstd22_with_size) - -OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed -$(obj)/piggy.o: $(obj)/vmlinux.bin$(suffix-y) FORCE - $(call if_changed,objcopy) diff --git a/arch/s390/boot/compressed/clz_ctz.c b/arch/s390/boot/compressed/clz_ctz.c deleted file mode 100644 index c3ebf248596b..000000000000 --- a/arch/s390/boot/compressed/clz_ctz.c +++ /dev/null @@ -1,2 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include "../../../../lib/clz_ctz.c" diff --git a/arch/s390/boot/compressed/decompressor.c b/arch/s390/boot/compressed/decompressor.c deleted file mode 100644 index e27c2140d620..000000000000 --- a/arch/s390/boot/compressed/decompressor.c +++ /dev/null @@ -1,85 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Definitions and wrapper functions for kernel decompressor - * - * Copyright IBM Corp. 
2010 - * - * Author(s): Martin Schwidefsky - */ - -#include -#include -#include -#include "decompressor.h" - -/* - * gzip declarations - */ -#define STATIC static - -#undef memset -#undef memcpy -#undef memmove -#define memmove memmove -#define memzero(s, n) memset((s), 0, (n)) - -#ifdef CONFIG_KERNEL_BZIP2 -#define BOOT_HEAP_SIZE 0x400000 -#elif CONFIG_KERNEL_ZSTD -#define BOOT_HEAP_SIZE 0x30000 -#else -#define BOOT_HEAP_SIZE 0x10000 -#endif - -static unsigned long free_mem_ptr = (unsigned long) _end; -static unsigned long free_mem_end_ptr = (unsigned long) _end + BOOT_HEAP_SIZE; - -#ifdef CONFIG_KERNEL_GZIP -#include "../../../../lib/decompress_inflate.c" -#endif - -#ifdef CONFIG_KERNEL_BZIP2 -#include "../../../../lib/decompress_bunzip2.c" -#endif - -#ifdef CONFIG_KERNEL_LZ4 -#include "../../../../lib/decompress_unlz4.c" -#endif - -#ifdef CONFIG_KERNEL_LZMA -#include "../../../../lib/decompress_unlzma.c" -#endif - -#ifdef CONFIG_KERNEL_LZO -#include "../../../../lib/decompress_unlzo.c" -#endif - -#ifdef CONFIG_KERNEL_XZ -#include "../../../../lib/decompress_unxz.c" -#endif - -#ifdef CONFIG_KERNEL_ZSTD -#include "../../../../lib/decompress_unzstd.c" -#endif - -#define decompress_offset ALIGN((unsigned long)_end + BOOT_HEAP_SIZE, PAGE_SIZE) - -unsigned long mem_safe_offset(void) -{ - /* - * due to 4MB HEAD_SIZE for bzip2 - * 'decompress_offset + vmlinux.image_size' could be larger than - * kernel at final position + its .bss, so take the larger of two - */ - return max(decompress_offset + vmlinux.image_size, - vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size); -} - -void *decompress_kernel(void) -{ - void *output = (void *)decompress_offset; - - __decompress(_compressed_start, _compressed_end - _compressed_start, - NULL, NULL, output, 0, NULL, error); - return output; -} diff --git a/arch/s390/boot/compressed/decompressor.h b/arch/s390/boot/compressed/decompressor.h deleted file mode 100644 index f75cc31a77dd..000000000000 --- a/arch/s390/boot/compressed/decompressor.h +++ /dev/null @@ -1,38 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef BOOT_COMPRESSED_DECOMPRESSOR_H -#define BOOT_COMPRESSED_DECOMPRESSOR_H - -#include - -#ifdef CONFIG_KERNEL_UNCOMPRESSED -static inline void *decompress_kernel(void) { return NULL; } -#else -void *decompress_kernel(void); -#endif -unsigned long mem_safe_offset(void); -void error(char *m); - -struct vmlinux_info { - unsigned long default_lma; - void (*entry)(void); - unsigned long image_size; /* does not include .bss */ - unsigned long bss_size; /* uncompressed image .bss size */ - unsigned long bootdata_off; - unsigned long bootdata_size; - unsigned long bootdata_preserved_off; - unsigned long bootdata_preserved_size; - unsigned long dynsym_start; - unsigned long rela_dyn_start; - unsigned long rela_dyn_end; - unsigned long amode31_size; -}; - -/* Symbols defined by linker scripts */ -extern char _end[]; -extern unsigned char _compressed_start[]; -extern unsigned char _compressed_end[]; -extern char _vmlinux_info[]; - -#define vmlinux (*(struct vmlinux_info *)_vmlinux_info) - -#endif /* BOOT_COMPRESSED_DECOMPRESSOR_H */ diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S deleted file mode 100644 index 918e05137d4c..000000000000 --- a/arch/s390/boot/compressed/vmlinux.lds.S +++ /dev/null @@ -1,108 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#include -#include -#include -#include -#include - -OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") -OUTPUT_ARCH(s390:64-bit) - -ENTRY(startup) 
- -SECTIONS -{ - . = 0; - .head.text : { - _head = . ; - HEAD_TEXT - _ehead = . ; - } - .text : { - _text = .; /* Text */ - *(.text) - *(.text.*) - _etext = . ; - } - .rodata : { - _rodata = . ; - *(.rodata) /* read-only data */ - *(.rodata.*) - _erodata = . ; - } - NOTES - .data : { - _data = . ; - *(.data) - *(.data.*) - _edata = . ; - } - - BOOT_DATA - BOOT_DATA_PRESERVED - - /* - * This is the BSS section of the decompressor and not of the decompressed Linux kernel. - * It will consume place in the decompressor's image. - */ - . = ALIGN(8); - .bss : { - _bss = . ; - *(.bss) - *(.bss.*) - *(COMMON) - /* - * Stacks for the decompressor - */ - . = ALIGN(PAGE_SIZE); - _dump_info_stack_start = .; - . += PAGE_SIZE; - _dump_info_stack_end = .; - . = ALIGN(PAGE_SIZE); - _stack_start = .; - . += BOOT_STACK_SIZE; - _stack_end = .; - _ebss = .; - } - - /* - * uncompressed image info used by the decompressor it should match - * struct vmlinux_info. It comes from .vmlinux.info section of - * uncompressed vmlinux in a form of info.o - */ - . = ALIGN(8); - .vmlinux.info : { - _vmlinux_info = .; - *(.vmlinux.info) - } - - .decompressor.syms : { - . += 1; /* make sure we have \0 before the first entry */ - . = ALIGN(2); - _decompressor_syms_start = .; - *(.decompressor.syms) - _decompressor_syms_end = .; - } - -#ifdef CONFIG_KERNEL_UNCOMPRESSED - . = 0x100000; -#else - . = ALIGN(8); -#endif - .rodata.compressed : { - _compressed_start = .; - *(.vmlinux.bin.compressed) - _compressed_end = .; - FILL(0xff); - . = ALIGN(4096); - } - _end = .; - - /* Sections to be discarded */ - /DISCARD/ : { - *(.eh_frame) - *(__ex_table) - *(*__ksymtab*) - *(___kcrctab*) - } -} diff --git a/arch/s390/boot/decompressor.c b/arch/s390/boot/decompressor.c new file mode 100644 index 000000000000..e27c2140d620 --- /dev/null +++ b/arch/s390/boot/decompressor.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Definitions and wrapper functions for kernel decompressor + * + * Copyright IBM Corp. 
2010 + * + * Author(s): Martin Schwidefsky + */ + +#include +#include +#include +#include "decompressor.h" + +/* + * gzip declarations + */ +#define STATIC static + +#undef memset +#undef memcpy +#undef memmove +#define memmove memmove +#define memzero(s, n) memset((s), 0, (n)) + +#ifdef CONFIG_KERNEL_BZIP2 +#define BOOT_HEAP_SIZE 0x400000 +#elif CONFIG_KERNEL_ZSTD +#define BOOT_HEAP_SIZE 0x30000 +#else +#define BOOT_HEAP_SIZE 0x10000 +#endif + +static unsigned long free_mem_ptr = (unsigned long) _end; +static unsigned long free_mem_end_ptr = (unsigned long) _end + BOOT_HEAP_SIZE; + +#ifdef CONFIG_KERNEL_GZIP +#include "../../../../lib/decompress_inflate.c" +#endif + +#ifdef CONFIG_KERNEL_BZIP2 +#include "../../../../lib/decompress_bunzip2.c" +#endif + +#ifdef CONFIG_KERNEL_LZ4 +#include "../../../../lib/decompress_unlz4.c" +#endif + +#ifdef CONFIG_KERNEL_LZMA +#include "../../../../lib/decompress_unlzma.c" +#endif + +#ifdef CONFIG_KERNEL_LZO +#include "../../../../lib/decompress_unlzo.c" +#endif + +#ifdef CONFIG_KERNEL_XZ +#include "../../../../lib/decompress_unxz.c" +#endif + +#ifdef CONFIG_KERNEL_ZSTD +#include "../../../../lib/decompress_unzstd.c" +#endif + +#define decompress_offset ALIGN((unsigned long)_end + BOOT_HEAP_SIZE, PAGE_SIZE) + +unsigned long mem_safe_offset(void) +{ + /* + * due to 4MB HEAD_SIZE for bzip2 + * 'decompress_offset + vmlinux.image_size' could be larger than + * kernel at final position + its .bss, so take the larger of two + */ + return max(decompress_offset + vmlinux.image_size, + vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size); +} + +void *decompress_kernel(void) +{ + void *output = (void *)decompress_offset; + + __decompress(_compressed_start, _compressed_end - _compressed_start, + NULL, NULL, output, 0, NULL, error); + return output; +} diff --git a/arch/s390/boot/decompressor.h b/arch/s390/boot/decompressor.h new file mode 100644 index 000000000000..f75cc31a77dd --- /dev/null +++ b/arch/s390/boot/decompressor.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef BOOT_COMPRESSED_DECOMPRESSOR_H +#define BOOT_COMPRESSED_DECOMPRESSOR_H + +#include + +#ifdef CONFIG_KERNEL_UNCOMPRESSED +static inline void *decompress_kernel(void) { return NULL; } +#else +void *decompress_kernel(void); +#endif +unsigned long mem_safe_offset(void); +void error(char *m); + +struct vmlinux_info { + unsigned long default_lma; + void (*entry)(void); + unsigned long image_size; /* does not include .bss */ + unsigned long bss_size; /* uncompressed image .bss size */ + unsigned long bootdata_off; + unsigned long bootdata_size; + unsigned long bootdata_preserved_off; + unsigned long bootdata_preserved_size; + unsigned long dynsym_start; + unsigned long rela_dyn_start; + unsigned long rela_dyn_end; + unsigned long amode31_size; +}; + +/* Symbols defined by linker scripts */ +extern char _end[]; +extern unsigned char _compressed_start[]; +extern unsigned char _compressed_end[]; +extern char _vmlinux_info[]; + +#define vmlinux (*(struct vmlinux_info *)_vmlinux_info) + +#endif /* BOOT_COMPRESSED_DECOMPRESSOR_H */ diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c index d8984462071f..e8d74d4f62aa 100644 --- a/arch/s390/boot/kaslr.c +++ b/arch/s390/boot/kaslr.c @@ -8,7 +8,7 @@ #include #include #include -#include "compressed/decompressor.h" +#include "decompressor.h" #include "boot.h" #define PRNG_MODE_TDES 1 diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/mem_detect.c index 2f949cd9076b..7fa1a32ea0f3 100644 --- a/arch/s390/boot/mem_detect.c +++ 
b/arch/s390/boot/mem_detect.c @@ -7,7 +7,7 @@ #include #include #include -#include "compressed/decompressor.h" +#include "decompressor.h" #include "boot.h" struct mem_detect_info __bootdata(mem_detect); diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c index 1aa11a8f57dd..863e6bcaa5a1 100644 --- a/arch/s390/boot/startup.c +++ b/arch/s390/boot/startup.c @@ -10,7 +10,7 @@ #include #include #include -#include "compressed/decompressor.h" +#include "decompressor.h" #include "boot.h" #include "uv.h" diff --git a/arch/s390/boot/vmlinux.lds.S b/arch/s390/boot/vmlinux.lds.S new file mode 100644 index 000000000000..918e05137d4c --- /dev/null +++ b/arch/s390/boot/vmlinux.lds.S @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include +#include +#include + +OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") +OUTPUT_ARCH(s390:64-bit) + +ENTRY(startup) + +SECTIONS +{ + . = 0; + .head.text : { + _head = . ; + HEAD_TEXT + _ehead = . ; + } + .text : { + _text = .; /* Text */ + *(.text) + *(.text.*) + _etext = . ; + } + .rodata : { + _rodata = . ; + *(.rodata) /* read-only data */ + *(.rodata.*) + _erodata = . ; + } + NOTES + .data : { + _data = . ; + *(.data) + *(.data.*) + _edata = . ; + } + + BOOT_DATA + BOOT_DATA_PRESERVED + + /* + * This is the BSS section of the decompressor and not of the decompressed Linux kernel. + * It will consume place in the decompressor's image. + */ + . = ALIGN(8); + .bss : { + _bss = . ; + *(.bss) + *(.bss.*) + *(COMMON) + /* + * Stacks for the decompressor + */ + . = ALIGN(PAGE_SIZE); + _dump_info_stack_start = .; + . += PAGE_SIZE; + _dump_info_stack_end = .; + . = ALIGN(PAGE_SIZE); + _stack_start = .; + . += BOOT_STACK_SIZE; + _stack_end = .; + _ebss = .; + } + + /* + * uncompressed image info used by the decompressor it should match + * struct vmlinux_info. It comes from .vmlinux.info section of + * uncompressed vmlinux in a form of info.o + */ + . = ALIGN(8); + .vmlinux.info : { + _vmlinux_info = .; + *(.vmlinux.info) + } + + .decompressor.syms : { + . += 1; /* make sure we have \0 before the first entry */ + . = ALIGN(2); + _decompressor_syms_start = .; + *(.decompressor.syms) + _decompressor_syms_end = .; + } + +#ifdef CONFIG_KERNEL_UNCOMPRESSED + . = 0x100000; +#else + . = ALIGN(8); +#endif + .rodata.compressed : { + _compressed_start = .; + *(.vmlinux.bin.compressed) + _compressed_end = .; + FILL(0xff); + . = ALIGN(4096); + } + _end = .; + + /* Sections to be discarded */ + /DISCARD/ : { + *(.eh_frame) + *(__ex_table) + *(*__ksymtab*) + *(___kcrctab*) + } +} -- cgit v1.2.3 From 734757976e337dff02da5e36dedbac8321326f5c Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Sun, 24 Apr 2022 15:44:05 +0200 Subject: s390/head: adjust iplstart entry point Move iplstart entry point to 0x200 again, instead of the middle of the ipl code. This way even the comment describing the ccw program is correct again. 
Acked-by: Vasily Gorbik Signed-off-by: Heiko Carstens --- arch/s390/boot/head.S | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S index 666692429db0..206fca8cb816 100644 --- a/arch/s390/boot/head.S +++ b/arch/s390/boot/head.S @@ -33,11 +33,13 @@ #define EP_OFFSET 0x10008 #define EP_STRING "S390EP" +#define IPL_START 0x200 + __HEAD #define IPL_BS 0x730 .org 0 - .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded + .long 0x00080000,0x80000000+IPL_START # The first 24 bytes are loaded .long 0x02000018,0x60000050 # by ipl to addresses 0-23. .long 0x02000068,0x60000050 # (a PSW and two CCWs). .fill 80-24,1,0x40 # bytes 24-79 are discarded !! @@ -63,7 +65,7 @@ __HEAD .long 0x020006e0,0x20000050 .org __LC_RST_NEW_PSW # 0x1a0 - .quad 0,iplstart + .quad 0,IPL_START .org __LC_EXT_NEW_PSW # 0x1b0 .quad 0x0002000180000000,0x1b0 # disabled wait .org __LC_PGM_NEW_PSW # 0x1d0 @@ -71,8 +73,9 @@ __HEAD .org __LC_IO_NEW_PSW # 0x1f0 .quad 0x0002000180000000,0x1f0 # disabled wait - .org 0x200 - + .org IPL_START +ipl_start: + j .Liplcont # # subroutine to wait for end I/O # @@ -158,7 +161,7 @@ __HEAD .endr .long 0x02200050,0x00000000 -iplstart: +.Liplcont: mvi __LC_AR_MODE_ID,1 # set esame flag slr %r0,%r0 # set cpuid to zero lhi %r1,2 # mode 2 = esame (dump) -- cgit v1.2.3 From 84f4e1dfb243c0aaed0425a2b1c308a2fb37425d Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Sun, 24 Apr 2022 15:52:01 +0200 Subject: s390/boot: change initial program check handler to disabled wait psw The program check handler of the kernel image points to startup_pgm_check_handler. However an early program check which happens while loading the kernel image will jump to potentially random code, since the code of the program check handler is not yet loaded; leading to a program check loop. Therefore initialize it to a disabled wait psw and let the startup code set the proper psw when everything is in memory. Reviewed-by: Vasily Gorbik Signed-off-by: Heiko Carstens --- arch/s390/boot/head.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S index 206fca8cb816..0b2cec40f261 100644 --- a/arch/s390/boot/head.S +++ b/arch/s390/boot/head.S @@ -69,7 +69,7 @@ __HEAD .org __LC_EXT_NEW_PSW # 0x1b0 .quad 0x0002000180000000,0x1b0 # disabled wait .org __LC_PGM_NEW_PSW # 0x1d0 - .quad 0x0000000180000000,startup_pgm_check_handler + .quad 0x0002000180000000,0x1d0 # disabled wait .org __LC_IO_NEW_PSW # 0x1f0 .quad 0x0002000180000000,0x1f0 # disabled wait -- cgit v1.2.3 From aceb06d1e83783bf5a25c4979647bd8af6a99654 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Sun, 24 Apr 2022 16:02:21 +0200 Subject: s390/head: initialize all new psws Initialize all new psws with disabled wait psws, except for the restart new psw. This way every unexpected exception, svc, machine check, or interrupt is handled properly. Reviewed-by: Vasily Gorbik Signed-off-by: Heiko Carstens --- arch/s390/boot/head.S | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S index 0b2cec40f261..a63f7de76462 100644 --- a/arch/s390/boot/head.S +++ b/arch/s390/boot/head.S @@ -64,14 +64,22 @@ __HEAD .long 0x02000690,0x60000050 .long 0x020006e0,0x20000050 - .org __LC_RST_NEW_PSW # 0x1a0 +# The restart psw points to ipl_entry, which allows to load a kernel image +# into memory and starting it by a psw restart on any cpu. 
+# All other default psw new locations contain a disabled wait psw where the +# address indicates which psw was loaded. + .org __LC_RST_NEW_PSW .quad 0,IPL_START - .org __LC_EXT_NEW_PSW # 0x1b0 - .quad 0x0002000180000000,0x1b0 # disabled wait - .org __LC_PGM_NEW_PSW # 0x1d0 - .quad 0x0002000180000000,0x1d0 # disabled wait - .org __LC_IO_NEW_PSW # 0x1f0 - .quad 0x0002000180000000,0x1f0 # disabled wait + .org __LC_EXT_NEW_PSW + .quad 0x0002000180000000,__LC_EXT_NEW_PSW + .org __LC_SVC_NEW_PSW + .quad 0x0002000180000000,__LC_SVC_NEW_PSW + .org __LC_PGM_NEW_PSW + .quad 0x0002000180000000,__LC_PGM_NEW_PSW + .org __LC_MCK_NEW_PSW + .quad 0x0002000180000000,__LC_MCK_NEW_PSW + .org __LC_IO_NEW_PSW + .quad 0x0002000180000000,__LC_IO_NEW_PSW .org IPL_START ipl_start: -- cgit v1.2.3 From 67a9c428ef35780d09e5a3c1247919789a8212b4 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 27 Apr 2022 04:14:29 +0200 Subject: s390/ptrace: move short psw definitions to ptrace header file The short psw definitions are contained in compat header files, however short psws are not compat specific. Therefore move the definitions to ptrace header file. This also gets rid of a compat header include in kvm code. Acked-by: Vasily Gorbik Signed-off-by: Heiko Carstens --- arch/s390/include/asm/compat.h | 25 +------------------------ arch/s390/include/asm/ptrace.h | 29 +++++++++++++++++++++++++++++ arch/s390/kernel/compat_linux.h | 9 ++------- arch/s390/kvm/priv.c | 1 - 4 files changed, 32 insertions(+), 32 deletions(-) diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index cdc7ae72529d..7d6fe813ac39 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -8,6 +8,7 @@ #include #include #include +#include #define compat_mode_t compat_mode_t typedef u16 compat_mode_t; @@ -22,32 +23,8 @@ typedef u16 compat_mode_t; (__force t)(__TYPE_IS_PTR(t) ? 
((v) & 0x7fffffff) : (v)); \ }) -#define PSW32_MASK_PER 0x40000000UL -#define PSW32_MASK_DAT 0x04000000UL -#define PSW32_MASK_IO 0x02000000UL -#define PSW32_MASK_EXT 0x01000000UL -#define PSW32_MASK_KEY 0x00F00000UL -#define PSW32_MASK_BASE 0x00080000UL /* Always one */ -#define PSW32_MASK_MCHECK 0x00040000UL -#define PSW32_MASK_WAIT 0x00020000UL -#define PSW32_MASK_PSTATE 0x00010000UL -#define PSW32_MASK_ASC 0x0000C000UL -#define PSW32_MASK_CC 0x00003000UL -#define PSW32_MASK_PM 0x00000f00UL -#define PSW32_MASK_RI 0x00000080UL - #define PSW32_MASK_USER 0x0000FF00UL -#define PSW32_ADDR_AMODE 0x80000000UL -#define PSW32_ADDR_INSN 0x7FFFFFFFUL - -#define PSW32_DEFAULT_KEY (((u32) PAGE_DEFAULT_ACC) << 20) - -#define PSW32_ASC_PRIMARY 0x00000000UL -#define PSW32_ASC_ACCREG 0x00004000UL -#define PSW32_ASC_SECONDARY 0x00008000UL -#define PSW32_ASC_HOME 0x0000C000UL - #define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \ PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \ PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | \ diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index ddb70fb13fbc..8bae33ab320a 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h @@ -71,6 +71,35 @@ enum { &(*(struct psw_bits *)(&(__psw))); \ })) +#define PSW32_MASK_PER 0x40000000UL +#define PSW32_MASK_DAT 0x04000000UL +#define PSW32_MASK_IO 0x02000000UL +#define PSW32_MASK_EXT 0x01000000UL +#define PSW32_MASK_KEY 0x00F00000UL +#define PSW32_MASK_BASE 0x00080000UL /* Always one */ +#define PSW32_MASK_MCHECK 0x00040000UL +#define PSW32_MASK_WAIT 0x00020000UL +#define PSW32_MASK_PSTATE 0x00010000UL +#define PSW32_MASK_ASC 0x0000C000UL +#define PSW32_MASK_CC 0x00003000UL +#define PSW32_MASK_PM 0x00000f00UL +#define PSW32_MASK_RI 0x00000080UL + +#define PSW32_ADDR_AMODE 0x80000000UL +#define PSW32_ADDR_INSN 0x7FFFFFFFUL + +#define PSW32_DEFAULT_KEY (((u32)PAGE_DEFAULT_ACC) << 20) + +#define PSW32_ASC_PRIMARY 0x00000000UL +#define PSW32_ASC_ACCREG 0x00004000UL +#define PSW32_ASC_SECONDARY 0x00008000UL +#define PSW32_ASC_HOME 0x0000C000UL + +typedef struct { + unsigned int mask; + unsigned int addr; +} psw_t32 __aligned(8); + #define PGM_INT_CODE_MASK 0x7f #define PGM_INT_CODE_PER 0x80 diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h index 64509e7dbd3b..f46ca315631d 100644 --- a/arch/s390/kernel/compat_linux.h +++ b/arch/s390/kernel/compat_linux.h @@ -5,6 +5,7 @@ #include #include #include +#include /* Macro that masks the high order bit of an 32 bit pointer and converts it*/ /* to a 64 bit pointer */ @@ -32,15 +33,9 @@ typedef struct freg_t32 fprs[__NUM_FPRS]; } _s390_fp_regs32; -typedef struct -{ - __u32 mask; - __u32 addr; -} _psw_t32 __attribute__ ((aligned(8))); - typedef struct { - _psw_t32 psw; + psw_t32 psw; __u32 gprs[__NUM_GPRS]; __u32 acrs[__NUM_ACRS]; } _s390_regs_common32; diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 5beb7a4a11b3..83bb5cf97282 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include -- cgit v1.2.3 From 834979c27f5281f37ae9ce5191134f26ae7b9fd0 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 27 Apr 2022 04:14:34 +0200 Subject: s390/boot: convert initial lowcore to C Convert initial lowcore to C and use proper defines and structures to initialize it. This should make the z/VM ipl procedure a bit less magic. 
Acked-by: Peter Oberparleiter Reviewed-by: Vasily Gorbik Signed-off-by: Heiko Carstens --- arch/s390/boot/Makefile | 2 +- arch/s390/boot/boot.h | 6 +++- arch/s390/boot/head.S | 62 +++++--------------------------- arch/s390/boot/ipl_data.c | 84 ++++++++++++++++++++++++++++++++++++++++++++ arch/s390/boot/vmlinux.lds.S | 5 +++ 5 files changed, 103 insertions(+), 56 deletions(-) create mode 100644 arch/s390/boot/ipl_data.c diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile index 14d66501e4a9..883357a211a3 100644 --- a/arch/s390/boot/Makefile +++ b/arch/s390/boot/Makefile @@ -37,7 +37,7 @@ CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o -obj-y += version.o pgm_check_info.o ctype.o +obj-y += version.o pgm_check_info.o ctype.o ipl_data.o obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o obj-$(CONFIG_RELOCATABLE) += machine_kexec_reloc.o obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h index 641ce0fc5c3e..70418389414d 100644 --- a/arch/s390/boot/boot.h +++ b/arch/s390/boot/boot.h @@ -2,9 +2,12 @@ #ifndef BOOT_BOOT_H #define BOOT_BOOT_H -#include #include +#define IPL_START 0x200 + +#ifndef __ASSEMBLY__ + void startup_kernel(void); unsigned long detect_memory(void); bool is_ipl_block_dump(void); @@ -31,4 +34,5 @@ extern char _stack_start[], _stack_end[]; unsigned long read_ipl_report(unsigned long safe_offset); +#endif /* __ASSEMBLY__ */ #endif /* BOOT_BOOT_H */ diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S index a63f7de76462..ceb118621eaa 100644 --- a/arch/s390/boot/head.S +++ b/arch/s390/boot/head.S @@ -27,61 +27,15 @@ #include #include #include +#include "boot.h" #define ARCH_OFFSET 4 #define EP_OFFSET 0x10008 #define EP_STRING "S390EP" - -#define IPL_START 0x200 +#define IPL_BS 0x730 __HEAD - -#define IPL_BS 0x730 - .org 0 - .long 0x00080000,0x80000000+IPL_START # The first 24 bytes are loaded - .long 0x02000018,0x60000050 # by ipl to addresses 0-23. - .long 0x02000068,0x60000050 # (a PSW and two CCWs). - .fill 80-24,1,0x40 # bytes 24-79 are discarded !! - .long 0x020000f0,0x60000050 # The next 160 byte are loaded - .long 0x02000140,0x60000050 # to addresses 0x18-0xb7 - .long 0x02000190,0x60000050 # They form the continuation - .long 0x020001e0,0x60000050 # of the CCW program started - .long 0x02000230,0x60000050 # by ipl and load the range - .long 0x02000280,0x60000050 # 0x0f0-0x730 from the image - .long 0x020002d0,0x60000050 # to the range 0x0f0-0x730 - .long 0x02000320,0x60000050 # in memory. At the end of - .long 0x02000370,0x60000050 # the channel program the PSW - .long 0x020003c0,0x60000050 # at location 0 is loaded. - .long 0x02000410,0x60000050 # Initial processing starts - .long 0x02000460,0x60000050 # at 0x200 = iplstart. - .long 0x020004b0,0x60000050 - .long 0x02000500,0x60000050 - .long 0x02000550,0x60000050 - .long 0x020005a0,0x60000050 - .long 0x020005f0,0x60000050 - .long 0x02000640,0x60000050 - .long 0x02000690,0x60000050 - .long 0x020006e0,0x20000050 - -# The restart psw points to ipl_entry, which allows to load a kernel image -# into memory and starting it by a psw restart on any cpu. -# All other default psw new locations contain a disabled wait psw where the -# address indicates which psw was loaded. 
- .org __LC_RST_NEW_PSW - .quad 0,IPL_START - .org __LC_EXT_NEW_PSW - .quad 0x0002000180000000,__LC_EXT_NEW_PSW - .org __LC_SVC_NEW_PSW - .quad 0x0002000180000000,__LC_SVC_NEW_PSW - .org __LC_PGM_NEW_PSW - .quad 0x0002000180000000,__LC_PGM_NEW_PSW - .org __LC_MCK_NEW_PSW - .quad 0x0002000180000000,__LC_MCK_NEW_PSW - .org __LC_IO_NEW_PSW - .quad 0x0002000180000000,__LC_IO_NEW_PSW - - .org IPL_START ipl_start: j .Liplcont # @@ -279,10 +233,10 @@ ipl_start: # this is called either by the ipl loader or directly by PSW restart # or linload or SALIPL # - .org STARTUP_NORMAL_OFFSET + .org STARTUP_NORMAL_OFFSET - IPL_START SYM_CODE_START(startup) j startup_normal - .org EP_OFFSET + .org EP_OFFSET - IPL_START # # This is a list of s390 kernel entry points. At address 0x1000f the number of # valid entry points is stored. @@ -294,7 +248,7 @@ SYM_CODE_START(startup) # # kdump startup-code, running in 64 bit absolute addressing mode # - .org STARTUP_KDUMP_OFFSET + .org STARTUP_KDUMP_OFFSET - IPL_START j startup_kdump SYM_CODE_END(startup) SYM_CODE_START_LOCAL(startup_normal) @@ -384,7 +338,7 @@ SYM_CODE_END(startup_pgm_check_handler) # params at 10400 (setup.h) # Must be keept in sync with struct parmarea in setup.h # - .org PARMAREA + .org PARMAREA - IPL_START SYM_DATA_START(parmarea) .quad 0 # IPL_DEVICE .quad 0 # INITRD_START @@ -394,8 +348,8 @@ SYM_DATA_START(parmarea) .quad kernel_version # points to kernel version string .quad COMMAND_LINE_SIZE - .org COMMAND_LINE + .org COMMAND_LINE - IPL_START .byte "root=/dev/ram0 ro" .byte 0 - .org PARMAREA+__PARMAREA_SIZE + .org PARMAREA+__PARMAREA_SIZE - IPL_START SYM_DATA_END(parmarea) diff --git a/arch/s390/boot/ipl_data.c b/arch/s390/boot/ipl_data.c new file mode 100644 index 000000000000..90749e3b2fa6 --- /dev/null +++ b/arch/s390/boot/ipl_data.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include "boot.h" + +#define CCW0(cmd, addr, cnt, flg) \ + { .cmd_code = cmd, .cda = addr, .count = cnt, .flags = flg, } + +#define PSW_MASK_DISABLED (PSW_MASK_WAIT | PSW_MASK_EA | PSW_MASK_BA) + +struct ipl_lowcore { + psw_t32 ipl_psw; /* 0x0000 */ + struct ccw0 ccwpgm[2]; /* 0x0008 */ + u8 fill[56]; /* 0x0018 */ + struct ccw0 ccwpgmcc[20]; /* 0x0050 */ + u8 pad_0xf0[0x01a0-0x00f0]; /* 0x00f0 */ + psw_t restart_psw; /* 0x01a0 */ + psw_t external_new_psw; /* 0x01b0 */ + psw_t svc_new_psw; /* 0x01c0 */ + psw_t program_new_psw; /* 0x01d0 */ + psw_t mcck_new_psw; /* 0x01e0 */ + psw_t io_new_psw; /* 0x01f0 */ +}; + +/* + * Initial lowcore for IPL: the first 24 bytes are loaded by IPL to + * addresses 0-23 (a PSW and two CCWs). Bytes 24-79 are discarded. + * The next 160 bytes are loaded to addresses 0x18-0xb7. They form + * the continuation of the CCW program started by IPL and load the + * range 0x0f0-0x730 from the image to the range 0x0f0-0x730 in + * memory. At the end of the channel program the PSW at location 0 is + * loaded. + * Initial processing starts at 0x200 = iplstart. + * + * The restart psw points to iplstart which allows to load a kernel + * image into memory and starting it by a psw restart on any cpu. All + * other default psw new locations contain a disabled wait psw where + * the address indicates which psw was loaded. + * + * Note that the 'file' utility can detect s390 kernel images. For + * that to succeed the two initial CCWs, and the 0x40 fill bytes must + * be present. 
+ */ +struct ipl_lowcore ipl_lowcore __section(".ipldata") = { + .ipl_psw = { .mask = PSW32_MASK_BASE, .addr = PSW32_ADDR_AMODE | IPL_START }, + .ccwpgm = { + [ 0] = CCW0(CCW_CMD_READ_IPL, 0x018, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), + [ 1] = CCW0(CCW_CMD_READ_IPL, 0x068, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), + }, + .fill = { + [ 0 ... 55] = 0x40, + }, + .ccwpgmcc = { + [ 0] = CCW0(CCW_CMD_READ_IPL, 0x0f0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), + [ 1] = CCW0(CCW_CMD_READ_IPL, 0x140, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), + [ 2] = CCW0(CCW_CMD_READ_IPL, 0x190, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), + [ 3] = CCW0(CCW_CMD_READ_IPL, 0x1e0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), + [ 4] = CCW0(CCW_CMD_READ_IPL, 0x230, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), + [ 5] = CCW0(CCW_CMD_READ_IPL, 0x280, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), + [ 6] = CCW0(CCW_CMD_READ_IPL, 0x2d0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), + [ 7] = CCW0(CCW_CMD_READ_IPL, 0x320, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), + [ 8] = CCW0(CCW_CMD_READ_IPL, 0x370, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), + [ 9] = CCW0(CCW_CMD_READ_IPL, 0x3c0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), + [10] = CCW0(CCW_CMD_READ_IPL, 0x410, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), + [11] = CCW0(CCW_CMD_READ_IPL, 0x460, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), + [12] = CCW0(CCW_CMD_READ_IPL, 0x4b0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), + [13] = CCW0(CCW_CMD_READ_IPL, 0x500, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), + [14] = CCW0(CCW_CMD_READ_IPL, 0x550, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), + [15] = CCW0(CCW_CMD_READ_IPL, 0x5a0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), + [16] = CCW0(CCW_CMD_READ_IPL, 0x5f0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), + [17] = CCW0(CCW_CMD_READ_IPL, 0x640, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), + [18] = CCW0(CCW_CMD_READ_IPL, 0x690, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), + [19] = CCW0(CCW_CMD_READ_IPL, 0x6e0, 0x50, CCW_FLAG_SLI), + }, + .restart_psw = { .mask = 0, .addr = IPL_START, }, + .external_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_EXT_NEW_PSW, }, + .svc_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_SVC_NEW_PSW, }, + .program_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_PGM_NEW_PSW, }, + .mcck_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_MCK_NEW_PSW, }, + .io_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_IO_NEW_PSW, }, +}; diff --git a/arch/s390/boot/vmlinux.lds.S b/arch/s390/boot/vmlinux.lds.S index 918e05137d4c..983f02dc985a 100644 --- a/arch/s390/boot/vmlinux.lds.S +++ b/arch/s390/boot/vmlinux.lds.S @@ -4,6 +4,7 @@ #include #include #include +#include "boot.h" OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") OUTPUT_ARCH(s390:64-bit) @@ -13,6 +14,10 @@ ENTRY(startup) SECTIONS { . = 0; + .ipldata : { + *(.ipldata) + } + . = IPL_START; .head.text : { _head = . ; HEAD_TEXT -- cgit v1.2.3 From f84d88ed3beb7fc2b4549e4c213ad428c0be9029 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 25 Apr 2022 21:24:56 +0200 Subject: s390/boot: convert parmarea to C Convert parmarea to C, which makes it much easier to initialize it. No need to keep offsets in assembler code in sync with struct parmarea anymore. 
Reviewed-by: Vasily Gorbik Signed-off-by: Heiko Carstens --- arch/s390/boot/head.S | 20 -------------------- arch/s390/boot/ipl_data.c | 2 +- arch/s390/boot/ipl_parm.c | 7 +++++++ arch/s390/boot/vmlinux.lds.S | 4 ++++ 4 files changed, 12 insertions(+), 21 deletions(-) diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S index ceb118621eaa..2ced90172680 100644 --- a/arch/s390/boot/head.S +++ b/arch/s390/boot/head.S @@ -333,23 +333,3 @@ SYM_CODE_START_LOCAL(startup_pgm_check_handler) lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r8) lpswe __LC_RETURN_PSW # disabled wait SYM_CODE_END(startup_pgm_check_handler) - -# -# params at 10400 (setup.h) -# Must be keept in sync with struct parmarea in setup.h -# - .org PARMAREA - IPL_START -SYM_DATA_START(parmarea) - .quad 0 # IPL_DEVICE - .quad 0 # INITRD_START - .quad 0 # INITRD_SIZE - .quad 0 # OLDMEM_BASE - .quad 0 # OLDMEM_SIZE - .quad kernel_version # points to kernel version string - .quad COMMAND_LINE_SIZE - - .org COMMAND_LINE - IPL_START - .byte "root=/dev/ram0 ro" - .byte 0 - .org PARMAREA+__PARMAREA_SIZE - IPL_START -SYM_DATA_END(parmarea) diff --git a/arch/s390/boot/ipl_data.c b/arch/s390/boot/ipl_data.c index 90749e3b2fa6..0846e2b249c6 100644 --- a/arch/s390/boot/ipl_data.c +++ b/arch/s390/boot/ipl_data.c @@ -44,7 +44,7 @@ struct ipl_lowcore { * that to succeed the two initial CCWs, and the 0x40 fill bytes must * be present. */ -struct ipl_lowcore ipl_lowcore __section(".ipldata") = { +static struct ipl_lowcore ipl_lowcore __used __section(".ipldata") = { .ipl_psw = { .mask = PSW32_MASK_BASE, .addr = PSW32_ADDR_AMODE | IPL_START }, .ccwpgm = { [ 0] = CCW0(CCW_CMD_READ_IPL, 0x018, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c index 9ed7e29c81d9..ca78d6162245 100644 --- a/arch/s390/boot/ipl_parm.c +++ b/arch/s390/boot/ipl_parm.c @@ -8,9 +8,16 @@ #include #include #include +#include #include #include "boot.h" +struct parmarea parmarea __section(".parmarea") = { + .kernel_version = (unsigned long)kernel_version, + .max_command_line_size = COMMAND_LINE_SIZE, + .command_line = "root=/dev/ram0 ro", +}; + char __bootdata(early_command_line)[COMMAND_LINE_SIZE]; int __bootdata(noexec_disabled); diff --git a/arch/s390/boot/vmlinux.lds.S b/arch/s390/boot/vmlinux.lds.S index 983f02dc985a..af5c6860e0a1 100644 --- a/arch/s390/boot/vmlinux.lds.S +++ b/arch/s390/boot/vmlinux.lds.S @@ -23,6 +23,10 @@ SECTIONS HEAD_TEXT _ehead = . ; } + . = PARMAREA; + .parmarea : { + *(.parmarea) + } .text : { _text = .; /* Text */ *(.text) -- cgit v1.2.3 From 29b06ad7e8a69ad1cbfe7898aef1d9cb7fbd02a5 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 3 May 2022 14:59:16 +0200 Subject: s390/entry: remove broken and not needed code LLVM's integrated assembler reports the following error when compiling entry.S: :38:5: error: unknown token in expression tm %r8,0x0001 # coming from user space? The correct instruction would have been tmhh instead of tm. The current code is doing nothing, since (with gas) it get's translated to a tm instruction which reads from real address 8, which again contains always zero, and therefore the conditional code is never executed. Note that due to the missing displacement gas translates "%r8" into "8(%r0)". Also code inspection reveals that this conditional code is not needed. Therefore remove it. 
Reviewed-by: Sven Schnelle Reviewed-by: Alexander Gordeev Signed-off-by: Heiko Carstens --- arch/s390/kernel/entry.S | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 59b69c8ab5e1..a6008e58631b 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -473,10 +473,7 @@ ENTRY(\name) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC MBEAR %r11 stmg %r8,%r9,__PT_PSW(%r11) - tm %r8,0x0001 # coming from user space? - jno 1f - lctlg %c1,%c1,__LC_KERNEL_ASCE -1: lgr %r2,%r11 # pass pointer to pt_regs + lgr %r2,%r11 # pass pointer to pt_regs brasl %r14,\handler mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) tmhh %r8,0x0001 # returning to user ? -- cgit v1.2.3 From fcdc03f78d5c8db53ba090e38474b05113d34ade Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 4 May 2022 15:23:39 +0200 Subject: s390/compat: cleanup compat_linux.h header file Remove various declarations from former s390 specific compat system calls which have been removed with commit fef747bab3c0 ("s390: use generic UID16 implementation"). While at it clean up the whole small header file. Signed-off-by: Heiko Carstens --- arch/s390/kernel/compat_linux.h | 80 +++++++++++++++-------------------------- 1 file changed, 28 insertions(+), 52 deletions(-) diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h index f46ca315631d..ef23739b277c 100644 --- a/arch/s390/kernel/compat_linux.h +++ b/arch/s390/kernel/compat_linux.h @@ -7,62 +7,57 @@ #include #include -/* Macro that masks the high order bit of an 32 bit pointer and converts it*/ -/* to a 64 bit pointer */ -#define A(__x) ((unsigned long)((__x) & 0x7FFFFFFFUL)) -#define AA(__x) \ - ((unsigned long)(__x)) +/* + * Macro that masks the high order bit of a 32 bit pointer and + * converts it to a 64 bit pointer. + */ +#define A(__x) ((unsigned long)((__x) & 0x7FFFFFFFUL)) +#define AA(__x) ((unsigned long)(__x)) /* Now 32bit compatibility types */ struct ipc_kludge_32 { - __u32 msgp; /* pointer */ - __s32 msgtyp; + __u32 msgp; /* pointer */ + __s32 msgtyp; }; /* asm/sigcontext.h */ -typedef union -{ - __u64 d; - __u32 f; +typedef union { + __u64 d; + __u32 f; } freg_t32; -typedef struct -{ +typedef struct { unsigned int fpc; unsigned int pad; - freg_t32 fprs[__NUM_FPRS]; + freg_t32 fprs[__NUM_FPRS]; } _s390_fp_regs32; -typedef struct -{ +typedef struct { psw_t32 psw; __u32 gprs[__NUM_GPRS]; __u32 acrs[__NUM_ACRS]; } _s390_regs_common32; -typedef struct -{ +typedef struct { _s390_regs_common32 regs; - _s390_fp_regs32 fpregs; + _s390_fp_regs32 fpregs; } _sigregs32; -typedef struct -{ - __u32 gprs_high[__NUM_GPRS]; - __u64 vxrs_low[__NUM_VXRS_LOW]; - __vector128 vxrs_high[__NUM_VXRS_HIGH]; - __u8 __reserved[128]; +typedef struct { + __u32 gprs_high[__NUM_GPRS]; + __u64 vxrs_low[__NUM_VXRS_LOW]; + __vector128 vxrs_high[__NUM_VXRS_HIGH]; + __u8 __reserved[128]; } _sigregs_ext32; #define _SIGCONTEXT_NSIG32 64 #define _SIGCONTEXT_NSIG_BPW32 32 #define __SIGNAL_FRAMESIZE32 96 -#define _SIGMASK_COPY_SIZE32 (sizeof(u32)*2) +#define _SIGMASK_COPY_SIZE32 (sizeof(u32) * 2) -struct sigcontext32 -{ +struct sigcontext32 { __u32 oldmask[_COMPAT_NSIG_WORDS]; - __u32 sregs; /* pointer */ + __u32 sregs; /* pointer */ }; /* asm/signal.h */ @@ -70,11 +65,11 @@ struct sigcontext32 /* asm/ucontext.h */ struct ucontext32 { __u32 uc_flags; - __u32 uc_link; /* pointer */ + __u32 uc_link; /* pointer */ compat_stack_t uc_stack; _sigregs32 uc_mcontext; compat_sigset_t uc_sigmask; - /* Allow for uc_sigmask growth. 
Glibc uses a 1024-bit sigset_t. */ + /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */ unsigned char __unused[128 - sizeof(compat_sigset_t)]; _sigregs_ext32 uc_mcontext_ext; }; @@ -83,25 +78,6 @@ struct stat64_emu31; struct mmap_arg_struct_emu31; struct fadvise64_64_args; -long compat_sys_s390_chown16(const char __user *filename, u16 user, u16 group); -long compat_sys_s390_lchown16(const char __user *filename, u16 user, u16 group); -long compat_sys_s390_fchown16(unsigned int fd, u16 user, u16 group); -long compat_sys_s390_setregid16(u16 rgid, u16 egid); -long compat_sys_s390_setgid16(u16 gid); -long compat_sys_s390_setreuid16(u16 ruid, u16 euid); -long compat_sys_s390_setuid16(u16 uid); -long compat_sys_s390_setresuid16(u16 ruid, u16 euid, u16 suid); -long compat_sys_s390_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid); -long compat_sys_s390_setresgid16(u16 rgid, u16 egid, u16 sgid); -long compat_sys_s390_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid); -long compat_sys_s390_setfsuid16(u16 uid); -long compat_sys_s390_setfsgid16(u16 gid); -long compat_sys_s390_getgroups16(int gidsetsize, u16 __user *grouplist); -long compat_sys_s390_setgroups16(int gidsetsize, u16 __user *grouplist); -long compat_sys_s390_getuid16(void); -long compat_sys_s390_geteuid16(void); -long compat_sys_s390_getgid16(void); -long compat_sys_s390_getegid16(void); long compat_sys_s390_truncate64(const char __user *path, u32 high, u32 low); long compat_sys_s390_ftruncate64(unsigned int fd, u32 high, u32 low); long compat_sys_s390_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, u32 high, u32 low); @@ -113,8 +89,8 @@ long compat_sys_s390_fstat64(unsigned int fd, struct stat64_emu31 __user *statbu long compat_sys_s390_fstatat64(unsigned int dfd, const char __user *filename, struct stat64_emu31 __user *statbuf, int flag); long compat_sys_s390_old_mmap(struct mmap_arg_struct_emu31 __user *arg); long compat_sys_s390_mmap2(struct mmap_arg_struct_emu31 __user *arg); -long compat_sys_s390_read(unsigned int fd, char __user * buf, compat_size_t count); -long compat_sys_s390_write(unsigned int fd, const char __user * buf, compat_size_t count); +long compat_sys_s390_read(unsigned int fd, char __user *buf, compat_size_t count); +long compat_sys_s390_write(unsigned int fd, const char __user *buf, compat_size_t count); long compat_sys_s390_fadvise64(int fd, u32 high, u32 low, compat_size_t len, int advise); long compat_sys_s390_fadvise64_64(struct fadvise64_64_args __user *args); long compat_sys_s390_sync_file_range(int fd, u32 offhigh, u32 offlow, u32 nhigh, u32 nlow, unsigned int flags); -- cgit v1.2.3 From 6d97af487dee3176cb1342d4ab16637e495440ad Mon Sep 17 00:00:00 2001 From: Sven Schnelle Date: Wed, 4 May 2022 08:23:50 +0200 Subject: entry: Rename arch_check_user_regs() to arch_enter_from_user_mode() arch_check_user_regs() is used at the moment to verify that struct pt_regs contains valid values when entering the kernel from userspace. s390 needs a place in the generic entry code to modify a cpu data structure when switching from userspace to kernel mode. As arch_check_user_regs() is exactly this, rename it to arch_enter_from_user_mode(). When entering the kernel from userspace, arch_check_user_regs() is used to verify that struct pt_regs contains valid values. Note that the NMI codepath doesn't call this function. s390 needs a place in the generic entry code to modify a cpu data structure when switching from userspace to kernel mode. 
As arch_check_user_regs() is exactly this, rename it to arch_enter_from_user_mode(). Signed-off-by: Sven Schnelle Reviewed-by: Thomas Gleixner Acked-by: Peter Zijlstra (Intel) Cc: Andy Lutomirski Link: https://lore.kernel.org/r/20220504062351.2954280-2-tmricht@linux.ibm.com Signed-off-by: Heiko Carstens --- arch/s390/include/asm/entry-common.h | 4 ++-- arch/x86/include/asm/entry-common.h | 4 ++-- include/linux/entry-common.h | 8 ++++---- kernel/entry/common.c | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h index 2f0a1cacdf85..99d654ccd3db 100644 --- a/arch/s390/include/asm/entry-common.h +++ b/arch/s390/include/asm/entry-common.h @@ -15,12 +15,12 @@ void do_per_trap(struct pt_regs *regs); #ifdef CONFIG_DEBUG_ENTRY -static __always_inline void arch_check_user_regs(struct pt_regs *regs) +static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) { debug_user_asce(0); } -#define arch_check_user_regs arch_check_user_regs +#define arch_enter_from_user_mode arch_enter_from_user_mode #endif /* CONFIG_DEBUG_ENTRY */ static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs, diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h index 43184640b579..674ed46d3ced 100644 --- a/arch/x86/include/asm/entry-common.h +++ b/arch/x86/include/asm/entry-common.h @@ -10,7 +10,7 @@ #include /* Check that the stack and regs on entry from user mode are sane. */ -static __always_inline void arch_check_user_regs(struct pt_regs *regs) +static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) { if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) { /* @@ -42,7 +42,7 @@ static __always_inline void arch_check_user_regs(struct pt_regs *regs) WARN_ON_ONCE(regs != task_pt_regs(current)); } } -#define arch_check_user_regs arch_check_user_regs +#define arch_enter_from_user_mode arch_enter_from_user_mode static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, unsigned long ti_work) diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index ab78bd4c2eb0..c92ac75d6556 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -63,7 +63,7 @@ ARCH_EXIT_TO_USER_MODE_WORK) /** - * arch_check_user_regs - Architecture specific sanity check for user mode regs + * arch_enter_from_user_mode - Architecture specific sanity check for user mode regs * @regs: Pointer to currents pt_regs * * Defaults to an empty implementation. Can be replaced by architecture @@ -73,10 +73,10 @@ * section. Use __always_inline so the compiler cannot push it out of line * and make it instrumentable. 
*/ -static __always_inline void arch_check_user_regs(struct pt_regs *regs); +static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs); -#ifndef arch_check_user_regs -static __always_inline void arch_check_user_regs(struct pt_regs *regs) {} +#ifndef arch_enter_from_user_mode +static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {} #endif /** diff --git a/kernel/entry/common.c b/kernel/entry/common.c index 93c3b86e781c..9e63923c5a0f 100644 --- a/kernel/entry/common.c +++ b/kernel/entry/common.c @@ -17,7 +17,7 @@ /* See comment for enter_from_user_mode() in entry-common.h */ static __always_inline void __enter_from_user_mode(struct pt_regs *regs) { - arch_check_user_regs(regs); + arch_enter_from_user_mode(regs); lockdep_hardirqs_off(CALLER_ADDR0); CT_WARN_ON(ct_state() != CONTEXT_USER); -- cgit v1.2.3 From 39d62336f5c126ad6dccdf66cd249f2d0e86d3c9 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Wed, 4 May 2022 08:23:51 +0200 Subject: s390/pai: add support for cryptography counters PMU device driver perf_pai_crypto supports Processor Activity Instrumentation (PAI), available with IBM z16: - maps a full page to lowcore address 0x1500. - uses CR0 bit 13 to turn PAI crypto counting on and off. - creates a sample with raw data on each context switch out when at context switch some mapped counters have a value of nonzero. This device driver only supports CPU wide context, no task context is allowed. Support for counting: - one or more counters can be specified using perf stat -e pai_crypto/xxx/ where xxx stands for the counter event name. Multiple invocation of this command is possible. The counter names are listed in /sys/devices/pai_crypto/events directory. - one special counters can be specified using perf stat -e pai_crypto/CRYPTO_ALL/ which returns the sum of all incremented crypto counters. - one event pai_crypto/CRYPTO_ALL/ is reserved for sampling. No multiple invocations are possible. The event collects data at context switch out and saves them in the ring buffer. Add qpaci assembly instruction to query supported memory mapped crypto counters. It returns the number of counters (no holes allowed in that range). The PAI crypto counter events are system wide and can not be executed in parallel. Therefore some restrictions documented in function paicrypt_busy apply. In particular event CRYPTO_ALL for sampling must run exclusive. Only counting events can run in parallel. PAI crypto counter events can not be created when a CPU hot plug add is processed. This means a CPU hot plug add does not get the necessary PAI event to record PAI cryptography counter increments on the newly added CPU. CPU hot plug remove removes the event and terminates the counting of PAI counters immediately. 
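For consumers of the sampling mode described above, the PERF_SAMPLE_RAW payload is a packed
sequence of (counter number, counter value) pairs as produced by paicrypt_copy() in the driver
below (struct pai_userdata: 2 bytes counter number, 8 bytes counter value). The following is a
minimal user-space decoding sketch, not part of this patch; the function name, buffer handling
and output format are illustrative only:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Mirrors struct pai_userdata from the driver: a 2 byte counter
	 * number followed by an 8 byte counter value, packed without padding.
	 */
	struct pai_userdata {
		uint16_t num;
		uint64_t value;
	} __attribute__((packed));

	/* Decode the raw payload of one pai_crypto sample. */
	static void decode_pai_raw(const void *raw, size_t rawsize)
	{
		size_t n = rawsize / sizeof(struct pai_userdata);
		struct pai_userdata e;
		size_t i;

		for (i = 0; i < n; i++) {
			/* memcpy avoids unaligned access on the packed stream */
			memcpy(&e, (const char *)raw + i * sizeof(e), sizeof(e));
			/* perf event number = 0x1000 (PAI_CRYPTO_BASE) + counter number */
			printf("counter %u: %llu\n", e.num,
			       (unsigned long long)e.value);
		}
	}
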
Co-developed-by: Sven Schnelle Signed-off-by: Sven Schnelle Reviewed-by: Juergen Christ Signed-off-by: Thomas Richter Link: https://lore.kernel.org/r/20220504062351.2954280-3-tmricht@linux.ibm.com Signed-off-by: Heiko Carstens --- arch/s390/include/asm/ctl_reg.h | 4 +- arch/s390/include/asm/entry-common.h | 10 +- arch/s390/include/asm/lowcore.h | 5 +- arch/s390/include/asm/nmi.h | 2 +- arch/s390/include/asm/pai.h | 74 ++++ arch/s390/kernel/Makefile | 1 + arch/s390/kernel/entry.S | 1 + arch/s390/kernel/nmi.c | 6 +- arch/s390/kernel/perf_pai_crypto.c | 688 +++++++++++++++++++++++++++++++++++ 9 files changed, 784 insertions(+), 7 deletions(-) create mode 100644 arch/s390/include/asm/pai.h create mode 100644 arch/s390/kernel/perf_pai_crypto.c diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h index 82388da3f95f..267a8f88e143 100644 --- a/arch/s390/include/asm/ctl_reg.h +++ b/arch/s390/include/asm/ctl_reg.h @@ -93,7 +93,9 @@ union ctlreg0 { unsigned long tcx : 1; /* Transactional-Execution control */ unsigned long pifo : 1; /* Transactional-Execution Program- Interruption-Filtering Override */ - unsigned long : 22; + unsigned long : 3; + unsigned long ccc : 1; /* Cryptography counter control */ + unsigned long : 18; unsigned long : 3; unsigned long lap : 1; /* Low-address-protection control */ unsigned long : 4; diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h index 99d654ccd3db..000de2b1e67a 100644 --- a/arch/s390/include/asm/entry-common.h +++ b/arch/s390/include/asm/entry-common.h @@ -9,19 +9,21 @@ #include #include #include +#include #define ARCH_EXIT_TO_USER_MODE_WORK (_TIF_GUARDED_STORAGE | _TIF_PER_TRAP) void do_per_trap(struct pt_regs *regs); -#ifdef CONFIG_DEBUG_ENTRY static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) { - debug_user_asce(0); + if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) + debug_user_asce(0); + + pai_kernel_enter(regs); } #define arch_enter_from_user_mode arch_enter_from_user_mode -#endif /* CONFIG_DEBUG_ENTRY */ static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs, unsigned long ti_work) @@ -44,6 +46,8 @@ static __always_inline void arch_exit_to_user_mode(void) if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) debug_user_asce(1); + + pai_kernel_exit(current_pt_regs()); } #define arch_exit_to_user_mode arch_exit_to_user_mode diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 56002aeacabf..26fe5e535728 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -200,7 +200,10 @@ struct lowcore { __u64 last_break_save_area; /* 0x1338 */ __u32 access_regs_save_area[16]; /* 0x1340 */ __u64 cregs_save_area[16]; /* 0x1380 */ - __u8 pad_0x1400[0x1800-0x1400]; /* 0x1400 */ + __u8 pad_0x1400[0x1500-0x1400]; /* 0x1400 */ + /* Cryptography-counter designation */ + __u64 ccd; /* 0x1500 */ + __u8 pad_0x1508[0x1800-0x1508]; /* 0x1508 */ /* Transaction abort diagnostic block */ struct pgm_tdb pgm_tdb; /* 0x1800 */ diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h index 292083083830..af1cd3a6f406 100644 --- a/arch/s390/include/asm/nmi.h +++ b/arch/s390/include/asm/nmi.h @@ -101,7 +101,7 @@ void nmi_alloc_mcesa_early(u64 *mcesad); int nmi_alloc_mcesa(u64 *mcesad); void nmi_free_mcesa(u64 *mcesad); -void s390_handle_mcck(void); +void s390_handle_mcck(struct pt_regs *regs); void __s390_handle_mcck(void); int s390_do_machine_check(struct pt_regs *regs); diff --git a/arch/s390/include/asm/pai.h 
b/arch/s390/include/asm/pai.h new file mode 100644 index 000000000000..5b7e33ac6f0b --- /dev/null +++ b/arch/s390/include/asm/pai.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Processor Activity Instrumentation support for cryptography counters + * + * Copyright IBM Corp. 2022 + * Author(s): Thomas Richter + */ +#ifndef _ASM_S390_PAI_H +#define _ASM_S390_PAI_H + +#include +#include +#include + +struct qpaci_info_block { + u64 header; + struct { + u64 : 8; + u64 num_cc : 8; /* # of supported crypto counters */ + u64 : 48; + }; +}; + +static inline int qpaci(struct qpaci_info_block *info) +{ + /* Size of info (in double words minus one) */ + size_t size = sizeof(*info) / sizeof(u64) - 1; + int cc; + + asm volatile( + " lgr 0,%[size]\n" + " .insn s,0xb28f0000,%[info]\n" + " lgr %[size],0\n" + " ipm %[cc]\n" + " srl %[cc],28\n" + : [cc] "=d" (cc), [info] "=Q" (*info), [size] "+&d" (size) + : + : "0", "cc", "memory"); + return cc ? (size + 1) * sizeof(u64) : 0; +} + +#define PAI_CRYPTO_BASE 0x1000 /* First event number */ +#define PAI_CRYPTO_MAXCTR 256 /* Max # of event counters */ +#define PAI_CRYPTO_KERNEL_OFFSET 2048 + +DECLARE_STATIC_KEY_FALSE(pai_key); + +static __always_inline void pai_kernel_enter(struct pt_regs *regs) +{ + if (!IS_ENABLED(CONFIG_PERF_EVENTS)) + return; + if (!static_branch_unlikely(&pai_key)) + return; + if (!S390_lowcore.ccd) + return; + if (!user_mode(regs)) + return; + WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd | PAI_CRYPTO_KERNEL_OFFSET); +} + +static __always_inline void pai_kernel_exit(struct pt_regs *regs) +{ + if (!IS_ENABLED(CONFIG_PERF_EVENTS)) + return; + if (!static_branch_unlikely(&pai_key)) + return; + if (!S390_lowcore.ccd) + return; + if (!user_mode(regs)) + return; + WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd & ~PAI_CRYPTO_KERNEL_OFFSET); +} + +#endif diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index c8d1b6aa823e..5851041bb214 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -72,6 +72,7 @@ obj-$(CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT) += ima_arch.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf_common.o obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf.o perf_cpum_sf.o obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o +obj-$(CONFIG_PERF_EVENTS) += perf_pai_crypto.o obj-$(CONFIG_TRACEPOINTS) += trace.o obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index a6008e58631b..685ccec02a27 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -599,6 +599,7 @@ ENTRY(mcck_int_handler) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) la %r11,STACK_FRAME_OVERHEAD(%r1) + lgr %r2,%r11 lgr %r15,%r1 brasl %r14,s390_handle_mcck .Lmcck_return: diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index fc60e29b8690..53ed3884fe64 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -29,6 +29,8 @@ #include #include #include +#include + #include struct mcck_struct { @@ -169,10 +171,12 @@ void __s390_handle_mcck(void) } } -void noinstr s390_handle_mcck(void) +void noinstr s390_handle_mcck(struct pt_regs *regs) { trace_hardirqs_off(); + pai_kernel_enter(regs); __s390_handle_mcck(); + pai_kernel_exit(regs); trace_hardirqs_on(); } /* diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c new file mode 100644 index 000000000000..8c1545946d85 --- /dev/null +++ 
b/arch/s390/kernel/perf_pai_crypto.c @@ -0,0 +1,688 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Performance event support - Processor Activity Instrumentation Facility + * + * Copyright IBM Corp. 2022 + * Author(s): Thomas Richter + */ +#define KMSG_COMPONENT "pai_crypto" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +static debug_info_t *cfm_dbg; +static unsigned int paicrypt_cnt; /* Size of the mapped counter sets */ + /* extracted with QPACI instruction */ + +DEFINE_STATIC_KEY_FALSE(pai_key); + +struct pai_userdata { + u16 num; + u64 value; +} __packed; + +struct paicrypt_map { + unsigned long *page; /* Page for CPU to store counters */ + struct pai_userdata *save; /* Page to store no-zero counters */ + unsigned int users; /* # of PAI crypto users */ + unsigned int sampler; /* # of PAI crypto samplers */ + unsigned int counter; /* # of PAI crypto counters */ + struct perf_event *event; /* Perf event for sampling */ +}; + +static DEFINE_PER_CPU(struct paicrypt_map, paicrypt_map); + +/* Release the PMU if event is the last perf event */ +static DEFINE_MUTEX(pai_reserve_mutex); + +/* Adjust usage counters and remove allocated memory when all users are + * gone. + */ +static void paicrypt_event_destroy(struct perf_event *event) +{ + struct paicrypt_map *cpump = per_cpu_ptr(&paicrypt_map, event->cpu); + + cpump->event = NULL; + static_branch_dec(&pai_key); + mutex_lock(&pai_reserve_mutex); + if (event->attr.sample_period) + cpump->sampler -= 1; + else + cpump->counter -= 1; + debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d" + " sampler %d counter %d\n", __func__, + event->attr.config, event->cpu, cpump->sampler, + cpump->counter); + if (!cpump->counter && !cpump->sampler) { + debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n", + __func__, (unsigned long)cpump->page, + cpump->save); + free_page((unsigned long)cpump->page); + cpump->page = NULL; + kvfree(cpump->save); + cpump->save = NULL; + } + mutex_unlock(&pai_reserve_mutex); +} + +static u64 paicrypt_getctr(struct paicrypt_map *cpump, int nr, bool kernel) +{ + if (kernel) + nr += PAI_CRYPTO_MAXCTR; + return cpump->page[nr]; +} + +/* Read the counter values. Return value from location in CMP. For event + * CRYPTO_ALL sum up all events. + */ +static u64 paicrypt_getdata(struct perf_event *event, bool kernel) +{ + struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map); + u64 sum = 0; + int i; + + if (event->attr.config != PAI_CRYPTO_BASE) { + return paicrypt_getctr(cpump, + event->attr.config - PAI_CRYPTO_BASE, + kernel); + } + + for (i = 1; i <= paicrypt_cnt; i++) { + u64 val = paicrypt_getctr(cpump, i, kernel); + + if (!val) + continue; + sum += val; + } + return sum; +} + +static u64 paicrypt_getall(struct perf_event *event) +{ + u64 sum = 0; + + if (!event->attr.exclude_kernel) + sum += paicrypt_getdata(event, true); + if (!event->attr.exclude_user) + sum += paicrypt_getdata(event, false); + + return sum; +} + +/* Used to avoid races in checking concurrent access of counting and + * sampling for crypto events + * + * Only one instance of event pai_crypto/CRYPTO_ALL/ for sampling is + * allowed and when this event is running, no counting event is allowed. + * Several counting events are allowed in parallel, but no sampling event + * is allowed while one (or more) counting events are running. + * + * This function is called in process context and it is save to block. 
+ * When the event initialization functions fails, no other call back will + * be invoked. + * + * Allocate the memory for the event. + */ +static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump) +{ + unsigned int *use_ptr; + int rc = 0; + + mutex_lock(&pai_reserve_mutex); + if (a->sample_period) { /* Sampling requested */ + use_ptr = &cpump->sampler; + if (cpump->counter || cpump->sampler) + rc = -EBUSY; /* ... sampling/counting active */ + } else { /* Counting requested */ + use_ptr = &cpump->counter; + if (cpump->sampler) + rc = -EBUSY; /* ... and sampling active */ + } + if (rc) + goto unlock; + + /* Allocate memory for counter page and counter extraction. + * Only the first counting event has to allocate a page. + */ + if (cpump->page) + goto unlock; + + rc = -ENOMEM; + cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL); + if (!cpump->page) + goto unlock; + cpump->save = kvmalloc_array(paicrypt_cnt + 1, + sizeof(struct pai_userdata), GFP_KERNEL); + if (!cpump->save) { + free_page((unsigned long)cpump->page); + cpump->page = NULL; + goto unlock; + } + rc = 0; + +unlock: + /* If rc is non-zero, do not increment counter/sampler. */ + if (!rc) + *use_ptr += 1; + debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx sampler %d" + " counter %d page %#lx save %p rc %d\n", __func__, + a->sample_period, cpump->sampler, cpump->counter, + (unsigned long)cpump->page, cpump->save, rc); + mutex_unlock(&pai_reserve_mutex); + return rc; +} + +/* Might be called on different CPU than the one the event is intended for. */ +static int paicrypt_event_init(struct perf_event *event) +{ + struct perf_event_attr *a = &event->attr; + struct paicrypt_map *cpump; + int rc; + + /* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */ + if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type) + return -ENOENT; + /* PAI crypto event must be valid */ + if (a->config > PAI_CRYPTO_BASE + paicrypt_cnt) + return -EINVAL; + /* Allow only CPU wide operation, no process context for now. */ + if (event->hw.target || event->cpu == -1) + return -ENOENT; + /* Allow only CRYPTO_ALL for sampling. */ + if (a->sample_period && a->config != PAI_CRYPTO_BASE) + return -EINVAL; + + cpump = per_cpu_ptr(&paicrypt_map, event->cpu); + rc = paicrypt_busy(a, cpump); + if (rc) + return rc; + + cpump->event = event; + event->destroy = paicrypt_event_destroy; + + if (a->sample_period) { + a->sample_period = 1; + a->freq = 0; + /* Register for paicrypt_sched_task() to be called */ + event->attach_state |= PERF_ATTACH_SCHED_CB; + /* Add raw data which contain the memory mapped counters */ + a->sample_type |= PERF_SAMPLE_RAW; + /* Turn off inheritance */ + a->inherit = 0; + } + + static_branch_inc(&pai_key); + return 0; +} + +static void paicrypt_read(struct perf_event *event) +{ + u64 prev, new, delta; + + prev = local64_read(&event->hw.prev_count); + new = paicrypt_getall(event); + local64_set(&event->hw.prev_count, new); + delta = (prev <= new) ? 
new - prev + : (-1ULL - prev) + new + 1; /* overflow */ + local64_add(delta, &event->count); +} + +static void paicrypt_start(struct perf_event *event, int flags) +{ + u64 sum; + + sum = paicrypt_getall(event); /* Get current value */ + local64_set(&event->hw.prev_count, sum); + local64_set(&event->count, 0); +} + +static int paicrypt_add(struct perf_event *event, int flags) +{ + struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map); + unsigned long ccd; + + if (cpump->users++ == 0) { + ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET; + WRITE_ONCE(S390_lowcore.ccd, ccd); + __ctl_set_bit(0, 50); + } + cpump->event = event; + if (flags & PERF_EF_START && !event->attr.sample_period) { + /* Only counting needs initial counter value */ + paicrypt_start(event, PERF_EF_RELOAD); + } + event->hw.state = 0; + if (event->attr.sample_period) + perf_sched_cb_inc(event->pmu); + return 0; +} + +static void paicrypt_stop(struct perf_event *event, int flags) +{ + paicrypt_read(event); + event->hw.state = PERF_HES_STOPPED; +} + +static void paicrypt_del(struct perf_event *event, int flags) +{ + struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map); + + if (event->attr.sample_period) + perf_sched_cb_dec(event->pmu); + if (!event->attr.sample_period) + /* Only counting needs to read counter */ + paicrypt_stop(event, PERF_EF_UPDATE); + if (cpump->users-- == 1) { + __ctl_clear_bit(0, 50); + WRITE_ONCE(S390_lowcore.ccd, 0); + } +} + +/* Create raw data and save it in buffer. Returns number of bytes copied. + * Saves only positive counter entries of the form + * 2 bytes: Number of counter + * 8 bytes: Value of counter + */ +static size_t paicrypt_copy(struct pai_userdata *userdata, + struct paicrypt_map *cpump, + bool exclude_user, bool exclude_kernel) +{ + int i, outidx = 0; + + for (i = 1; i <= paicrypt_cnt; i++) { + u64 val = 0; + + if (!exclude_kernel) + val += paicrypt_getctr(cpump, i, true); + if (!exclude_user) + val += paicrypt_getctr(cpump, i, false); + if (val) { + userdata[outidx].num = i; + userdata[outidx].value = val; + outidx++; + } + } + return outidx * sizeof(struct pai_userdata); +} + +static int paicrypt_push_sample(void) +{ + struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map); + struct perf_event *event = cpump->event; + struct perf_sample_data data; + struct perf_raw_record raw; + struct pt_regs regs; + size_t rawsize; + int overflow; + + if (!cpump->event) /* No event active */ + return 0; + rawsize = paicrypt_copy(cpump->save, cpump, + cpump->event->attr.exclude_user, + cpump->event->attr.exclude_kernel); + if (!rawsize) /* No incremented counters */ + return 0; + + /* Setup perf sample */ + memset(®s, 0, sizeof(regs)); + memset(&raw, 0, sizeof(raw)); + memset(&data, 0, sizeof(data)); + perf_sample_data_init(&data, 0, event->hw.last_period); + if (event->attr.sample_type & PERF_SAMPLE_TID) { + data.tid_entry.pid = task_tgid_nr(current); + data.tid_entry.tid = task_pid_nr(current); + } + if (event->attr.sample_type & PERF_SAMPLE_TIME) + data.time = event->clock(); + if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) + data.id = event->id; + if (event->attr.sample_type & PERF_SAMPLE_CPU) { + data.cpu_entry.cpu = smp_processor_id(); + data.cpu_entry.reserved = 0; + } + if (event->attr.sample_type & PERF_SAMPLE_RAW) { + raw.frag.size = rawsize; + raw.frag.data = cpump->save; + raw.size = raw.frag.size; + data.raw = &raw; + } + + overflow = perf_event_overflow(event, &data, ®s); + perf_event_update_userpage(event); + /* Clear lowcore page after read 
*/ + memset(cpump->page, 0, PAGE_SIZE); + return overflow; +} + +/* Called on schedule-in and schedule-out. No access to event structure, + * but for sampling only event CRYPTO_ALL is allowed. + */ +static void paicrypt_sched_task(struct perf_event_context *ctx, bool sched_in) +{ + /* We started with a clean page on event installation. So read out + * results on schedule_out and if page was dirty, clear values. + */ + if (!sched_in) + paicrypt_push_sample(); +} + +/* Attribute definitions for paicrypt interface. As with other CPU + * Measurement Facilities, there is one attribute per mapped counter. + * The number of mapped counters may vary per machine generation. Use + * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction + * to determine the number of mapped counters. The instructions returns + * a positive number, which is the highest number of supported counters. + * All counters less than this number are also supported, there are no + * holes. A returned number of zero means no support for mapped counters. + * + * The identification of the counter is a unique number. The chosen range + * is 0x1000 + offset in mapped kernel page. + * All CPU Measurement Facility counters identifiers must be unique and + * the numbers from 0 to 496 are already used for the CPU Measurement + * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already + * used for the CPU Measurement Sampling facility. + */ +PMU_FORMAT_ATTR(event, "config:0-63"); + +static struct attribute *paicrypt_format_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group paicrypt_events_group = { + .name = "events", + .attrs = NULL /* Filled in attr_event_init() */ +}; + +static struct attribute_group paicrypt_format_group = { + .name = "format", + .attrs = paicrypt_format_attr, +}; + +static const struct attribute_group *paicrypt_attr_groups[] = { + &paicrypt_events_group, + &paicrypt_format_group, + NULL, +}; + +/* Performance monitoring unit for mapped counters */ +static struct pmu paicrypt = { + .task_ctx_nr = perf_invalid_context, + .event_init = paicrypt_event_init, + .add = paicrypt_add, + .del = paicrypt_del, + .start = paicrypt_start, + .stop = paicrypt_stop, + .read = paicrypt_read, + .sched_task = paicrypt_sched_task, + .attr_groups = paicrypt_attr_groups +}; + +/* List of symbolic PAI counter names. 
*/ +static const char * const paicrypt_ctrnames[] = { + [0] = "CRYPTO_ALL", + [1] = "KM_DEA", + [2] = "KM_TDEA_128", + [3] = "KM_TDEA_192", + [4] = "KM_ENCRYPTED_DEA", + [5] = "KM_ENCRYPTED_TDEA_128", + [6] = "KM_ENCRYPTED_TDEA_192", + [7] = "KM_AES_128", + [8] = "KM_AES_192", + [9] = "KM_AES_256", + [10] = "KM_ENCRYPTED_AES_128", + [11] = "KM_ENCRYPTED_AES_192", + [12] = "KM_ENCRYPTED_AES_256", + [13] = "KM_XTS_AES_128", + [14] = "KM_XTS_AES_256", + [15] = "KM_XTS_ENCRYPTED_AES_128", + [16] = "KM_XTS_ENCRYPTED_AES_256", + [17] = "KMC_DEA", + [18] = "KMC_TDEA_128", + [19] = "KMC_TDEA_192", + [20] = "KMC_ENCRYPTED_DEA", + [21] = "KMC_ENCRYPTED_TDEA_128", + [22] = "KMC_ENCRYPTED_TDEA_192", + [23] = "KMC_AES_128", + [24] = "KMC_AES_192", + [25] = "KMC_AES_256", + [26] = "KMC_ENCRYPTED_AES_128", + [27] = "KMC_ENCRYPTED_AES_192", + [28] = "KMC_ENCRYPTED_AES_256", + [29] = "KMC_PRNG", + [30] = "KMA_GCM_AES_128", + [31] = "KMA_GCM_AES_192", + [32] = "KMA_GCM_AES_256", + [33] = "KMA_GCM_ENCRYPTED_AES_128", + [34] = "KMA_GCM_ENCRYPTED_AES_192", + [35] = "KMA_GCM_ENCRYPTED_AES_256", + [36] = "KMF_DEA", + [37] = "KMF_TDEA_128", + [38] = "KMF_TDEA_192", + [39] = "KMF_ENCRYPTED_DEA", + [40] = "KMF_ENCRYPTED_TDEA_128", + [41] = "KMF_ENCRYPTED_TDEA_192", + [42] = "KMF_AES_128", + [43] = "KMF_AES_192", + [44] = "KMF_AES_256", + [45] = "KMF_ENCRYPTED_AES_128", + [46] = "KMF_ENCRYPTED_AES_192", + [47] = "KMF_ENCRYPTED_AES_256", + [48] = "KMCTR_DEA", + [49] = "KMCTR_TDEA_128", + [50] = "KMCTR_TDEA_192", + [51] = "KMCTR_ENCRYPTED_DEA", + [52] = "KMCTR_ENCRYPTED_TDEA_128", + [53] = "KMCTR_ENCRYPTED_TDEA_192", + [54] = "KMCTR_AES_128", + [55] = "KMCTR_AES_192", + [56] = "KMCTR_AES_256", + [57] = "KMCTR_ENCRYPTED_AES_128", + [58] = "KMCTR_ENCRYPTED_AES_192", + [59] = "KMCTR_ENCRYPTED_AES_256", + [60] = "KMO_DEA", + [61] = "KMO_TDEA_128", + [62] = "KMO_TDEA_192", + [63] = "KMO_ENCRYPTED_DEA", + [64] = "KMO_ENCRYPTED_TDEA_128", + [65] = "KMO_ENCRYPTED_TDEA_192", + [66] = "KMO_AES_128", + [67] = "KMO_AES_192", + [68] = "KMO_AES_256", + [69] = "KMO_ENCRYPTED_AES_128", + [70] = "KMO_ENCRYPTED_AES_192", + [71] = "KMO_ENCRYPTED_AES_256", + [72] = "KIMD_SHA_1", + [73] = "KIMD_SHA_256", + [74] = "KIMD_SHA_512", + [75] = "KIMD_SHA3_224", + [76] = "KIMD_SHA3_256", + [77] = "KIMD_SHA3_384", + [78] = "KIMD_SHA3_512", + [79] = "KIMD_SHAKE_128", + [80] = "KIMD_SHAKE_256", + [81] = "KIMD_GHASH", + [82] = "KLMD_SHA_1", + [83] = "KLMD_SHA_256", + [84] = "KLMD_SHA_512", + [85] = "KLMD_SHA3_224", + [86] = "KLMD_SHA3_256", + [87] = "KLMD_SHA3_384", + [88] = "KLMD_SHA3_512", + [89] = "KLMD_SHAKE_128", + [90] = "KLMD_SHAKE_256", + [91] = "KMAC_DEA", + [92] = "KMAC_TDEA_128", + [93] = "KMAC_TDEA_192", + [94] = "KMAC_ENCRYPTED_DEA", + [95] = "KMAC_ENCRYPTED_TDEA_128", + [96] = "KMAC_ENCRYPTED_TDEA_192", + [97] = "KMAC_AES_128", + [98] = "KMAC_AES_192", + [99] = "KMAC_AES_256", + [100] = "KMAC_ENCRYPTED_AES_128", + [101] = "KMAC_ENCRYPTED_AES_192", + [102] = "KMAC_ENCRYPTED_AES_256", + [103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA", + [104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128", + [105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192", + [106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA", + [107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128", + [108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192", + [109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128", + [110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192", + [111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256", + [112] = 
"PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128", + [113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192", + [114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256A", + [115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128", + [116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256", + [117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128", + [118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256", + [119] = "PCC_SCALAR_MULTIPLY_P256", + [120] = "PCC_SCALAR_MULTIPLY_P384", + [121] = "PCC_SCALAR_MULTIPLY_P521", + [122] = "PCC_SCALAR_MULTIPLY_ED25519", + [123] = "PCC_SCALAR_MULTIPLY_ED448", + [124] = "PCC_SCALAR_MULTIPLY_X25519", + [125] = "PCC_SCALAR_MULTIPLY_X448", + [126] = "PRNO_SHA_512_DRNG", + [127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO", + [128] = "PRNO_TRNG", + [129] = "KDSA_ECDSA_VERIFY_P256", + [130] = "KDSA_ECDSA_VERIFY_P384", + [131] = "KDSA_ECDSA_VERIFY_P521", + [132] = "KDSA_ECDSA_SIGN_P256", + [133] = "KDSA_ECDSA_SIGN_P384", + [134] = "KDSA_ECDSA_SIGN_P521", + [135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256", + [136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384", + [137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521", + [138] = "KDSA_EDDSA_VERIFY_ED25519", + [139] = "KDSA_EDDSA_VERIFY_ED448", + [140] = "KDSA_EDDSA_SIGN_ED25519", + [141] = "KDSA_EDDSA_SIGN_ED448", + [142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519", + [143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448", + [144] = "PCKMO_ENCRYPT_DEA_KEY", + [145] = "PCKMO_ENCRYPT_TDEA_128_KEY", + [146] = "PCKMO_ENCRYPT_TDEA_192_KEY", + [147] = "PCKMO_ENCRYPT_AES_128_KEY", + [148] = "PCKMO_ENCRYPT_AES_192_KEY", + [149] = "PCKMO_ENCRYPT_AES_256_KEY", + [150] = "PCKMO_ENCRYPT_ECC_P256_KEY", + [151] = "PCKMO_ENCRYPT_ECC_P384_KEY", + [152] = "PCKMO_ENCRYPT_ECC_P521_KEY", + [153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY", + [154] = "PCKMO_ENCRYPT_ECC_ED448_KEY", + [155] = "IBM_RESERVED_155", + [156] = "IBM_RESERVED_156", +}; + +static void __init attr_event_free(struct attribute **attrs, int num) +{ + struct perf_pmu_events_attr *pa; + int i; + + for (i = 0; i < num; i++) { + struct device_attribute *dap; + + dap = container_of(attrs[i], struct device_attribute, attr); + pa = container_of(dap, struct perf_pmu_events_attr, attr); + kfree(pa); + } + kfree(attrs); +} + +static int __init attr_event_init_one(struct attribute **attrs, int num) +{ + struct perf_pmu_events_attr *pa; + + pa = kzalloc(sizeof(*pa), GFP_KERNEL); + if (!pa) + return -ENOMEM; + + sysfs_attr_init(&pa->attr.attr); + pa->id = PAI_CRYPTO_BASE + num; + pa->attr.attr.name = paicrypt_ctrnames[num]; + pa->attr.attr.mode = 0444; + pa->attr.show = cpumf_events_sysfs_show; + pa->attr.store = NULL; + attrs[num] = &pa->attr.attr; + return 0; +} + +/* Create PMU sysfs event attributes on the fly. 
*/ +static int __init attr_event_init(void) +{ + struct attribute **attrs; + int ret, i; + + attrs = kmalloc_array(ARRAY_SIZE(paicrypt_ctrnames) + 1, sizeof(*attrs), + GFP_KERNEL); + if (!attrs) + return -ENOMEM; + for (i = 0; i < ARRAY_SIZE(paicrypt_ctrnames); i++) { + ret = attr_event_init_one(attrs, i); + if (ret) { + attr_event_free(attrs, i - 1); + return ret; + } + } + attrs[i] = NULL; + paicrypt_events_group.attrs = attrs; + return 0; +} + +static int __init paicrypt_init(void) +{ + struct qpaci_info_block ib; + int rc; + + if (!test_facility(196)) + return 0; + + qpaci(&ib); + paicrypt_cnt = ib.num_cc; + if (paicrypt_cnt == 0) + return 0; + if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR) + paicrypt_cnt = PAI_CRYPTO_MAXCTR - 1; + + rc = attr_event_init(); /* Export known PAI crypto events */ + if (rc) { + pr_err("Creation of PMU pai_crypto /sysfs failed\n"); + return rc; + } + + /* Setup s390dbf facility */ + cfm_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128); + if (!cfm_dbg) { + pr_err("Registration of s390dbf pai_crypto failed\n"); + return -ENOMEM; + } + debug_register_view(cfm_dbg, &debug_sprintf_view); + + rc = perf_pmu_register(&paicrypt, "pai_crypto", -1); + if (rc) { + pr_err("Registering the pai_crypto PMU failed with rc=%i\n", + rc); + debug_unregister_view(cfm_dbg, &debug_sprintf_view); + debug_unregister(cfm_dbg); + return rc; + } + return 0; +} + +device_initcall(paicrypt_init); -- cgit v1.2.3 From 03780c83c78546310c084ef3df69da2a0bafbcb5 Mon Sep 17 00:00:00 2001 From: Sven Schnelle Date: Mon, 2 May 2022 11:12:09 +0200 Subject: s390/stp: fix todoff size The size of the TOD offset field in the stp info response is 64 bits. Signed-off-by: Sven Schnelle Reviewed-by: Heiko Carstens Signed-off-by: Heiko Carstens --- arch/s390/include/asm/stp.h | 4 ++-- arch/s390/kernel/time.c | 4 +--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/arch/s390/include/asm/stp.h b/arch/s390/include/asm/stp.h index ba07463897c1..4d74d7e33340 100644 --- a/arch/s390/include/asm/stp.h +++ b/arch/s390/include/asm/stp.h @@ -44,8 +44,8 @@ struct stp_sstpi { u32 : 32; u32 ctnid[3]; u32 : 32; - u32 todoff[4]; - u32 rsvd[48]; + u64 todoff; + u32 rsvd[50]; } __packed; struct stp_tzib { diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 326cb8f75f58..2506bfdc91c7 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -554,9 +554,7 @@ static int stp_sync_clock(void *data) while (atomic_read(&sync->cpus) != 0) cpu_relax(); rc = 0; - if (stp_info.todoff[0] || stp_info.todoff[1] || - stp_info.todoff[2] || stp_info.todoff[3] || - stp_info.tmd != 2) { + if (stp_info.todoff || stp_info.tmd != 2) { flags = vdso_update_begin(); rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0, &clock_delta); -- cgit v1.2.3 From 5ace65ebb5ce9fe1cc8fdbdd97079fb566ef0ea4 Mon Sep 17 00:00:00 2001 From: Sven Schnelle Date: Tue, 3 May 2022 09:58:33 +0200 Subject: s390/stp: clock_delta should be signed clock_delta is declared as unsigned long in various places. However, the clock sync delta can be negative. This would add a huge positive offset in clock_sync_global where clock_delta is added to clk.eitod which is a 72 bit integer. Declare it as signed long to fix this. 
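As a rough illustration of the failure mode (a user-space sketch, not kernel code; all names here are made up and __int128 merely stands in for the 72-bit clk.eitod accumulator): once a physically negative delta has been stored in an unsigned 64-bit variable, widening it later cannot recover the sign, so the epoch is pushed forward by nearly 2^64 instead of backwards by a few units.

#include <stdio.h>

int main(void)
{
	long real_delta = -42;			/* clock was stepped backwards */
	unsigned long as_unsigned = real_delta;	/* old declaration: unsigned 64 bit */
	__int128 tod_epoch = 0;			/* stand-in for clk.eitod */

	tod_epoch += as_unsigned;		/* adds 2^64 - 42, not -42 */
	printf("unsigned delta: %llu\n", (unsigned long long)tod_epoch);

	tod_epoch = 0;
	tod_epoch += real_delta;		/* signed add applies the offset correctly */
	printf("signed delta:   %lld\n", (long long)tod_epoch);
	return 0;
}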
Cc: stable@vger.kernel.org Signed-off-by: Sven Schnelle Reviewed-by: Heiko Carstens Signed-off-by: Heiko Carstens --- arch/s390/include/asm/cio.h | 2 +- arch/s390/kernel/time.c | 8 ++++---- drivers/s390/cio/chsc.c | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h index 1effac6a0152..1c4f585dd39b 100644 --- a/arch/s390/include/asm/cio.h +++ b/arch/s390/include/asm/cio.h @@ -369,7 +369,7 @@ void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev); struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages); /* Function from drivers/s390/cio/chsc.c */ -int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta); +int chsc_sstpc(void *page, unsigned int op, u16 ctrl, long *clock_delta); int chsc_sstpi(void *page, void *result, size_t size); int chsc_stzi(void *page, void *result, size_t size); int chsc_sgib(u32 origin); diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 2506bfdc91c7..6b7b6d5e3632 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -364,7 +364,7 @@ static inline int check_sync_clock(void) * Apply clock delta to the global data structures. * This is called once on the CPU that performed the clock sync. */ -static void clock_sync_global(unsigned long delta) +static void clock_sync_global(long delta) { unsigned long now, adj; struct ptff_qto qto; @@ -400,7 +400,7 @@ static void clock_sync_global(unsigned long delta) * Apply clock delta to the per-CPU data structures of this CPU. * This is called for each online CPU after the call to clock_sync_global. */ -static void clock_sync_local(unsigned long delta) +static void clock_sync_local(long delta) { /* Add the delta to the clock comparator. */ if (S390_lowcore.clock_comparator != clock_comparator_max) { @@ -424,7 +424,7 @@ static void __init time_init_wq(void) struct clock_sync_data { atomic_t cpus; int in_sync; - unsigned long clock_delta; + long clock_delta; }; /* @@ -544,7 +544,7 @@ static int stpinfo_valid(void) static int stp_sync_clock(void *data) { struct clock_sync_data *sync = data; - u64 clock_delta, flags; + long clock_delta, flags; static int first; int rc; diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 297fb399363c..620a917cd3a1 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -1255,7 +1255,7 @@ exit: EXPORT_SYMBOL_GPL(css_general_characteristics); EXPORT_SYMBOL_GPL(css_chsc_characteristics); -int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta) +int chsc_sstpc(void *page, unsigned int op, u16 ctrl, long *clock_delta) { struct { struct chsc_header request; @@ -1266,7 +1266,7 @@ int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta) unsigned int rsvd2[5]; struct chsc_header response; unsigned int rsvd3[3]; - u64 clock_delta; + s64 clock_delta; unsigned int rsvd4[2]; } *rr; int rc; -- cgit v1.2.3 From 63678eecec57fc51b778be3da35a397931287170 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 6 May 2022 11:33:19 +0200 Subject: s390/preempt: disable __preempt_count_add() optimization for PROFILE_ALL_BRANCHES MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit gcc 12 does not (always) optimize away code that should only be generated if parameters are constant and within in a certain range. 
This depends on various obscure kernel config options, however in particular PROFILE_ALL_BRANCHES can trigger this compile error: In function ‘__atomic_add_const’, inlined from ‘__preempt_count_add.part.0’ at ./arch/s390/include/asm/preempt.h:50:3: ./arch/s390/include/asm/atomic_ops.h:80:9: error: impossible constraint in ‘asm’ 80 | asm volatile( \ | ^~~ Workaround this by simply disabling the optimization for PROFILE_ALL_BRANCHES, since the kernel will be so slow, that this optimization won't matter at all. Reported-by: Thomas Richter Reviewed-by: Sven Schnelle Signed-off-by: Heiko Carstens --- arch/s390/include/asm/preempt.h | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h index d9d5350cc3ec..bf15da0fedbc 100644 --- a/arch/s390/include/asm/preempt.h +++ b/arch/s390/include/asm/preempt.h @@ -46,10 +46,17 @@ static inline bool test_preempt_need_resched(void) static inline void __preempt_count_add(int val) { - if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) - __atomic_add_const(val, &S390_lowcore.preempt_count); - else - __atomic_add(val, &S390_lowcore.preempt_count); + /* + * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES + * enabled, gcc 12 fails to handle __builtin_constant_p(). + */ + if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) { + if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) { + __atomic_add_const(val, &S390_lowcore.preempt_count); + return; + } + } + __atomic_add(val, &S390_lowcore.preempt_count); } static inline void __preempt_count_sub(int val) -- cgit v1.2.3 From c9311de71635d3eaa158df8516b9b99a92d60a0c Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Fri, 13 May 2022 12:42:55 +0200 Subject: s390/cpumf: add new extended counter set for IBM z16 Export the extended counter set counters of the IBM z16 via sysfs. 
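As a hedged usage sketch (not part of the patch): once the events are exported, they can be counted from user space with perf_event_open(2). The PMU name cpum_cf, the sysfs path and the use of the raw counter number as attr.config are assumptions here and should be checked against /sys/bus/event_source/devices on the target system.

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	unsigned int type = 0;
	uint64_t count = 0;
	FILE *f;
	int fd;

	/* The dynamic PMU type id is published in sysfs. */
	f = fopen("/sys/bus/event_source/devices/cpum_cf/type", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%u", &type) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x0091;	/* DCW_REQ, per the counter table added below */
	attr.disabled = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload of interest here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("DCW_REQ: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}

With the symbolic names in place, the same measurement should also be possible as "perf stat -e cpum_cf/DCW_REQ/ -- <cmd>", again assuming the PMU is registered under that name.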
Signed-off-by: Thomas Richter Acked-by: Sumanth Korikkar Signed-off-by: Heiko Carstens --- arch/s390/kernel/perf_cpum_cf_events.c | 148 +++++++++++++++++++++++++++++++++ 1 file changed, 148 insertions(+) diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c index 52c1fe23b823..0d64aafd158f 100644 --- a/arch/s390/kernel/perf_cpum_cf_events.c +++ b/arch/s390/kernel/perf_cpum_cf_events.c @@ -295,6 +295,76 @@ CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x00108); CPUMF_EVENT_ATTR(cf_z15, DFLT_CCFINISH, 0x00109); CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0); CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1); +CPUMF_EVENT_ATTR(cf_z16, L1D_RO_EXCL_WRITES, 0x0080); +CPUMF_EVENT_ATTR(cf_z16, DTLB2_WRITES, 0x0081); +CPUMF_EVENT_ATTR(cf_z16, DTLB2_MISSES, 0x0082); +CPUMF_EVENT_ATTR(cf_z16, CRSTE_1MB_WRITES, 0x0083); +CPUMF_EVENT_ATTR(cf_z16, DTLB2_GPAGE_WRITES, 0x0084); +CPUMF_EVENT_ATTR(cf_z16, ITLB2_WRITES, 0x0086); +CPUMF_EVENT_ATTR(cf_z16, ITLB2_MISSES, 0x0087); +CPUMF_EVENT_ATTR(cf_z16, TLB2_PTE_WRITES, 0x0089); +CPUMF_EVENT_ATTR(cf_z16, TLB2_CRSTE_WRITES, 0x008a); +CPUMF_EVENT_ATTR(cf_z16, TLB2_ENGINES_BUSY, 0x008b); +CPUMF_EVENT_ATTR(cf_z16, TX_C_TEND, 0x008c); +CPUMF_EVENT_ATTR(cf_z16, TX_NC_TEND, 0x008d); +CPUMF_EVENT_ATTR(cf_z16, L1C_TLB2_MISSES, 0x008f); +CPUMF_EVENT_ATTR(cf_z16, DCW_REQ, 0x0091); +CPUMF_EVENT_ATTR(cf_z16, DCW_REQ_IV, 0x0092); +CPUMF_EVENT_ATTR(cf_z16, DCW_REQ_CHIP_HIT, 0x0093); +CPUMF_EVENT_ATTR(cf_z16, DCW_REQ_DRAWER_HIT, 0x0094); +CPUMF_EVENT_ATTR(cf_z16, DCW_ON_CHIP, 0x0095); +CPUMF_EVENT_ATTR(cf_z16, DCW_ON_CHIP_IV, 0x0096); +CPUMF_EVENT_ATTR(cf_z16, DCW_ON_CHIP_CHIP_HIT, 0x0097); +CPUMF_EVENT_ATTR(cf_z16, DCW_ON_CHIP_DRAWER_HIT, 0x0098); +CPUMF_EVENT_ATTR(cf_z16, DCW_ON_MODULE, 0x0099); +CPUMF_EVENT_ATTR(cf_z16, DCW_ON_DRAWER, 0x009a); +CPUMF_EVENT_ATTR(cf_z16, DCW_OFF_DRAWER, 0x009b); +CPUMF_EVENT_ATTR(cf_z16, DCW_ON_CHIP_MEMORY, 0x009c); +CPUMF_EVENT_ATTR(cf_z16, DCW_ON_MODULE_MEMORY, 0x009d); +CPUMF_EVENT_ATTR(cf_z16, DCW_ON_DRAWER_MEMORY, 0x009e); +CPUMF_EVENT_ATTR(cf_z16, DCW_OFF_DRAWER_MEMORY, 0x009f); +CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_MODULE_IV, 0x00a0); +CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_MODULE_CHIP_HIT, 0x00a1); +CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_MODULE_DRAWER_HIT, 0x00a2); +CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_DRAWER_IV, 0x00a3); +CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_DRAWER_CHIP_HIT, 0x00a4); +CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_DRAWER_DRAWER_HIT, 0x00a5); +CPUMF_EVENT_ATTR(cf_z16, IDCW_OFF_DRAWER_IV, 0x00a6); +CPUMF_EVENT_ATTR(cf_z16, IDCW_OFF_DRAWER_CHIP_HIT, 0x00a7); +CPUMF_EVENT_ATTR(cf_z16, IDCW_OFF_DRAWER_DRAWER_HIT, 0x00a8); +CPUMF_EVENT_ATTR(cf_z16, ICW_REQ, 0x00a9); +CPUMF_EVENT_ATTR(cf_z16, ICW_REQ_IV, 0x00aa); +CPUMF_EVENT_ATTR(cf_z16, ICW_REQ_CHIP_HIT, 0x00ab); +CPUMF_EVENT_ATTR(cf_z16, ICW_REQ_DRAWER_HIT, 0x00ac); +CPUMF_EVENT_ATTR(cf_z16, ICW_ON_CHIP, 0x00ad); +CPUMF_EVENT_ATTR(cf_z16, ICW_ON_CHIP_IV, 0x00ae); +CPUMF_EVENT_ATTR(cf_z16, ICW_ON_CHIP_CHIP_HIT, 0x00af); +CPUMF_EVENT_ATTR(cf_z16, ICW_ON_CHIP_DRAWER_HIT, 0x00b0); +CPUMF_EVENT_ATTR(cf_z16, ICW_ON_MODULE, 0x00b1); +CPUMF_EVENT_ATTR(cf_z16, ICW_ON_DRAWER, 0x00b2); +CPUMF_EVENT_ATTR(cf_z16, ICW_OFF_DRAWER, 0x00b3); +CPUMF_EVENT_ATTR(cf_z16, ICW_ON_CHIP_MEMORY, 0x00b4); +CPUMF_EVENT_ATTR(cf_z16, ICW_ON_MODULE_MEMORY, 0x00b5); +CPUMF_EVENT_ATTR(cf_z16, ICW_ON_DRAWER_MEMORY, 0x00b6); +CPUMF_EVENT_ATTR(cf_z16, ICW_OFF_DRAWER_MEMORY, 0x00b7); +CPUMF_EVENT_ATTR(cf_z16, BCD_DFP_EXECUTION_SLOTS, 0x00e0); +CPUMF_EVENT_ATTR(cf_z16, 
VX_BCD_EXECUTION_SLOTS, 0x00e1); +CPUMF_EVENT_ATTR(cf_z16, DECIMAL_INSTRUCTIONS, 0x00e2); +CPUMF_EVENT_ATTR(cf_z16, LAST_HOST_TRANSLATIONS, 0x00e8); +CPUMF_EVENT_ATTR(cf_z16, TX_NC_TABORT, 0x00f4); +CPUMF_EVENT_ATTR(cf_z16, TX_C_TABORT_NO_SPECIAL, 0x00f5); +CPUMF_EVENT_ATTR(cf_z16, TX_C_TABORT_SPECIAL, 0x00f6); +CPUMF_EVENT_ATTR(cf_z16, DFLT_ACCESS, 0x00f8); +CPUMF_EVENT_ATTR(cf_z16, DFLT_CYCLES, 0x00fd); +CPUMF_EVENT_ATTR(cf_z16, SORTL, 0x0100); +CPUMF_EVENT_ATTR(cf_z16, DFLT_CC, 0x0109); +CPUMF_EVENT_ATTR(cf_z16, DFLT_CCFINISH, 0x010a); +CPUMF_EVENT_ATTR(cf_z16, NNPA_INVOCATIONS, 0x010b); +CPUMF_EVENT_ATTR(cf_z16, NNPA_COMPLETIONS, 0x010c); +CPUMF_EVENT_ATTR(cf_z16, NNPA_WAIT_LOCK, 0x010d); +CPUMF_EVENT_ATTR(cf_z16, NNPA_HOLD_LOCK, 0x010e); +CPUMF_EVENT_ATTR(cf_z16, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0); +CPUMF_EVENT_ATTR(cf_z16, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1); static struct attribute *cpumcf_fvn1_pmu_event_attr[] __initdata = { CPUMF_EVENT_PTR(cf_fvn1, CPU_CYCLES), @@ -635,6 +705,80 @@ static struct attribute *cpumcf_z15_pmu_event_attr[] __initdata = { NULL, }; +static struct attribute *cpumcf_z16_pmu_event_attr[] __initdata = { + CPUMF_EVENT_PTR(cf_z16, L1D_RO_EXCL_WRITES), + CPUMF_EVENT_PTR(cf_z16, DTLB2_WRITES), + CPUMF_EVENT_PTR(cf_z16, DTLB2_MISSES), + CPUMF_EVENT_PTR(cf_z16, CRSTE_1MB_WRITES), + CPUMF_EVENT_PTR(cf_z16, DTLB2_GPAGE_WRITES), + CPUMF_EVENT_PTR(cf_z16, ITLB2_WRITES), + CPUMF_EVENT_PTR(cf_z16, ITLB2_MISSES), + CPUMF_EVENT_PTR(cf_z16, TLB2_PTE_WRITES), + CPUMF_EVENT_PTR(cf_z16, TLB2_CRSTE_WRITES), + CPUMF_EVENT_PTR(cf_z16, TLB2_ENGINES_BUSY), + CPUMF_EVENT_PTR(cf_z16, TX_C_TEND), + CPUMF_EVENT_PTR(cf_z16, TX_NC_TEND), + CPUMF_EVENT_PTR(cf_z16, L1C_TLB2_MISSES), + CPUMF_EVENT_PTR(cf_z16, DCW_REQ), + CPUMF_EVENT_PTR(cf_z16, DCW_REQ_IV), + CPUMF_EVENT_PTR(cf_z16, DCW_REQ_CHIP_HIT), + CPUMF_EVENT_PTR(cf_z16, DCW_REQ_DRAWER_HIT), + CPUMF_EVENT_PTR(cf_z16, DCW_ON_CHIP), + CPUMF_EVENT_PTR(cf_z16, DCW_ON_CHIP_IV), + CPUMF_EVENT_PTR(cf_z16, DCW_ON_CHIP_CHIP_HIT), + CPUMF_EVENT_PTR(cf_z16, DCW_ON_CHIP_DRAWER_HIT), + CPUMF_EVENT_PTR(cf_z16, DCW_ON_MODULE), + CPUMF_EVENT_PTR(cf_z16, DCW_ON_DRAWER), + CPUMF_EVENT_PTR(cf_z16, DCW_OFF_DRAWER), + CPUMF_EVENT_PTR(cf_z16, DCW_ON_CHIP_MEMORY), + CPUMF_EVENT_PTR(cf_z16, DCW_ON_MODULE_MEMORY), + CPUMF_EVENT_PTR(cf_z16, DCW_ON_DRAWER_MEMORY), + CPUMF_EVENT_PTR(cf_z16, DCW_OFF_DRAWER_MEMORY), + CPUMF_EVENT_PTR(cf_z16, IDCW_ON_MODULE_IV), + CPUMF_EVENT_PTR(cf_z16, IDCW_ON_MODULE_CHIP_HIT), + CPUMF_EVENT_PTR(cf_z16, IDCW_ON_MODULE_DRAWER_HIT), + CPUMF_EVENT_PTR(cf_z16, IDCW_ON_DRAWER_IV), + CPUMF_EVENT_PTR(cf_z16, IDCW_ON_DRAWER_CHIP_HIT), + CPUMF_EVENT_PTR(cf_z16, IDCW_ON_DRAWER_DRAWER_HIT), + CPUMF_EVENT_PTR(cf_z16, IDCW_OFF_DRAWER_IV), + CPUMF_EVENT_PTR(cf_z16, IDCW_OFF_DRAWER_CHIP_HIT), + CPUMF_EVENT_PTR(cf_z16, IDCW_OFF_DRAWER_DRAWER_HIT), + CPUMF_EVENT_PTR(cf_z16, ICW_REQ), + CPUMF_EVENT_PTR(cf_z16, ICW_REQ_IV), + CPUMF_EVENT_PTR(cf_z16, ICW_REQ_CHIP_HIT), + CPUMF_EVENT_PTR(cf_z16, ICW_REQ_DRAWER_HIT), + CPUMF_EVENT_PTR(cf_z16, ICW_ON_CHIP), + CPUMF_EVENT_PTR(cf_z16, ICW_ON_CHIP_IV), + CPUMF_EVENT_PTR(cf_z16, ICW_ON_CHIP_CHIP_HIT), + CPUMF_EVENT_PTR(cf_z16, ICW_ON_CHIP_DRAWER_HIT), + CPUMF_EVENT_PTR(cf_z16, ICW_ON_MODULE), + CPUMF_EVENT_PTR(cf_z16, ICW_ON_DRAWER), + CPUMF_EVENT_PTR(cf_z16, ICW_OFF_DRAWER), + CPUMF_EVENT_PTR(cf_z16, ICW_ON_CHIP_MEMORY), + CPUMF_EVENT_PTR(cf_z16, ICW_ON_MODULE_MEMORY), + CPUMF_EVENT_PTR(cf_z16, ICW_ON_DRAWER_MEMORY), + CPUMF_EVENT_PTR(cf_z16, ICW_OFF_DRAWER_MEMORY), + CPUMF_EVENT_PTR(cf_z16, 
BCD_DFP_EXECUTION_SLOTS), + CPUMF_EVENT_PTR(cf_z16, VX_BCD_EXECUTION_SLOTS), + CPUMF_EVENT_PTR(cf_z16, DECIMAL_INSTRUCTIONS), + CPUMF_EVENT_PTR(cf_z16, LAST_HOST_TRANSLATIONS), + CPUMF_EVENT_PTR(cf_z16, TX_NC_TABORT), + CPUMF_EVENT_PTR(cf_z16, TX_C_TABORT_NO_SPECIAL), + CPUMF_EVENT_PTR(cf_z16, TX_C_TABORT_SPECIAL), + CPUMF_EVENT_PTR(cf_z16, DFLT_ACCESS), + CPUMF_EVENT_PTR(cf_z16, DFLT_CYCLES), + CPUMF_EVENT_PTR(cf_z16, SORTL), + CPUMF_EVENT_PTR(cf_z16, DFLT_CC), + CPUMF_EVENT_PTR(cf_z16, DFLT_CCFINISH), + CPUMF_EVENT_PTR(cf_z16, NNPA_INVOCATIONS), + CPUMF_EVENT_PTR(cf_z16, NNPA_COMPLETIONS), + CPUMF_EVENT_PTR(cf_z16, NNPA_WAIT_LOCK), + CPUMF_EVENT_PTR(cf_z16, NNPA_HOLD_LOCK), + CPUMF_EVENT_PTR(cf_z16, MT_DIAG_CYCLES_ONE_THR_ACTIVE), + CPUMF_EVENT_PTR(cf_z16, MT_DIAG_CYCLES_TWO_THR_ACTIVE), + NULL, +}; + /* END: CPUM_CF COUNTER DEFINITIONS ===================================== */ static struct attribute_group cpumcf_pmu_events_group = { @@ -749,6 +893,10 @@ __init const struct attribute_group **cpumf_cf_event_group(void) case 0x8562: model = cpumcf_z15_pmu_event_attr; break; + case 0x3931: + case 0x3932: + model = cpumcf_z16_pmu_event_attr; + break; default: model = none; break; -- cgit v1.2.3 From fad442d3abde47aef97d0d822807ab6e2555784a Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 11 May 2022 14:05:25 +0200 Subject: s390/alternatives: provide identical sized orginal/alternative sequences Explicitly provide identical sized original/alternative instruction sequences. This way there is no need for the s390 specific alternatives infrastructure to generate padding sequences. The code which generates such sequences will be removed with a follow on patch. Acked-by: Vasily Gorbik Tested-by: Nathan Chancellor Tested-by: Nick Desaulniers Link: https://lore.kernel.org/r/20220511120532.2228616-2-hca@linux.ibm.com Signed-off-by: Heiko Carstens --- arch/s390/include/asm/spinlock.h | 2 +- arch/s390/kernel/entry.S | 20 ++++++++++---------- arch/s390/lib/spinlock.c | 4 ++-- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index 10a460762e94..37127cd7749e 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -79,7 +79,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp) typecheck(int, lp->lock); kcsan_release(); asm_inline volatile( - ALTERNATIVE("", ".insn rre,0xb2fa0000,7,0", 49) /* NIAI 7 */ + ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", 49) /* NIAI 7 */ " sth %1,%0\n" : "=R" (((unsigned short *) &lp->lock)[1]) : "d" (0) : "cc", "memory"); diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 685ccec02a27..a6b45eaa3450 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -53,19 +53,19 @@ STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE _LPP_OFFSET = __LC_LPP .macro STBEAR address - ALTERNATIVE "", ".insn s,0xb2010000,\address", 193 + ALTERNATIVE "nop", ".insn s,0xb2010000,\address", 193 .endm .macro LBEAR address - ALTERNATIVE "", ".insn s,0xb2000000,\address", 193 + ALTERNATIVE "nop", ".insn s,0xb2000000,\address", 193 .endm .macro LPSWEY address,lpswe - ALTERNATIVE "b \lpswe", ".insn siy,0xeb0000000071,\address,0", 193 + ALTERNATIVE "b \lpswe; nopr", ".insn siy,0xeb0000000071,\address,0", 193 .endm .macro MBEAR reg - ALTERNATIVE "", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193 + ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193 .endm .macro CHECK_STACK savearea @@ 
-121,16 +121,16 @@ _LPP_OFFSET = __LC_LPP .endm .macro BPOFF - ALTERNATIVE "", ".insn rrf,0xb2e80000,0,0,12,0", 82 + ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", 82 .endm .macro BPON - ALTERNATIVE "", ".insn rrf,0xb2e80000,0,0,13,0", 82 + ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", 82 .endm .macro BPENTER tif_ptr,tif_mask ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \ - "", 82 + "j .+12; nop; nop", 82 .endm .macro BPEXIT tif_ptr,tif_mask @@ -226,7 +226,7 @@ ENTRY(__switch_to) aghi %r3,__TASK_pid mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task - ALTERNATIVE "", "lpp _LPP_OFFSET", 40 + ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40 BR_EX %r14 ENDPROC(__switch_to) @@ -610,7 +610,7 @@ ENTRY(mcck_int_handler) jno 0f BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP stpt __LC_EXIT_TIMER -0: ALTERNATIVE "", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193 +0: ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193 LBEAR 0(%r12) lmg %r11,%r15,__PT_R11(%r11) LPSWEY __LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE @@ -646,7 +646,7 @@ ENTRY(mcck_int_handler) ENDPROC(mcck_int_handler) ENTRY(restart_int_handler) - ALTERNATIVE "", "lpp _LPP_OFFSET", 40 + ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40 stg %r15,__LC_SAVE_AREA_RESTART TSTMSK __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4 jz 0f diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index 5e7ea8b111e8..04d4c6cf898e 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -75,7 +75,7 @@ static inline int arch_load_niai4(int *lock) int owner; asm_inline volatile( - ALTERNATIVE("", ".insn rre,0xb2fa0000,4,0", 49) /* NIAI 4 */ + ALTERNATIVE("nop", ".insn rre,0xb2fa0000,4,0", 49) /* NIAI 4 */ " l %0,%1\n" : "=d" (owner) : "Q" (*lock) : "memory"); return owner; @@ -86,7 +86,7 @@ static inline int arch_cmpxchg_niai8(int *lock, int old, int new) int expected = old; asm_inline volatile( - ALTERNATIVE("", ".insn rre,0xb2fa0000,8,0", 49) /* NIAI 8 */ + ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", 49) /* NIAI 8 */ " cs %0,%3,%1\n" : "=d" (old), "=Q" (*lock) : "0" (old), "d" (new), "Q" (*lock) -- cgit v1.2.3 From e6ed91fd0768b914558dad5eeda2407a7d871f52 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 11 May 2022 14:05:26 +0200 Subject: s390/alternatives: remove padding generation code clang fails to handle ".if" statements in inline assembly which are heavily used in the alternatives code. To work around this remove this code, and enforce that users of alternatives must specify original and alternative instruction sequences which have identical sizes. Add a compile time check with two ".org" statements similar to arm64. In result not only clang can handle this, but also quite a lot of code can be removed. 
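For readers unfamiliar with the trick, a minimal stand-alone sketch of the check follows (illustrative only, not the kernel macro; it assumes an s390 assembler, i.e. gas or llvm's IAS): the two .org expressions cancel out exactly when both instruction sequences have the same length, and any mismatch makes one of them try to move the location counter backwards, which the assembler refuses at build time.

/* Size-equality assertion built from two .org directives. */
#define ASSERT_SAME_SIZE(oldinstr, newinstr)			\
	asm("661:\n\t" oldinstr "\n662:\n"			\
	    "663:\n\t" newinstr "\n664:\n"			\
	    ".org . - (662b-661b) + (664b-663b)\n"		\
	    ".org . - (664b-663b) + (662b-661b)\n")

void alt_size_demo(void)
{
	/* 4-byte nop vs. 4-byte NIAI: assembles cleanly. */
	ASSERT_SAME_SIZE("nop", ".insn rre,0xb2fa0000,7,0");
	/* 4-byte nop vs. 6-byte brcl would stop the build:
	 * ASSERT_SAME_SIZE("nop", "brcl 0,0");
	 */
}

Note that the check only verifies the sizes, it does not generate padding; that is why the preceding patch first made all original/alternative pairs explicitly the same size.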
Acked-by: Vasily Gorbik Tested-by: Nathan Chancellor Tested-by: Nick Desaulniers Link: https://github.com/ClangBuiltLinux/linux/issues/1356 Link: https://lore.kernel.org/r/20220511120532.2228616-3-hca@linux.ibm.com Signed-off-by: Heiko Carstens --- arch/s390/include/asm/alternative-asm.h | 76 ++++----------------------- arch/s390/include/asm/alternative.h | 93 +++++++-------------------------- arch/s390/kernel/alternative.c | 61 +-------------------- 3 files changed, 31 insertions(+), 199 deletions(-) diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h index bb3837d7387c..7db046596b93 100644 --- a/arch/s390/include/asm/alternative-asm.h +++ b/arch/s390/include/asm/alternative-asm.h @@ -4,19 +4,6 @@ #ifdef __ASSEMBLY__ -/* - * Check the length of an instruction sequence. The length may not be larger - * than 254 bytes and it has to be divisible by 2. - */ -.macro alt_len_check start,end - .if ( \end - \start ) > 254 - .error "cpu alternatives does not support instructions blocks > 254 bytes\n" - .endif - .if ( \end - \start ) % 2 - .error "cpu alternatives instructions length is odd\n" - .endif -.endm - /* * Issue one struct alt_instr descriptor entry (need to put it into * the section .altinstructions, see below). This entry contains @@ -28,66 +15,29 @@ .long \alt_start - . .word \feature .byte \orig_end - \orig_start - .byte \alt_end - \alt_start -.endm - -/* - * Fill up @bytes with nops. The macro emits 6-byte nop instructions - * for the bulk of the area, possibly followed by a 4-byte and/or - * a 2-byte nop if the size of the area is not divisible by 6. - */ -.macro alt_pad_fill bytes - .rept ( \bytes ) / 6 - brcl 0,0 - .endr - .rept ( \bytes ) % 6 / 4 - nop - .endr - .rept ( \bytes ) % 6 % 4 / 2 - nopr - .endr -.endm - -/* - * Fill up @bytes with nops. If the number of bytes is larger - * than 6, emit a jg instruction to branch over all nops, then - * fill an area of size (@bytes - 6) with nop instructions. - */ -.macro alt_pad bytes - .if ( \bytes > 0 ) - .if ( \bytes > 6 ) - jg . + \bytes - alt_pad_fill \bytes - 6 - .else - alt_pad_fill \bytes - .endif - .endif + .org . - ( \orig_end - \orig_start ) + ( \alt_end - \alt_start ) + .org . - ( \alt_end - \alt_start ) + ( \orig_end - \orig_start ) .endm /* * Define an alternative between two instructions. If @feature is * present, early code in apply_alternatives() replaces @oldinstr with - * @newinstr. ".skip" directive takes care of proper instruction padding - * in case @newinstr is longer than @oldinstr. + * @newinstr. */ .macro ALTERNATIVE oldinstr, newinstr, feature .pushsection .altinstr_replacement,"ax" 770: \newinstr 771: .popsection 772: \oldinstr -773: alt_len_check 770b, 771b - alt_len_check 772b, 773b - alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) ) -774: .pushsection .altinstructions,"a" - alt_entry 772b, 774b, 770b, 771b, \feature +773: .pushsection .altinstructions,"a" + alt_entry 772b, 773b, 770b, 771b, \feature .popsection .endm /* * Define an alternative between two instructions. If @feature is * present, early code in apply_alternatives() replaces @oldinstr with - * @newinstr. ".skip" directive takes care of proper instruction padding - * in case @newinstr is longer than @oldinstr. + * @newinstr. 
*/ .macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2 .pushsection .altinstr_replacement,"ax" @@ -95,17 +45,9 @@ 771: \newinstr2 772: .popsection 773: \oldinstr -774: alt_len_check 770b, 771b - alt_len_check 771b, 772b - alt_len_check 773b, 774b - .if ( 771b - 770b > 772b - 771b ) - alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) ) - .else - alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) ) - .endif -775: .pushsection .altinstructions,"a" - alt_entry 773b, 775b, 770b, 771b,\feature1 - alt_entry 773b, 775b, 771b, 772b,\feature2 +774: .pushsection .altinstructions,"a" + alt_entry 773b, 774b, 770b, 771b,\feature1 + alt_entry 773b, 774b, 771b, 772b,\feature2 .popsection .endm diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h index 3f2856ed6808..904dd049f954 100644 --- a/arch/s390/include/asm/alternative.h +++ b/arch/s390/include/asm/alternative.h @@ -13,32 +13,25 @@ struct alt_instr { s32 repl_offset; /* offset to replacement instruction */ u16 facility; /* facility bit set for replacement */ u8 instrlen; /* length of original instruction */ - u8 replacementlen; /* length of new instruction */ } __packed; void apply_alternative_instructions(void); void apply_alternatives(struct alt_instr *start, struct alt_instr *end); /* - * |661: |662: |6620 |663: - * +-----------+---------------------+ - * | oldinstr | oldinstr_padding | - * | +----------+----------+ - * | | | | - * | | >6 bytes |6/4/2 nops| - * | |6 bytes jg-----------> - * +-----------+---------------------+ - * ^^ static padding ^^ + * +---------------------------------+ + * |661: |662: + * | oldinstr | + * +---------------------------------+ * * .altinstr_replacement section - * +---------------------+-----------+ + * +---------------------------------+ * |6641: |6651: * | alternative instr 1 | - * +-----------+---------+- - - - - -+ - * |6642: |6652: | - * | alternative instr 2 | padding - * +---------------------+- - - - - -+ - * ^ runtime ^ + * +---------------------------------+ + * |6642: |6652: + * | alternative instr 2 | + * +---------------------------------+ * * .altinstructions section * +---------------------------------+ @@ -47,77 +40,31 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end); * +---------------------------------+ */ -#define b_altinstr(num) "664"#num -#define e_altinstr(num) "665"#num - -#define e_oldinstr_pad_end "663" +#define b_altinstr(num) "664"#num +#define e_altinstr(num) "665"#num #define oldinstr_len "662b-661b" -#define oldinstr_total_len e_oldinstr_pad_end"b-661b" #define altinstr_len(num) e_altinstr(num)"b-"b_altinstr(num)"b" -#define oldinstr_pad_len(num) \ - "-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \ - "((" altinstr_len(num) ")-(" oldinstr_len "))" - -#define INSTR_LEN_SANITY_CHECK(len) \ - ".if " len " > 254\n" \ - "\t.error \"cpu alternatives does not support instructions " \ - "blocks > 254 bytes\"\n" \ - ".endif\n" \ - ".if (" len ") %% 2\n" \ - "\t.error \"cpu alternatives instructions length is odd\"\n" \ - ".endif\n" - -#define OLDINSTR_PADDING(oldinstr, num) \ - ".if " oldinstr_pad_len(num) " > 6\n" \ - "\tjg " e_oldinstr_pad_end "f\n" \ - "6620:\n" \ - "\t.rept (" oldinstr_pad_len(num) " - (6620b-662b)) / 2\n" \ - "\tnopr\n" \ - ".else\n" \ - "\t.rept " oldinstr_pad_len(num) " / 6\n" \ - "\t.brcl 0,0\n" \ - "\t.endr\n" \ - "\t.rept " oldinstr_pad_len(num) " %% 6 / 4\n" \ - "\tnop\n" \ - "\t.endr\n" \ - "\t.rept " oldinstr_pad_len(num) " %% 6 %% 4 / 2\n" \ - "\tnopr\n" \ - ".endr\n" \ - ".endif\n" - 
-#define OLDINSTR(oldinstr, num) \ - "661:\n\t" oldinstr "\n662:\n" \ - OLDINSTR_PADDING(oldinstr, num) \ - e_oldinstr_pad_end ":\n" \ - INSTR_LEN_SANITY_CHECK(oldinstr_len) - -#define OLDINSTR_2(oldinstr, num1, num2) \ - "661:\n\t" oldinstr "\n662:\n" \ - ".if " altinstr_len(num1) " < " altinstr_len(num2) "\n" \ - OLDINSTR_PADDING(oldinstr, num2) \ - ".else\n" \ - OLDINSTR_PADDING(oldinstr, num1) \ - ".endif\n" \ - e_oldinstr_pad_end ":\n" \ - INSTR_LEN_SANITY_CHECK(oldinstr_len) + +#define OLDINSTR(oldinstr) \ + "661:\n\t" oldinstr "\n662:\n" #define ALTINSTR_ENTRY(facility, num) \ "\t.long 661b - .\n" /* old instruction */ \ "\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \ "\t.word " __stringify(facility) "\n" /* facility bit */ \ - "\t.byte " oldinstr_total_len "\n" /* source len */ \ - "\t.byte " altinstr_len(num) "\n" /* alt instruction len */ + "\t.byte " oldinstr_len "\n" /* instruction len */ \ + "\t.org . - (" oldinstr_len ") + (" altinstr_len(num) ")\n" \ + "\t.org . - (" altinstr_len(num) ") + (" oldinstr_len ")\n" #define ALTINSTR_REPLACEMENT(altinstr, num) /* replacement */ \ - b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" \ - INSTR_LEN_SANITY_CHECK(altinstr_len(num)) + b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" /* alternative assembly primitive: */ #define ALTERNATIVE(oldinstr, altinstr, facility) \ ".pushsection .altinstr_replacement, \"ax\"\n" \ ALTINSTR_REPLACEMENT(altinstr, 1) \ ".popsection\n" \ - OLDINSTR(oldinstr, 1) \ + OLDINSTR(oldinstr) \ ".pushsection .altinstructions,\"a\"\n" \ ALTINSTR_ENTRY(facility, 1) \ ".popsection\n" @@ -127,7 +74,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end); ALTINSTR_REPLACEMENT(altinstr1, 1) \ ALTINSTR_REPLACEMENT(altinstr2, 2) \ ".popsection\n" \ - OLDINSTR_2(oldinstr, 1, 2) \ + OLDINSTR(oldinstr) \ ".pushsection .altinstructions,\"a\"\n" \ ALTINSTR_ENTRY(facility1, 1) \ ALTINSTR_ENTRY(facility2, 2) \ diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c index cce0ddee2d02..e7bca29f9c34 100644 --- a/arch/s390/kernel/alternative.c +++ b/arch/s390/kernel/alternative.c @@ -7,8 +7,6 @@ #include #include -#define MAX_PATCH_LEN (255 - 1) - static int __initdata_or_module alt_instr_disabled; static int __init disable_alternative_instructions(char *str) @@ -19,85 +17,30 @@ static int __init disable_alternative_instructions(char *str) early_param("noaltinstr", disable_alternative_instructions); -struct brcl_insn { - u16 opc; - s32 disp; -} __packed; - -static u16 __initdata_or_module nop16 = 0x0700; -static u32 __initdata_or_module nop32 = 0x47000000; -static struct brcl_insn __initdata_or_module nop48 = { - 0xc004, 0 -}; - -static const void *nops[] __initdata_or_module = { - &nop16, - &nop32, - &nop48 -}; - -static void __init_or_module add_jump_padding(void *insns, unsigned int len) -{ - struct brcl_insn brcl = { - 0xc0f4, - len / 2 - }; - - memcpy(insns, &brcl, sizeof(brcl)); - insns += sizeof(brcl); - len -= sizeof(brcl); - - while (len > 0) { - memcpy(insns, &nop16, 2); - insns += 2; - len -= 2; - } -} - -static void __init_or_module add_padding(void *insns, unsigned int len) -{ - if (len > 6) - add_jump_padding(insns, len); - else if (len >= 2) - memcpy(insns, nops[len / 2 - 1], len); -} - static void __init_or_module __apply_alternatives(struct alt_instr *start, struct alt_instr *end) { struct alt_instr *a; u8 *instr, *replacement; - u8 insnbuf[MAX_PATCH_LEN]; /* * The scan order should be from start to end. 
A later scanned * alternative code can overwrite previously scanned alternative code. */ for (a = start; a < end; a++) { - int insnbuf_sz = 0; - instr = (u8 *)&a->instr_offset + a->instr_offset; replacement = (u8 *)&a->repl_offset + a->repl_offset; if (!__test_facility(a->facility, alt_stfle_fac_list)) continue; - if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) { + if (unlikely(a->instrlen % 2)) { WARN_ONCE(1, "cpu alternatives instructions length is " "odd, skipping patching\n"); continue; } - memcpy(insnbuf, replacement, a->replacementlen); - insnbuf_sz = a->replacementlen; - - if (a->instrlen > a->replacementlen) { - add_padding(insnbuf + a->replacementlen, - a->instrlen - a->replacementlen); - insnbuf_sz += a->instrlen - a->replacementlen; - } - - s390_kernel_write(instr, insnbuf, insnbuf_sz); + s390_kernel_write(instr, replacement, a->instrlen); } } -- cgit v1.2.3 From 4c25f0ff6336738fcb03216ae103c3c17908304a Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 11 May 2022 14:05:27 +0200 Subject: s390/entry: workaround llvm's IAS limitations llvm's integrated assembler cannot handle immediate values which are calculated with two local labels: :3:13: error: invalid operand for instruction clgfi %r14,.Lsie_done - .Lsie_gmap Workaround this by adding clang specific code which reads the specific value from memory. Since this code is within the hot paths of the kernel and adds an additional memory reference, keep the original code, and add ifdef'ed code. Acked-by: Alexander Gordeev Link: https://lore.kernel.org/r/20220511120532.2228616-5-hca@linux.ibm.com Signed-off-by: Heiko Carstens --- arch/s390/kernel/entry.S | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index a6b45eaa3450..df41132ccd06 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -172,9 +172,19 @@ _LPP_OFFSET = __LC_LPP lgr %r14,\reg larl %r13,\start slgr %r14,%r13 - lghi %r13,\end - \start - clgr %r14,%r13 +#ifdef CONFIG_AS_IS_LLVM + clgfrl %r14,.Lrange_size\@ +#else + clgfi %r14,\end - \start +#endif jhe \outside_label +#ifdef CONFIG_AS_IS_LLVM + .section .rodata, "a" + .align 4 +.Lrange_size\@: + .long \end - \start + .previous +#endif .endm .macro SIEEXIT -- cgit v1.2.3 From adda746629b4a3950f313bc645fa0e54daee871c Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 11 May 2022 14:05:29 +0200 Subject: s390/purgatory: workaround llvm's IAS limitations llvm's integrated assembler cannot handle immediate values which are calculated with two local labels: arch/s390/purgatory/head.S:139:11: error: invalid operand for instruction aghi %r8,-(.base_crash-purgatory_start) Workaround this by partially rewriting the code. 
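A minimal user-space sketch of the resulting pattern (hypothetical labels, s390 toolchain assumed; this is not the purgatory code itself): instead of asking the assembler to fold "end - start" into an immediate operand, both addresses are materialized pc-relatively with larl and the difference is computed at run time, which is what the larl/slgr pairs in the rewritten code do.

#include <stdio.h>

static unsigned long region_size(void)
{
	unsigned long start, end;

	asm("	.pushsection	.rodata\n"
	    "7771:	.space	1600\n"		/* stand-in for a purgatory region */
	    "7772:\n"
	    "	.popsection\n"
	    "	larl	%0,7771b\n"		/* address of the start label */
	    "	larl	%1,7772b\n"		/* address of the end label */
	    : "=a" (start), "=a" (end));
	return end - start;			/* compiles to a 64-bit subtract */
}

int main(void)
{
	printf("region size: %lu bytes\n", region_size());	/* prints 1600 */
	return 0;
}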
Link: https://lore.kernel.org/r/20220511120532.2228616-6-hca@linux.ibm.com Signed-off-by: Heiko Carstens --- arch/s390/purgatory/head.S | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/arch/s390/purgatory/head.S b/arch/s390/purgatory/head.S index 3d1c31e0cf3d..6f835124ee82 100644 --- a/arch/s390/purgatory/head.S +++ b/arch/s390/purgatory/head.S @@ -44,11 +44,14 @@ .endm .macro MEMSWAP dst,src,buf,len -10: cghi \len,bufsz +10: larl %r0,purgatory_end + larl %r1,stack + slgr %r0,%r1 + cgr \len,%r0 jh 11f lgr %r4,\len j 12f -11: lghi %r4,bufsz +11: lgr %r4,%r0 12: MEMCPY \buf,\dst,%r4 MEMCPY \dst,\src,%r4 @@ -135,12 +138,18 @@ ENTRY(purgatory_start) .start_crash_kernel: /* Location of purgatory_start in crash memory */ + larl %r0,.base_crash + larl %r1,purgatory_start + slgr %r0,%r1 lgr %r8,%r13 - aghi %r8,-(.base_crash-purgatory_start) + sgr %r8,%r0 /* Destination for this code i.e. end of memory to be swapped. */ + larl %r0,purgatory_end + larl %r1,purgatory_start + slgr %r0,%r1 lg %r9,crash_size-.base_crash(%r13) - aghi %r9,-(purgatory_end-purgatory_start) + sgr %r9,%r0 /* Destination in crash memory, i.e. same as r9 but in crash memory. */ lg %r10,crash_start-.base_crash(%r13) @@ -149,15 +158,19 @@ ENTRY(purgatory_start) /* Buffer location (in crash memory) and size. As the purgatory is * behind the point of no return it can re-use the stack as buffer. */ - lghi %r11,bufsz + larl %r11,purgatory_end larl %r12,stack + slgr %r11,%r12 MEMCPY %r12,%r9,%r11 /* dst -> (crash) buf */ MEMCPY %r9,%r8,%r11 /* self -> dst */ /* Jump to new location. */ lgr %r7,%r9 - aghi %r7,.jump_to_dst-purgatory_start + larl %r0,.jump_to_dst + larl %r1,purgatory_start + slgr %r0,%r1 + agr %r7,%r0 br %r7 .jump_to_dst: @@ -169,7 +182,10 @@ ENTRY(purgatory_start) /* Load new buffer location after jump */ larl %r7,stack - aghi %r10,stack-purgatory_start + lgr %r0,%r7 + larl %r1,purgatory_start + slgr %r0,%r1 + agr %r10,%r0 MEMCPY %r10,%r7,%r11 /* (new) buf -> (crash) buf */ /* Now the code is set up to run from its designated location. Start -- cgit v1.2.3 From e9953b729b789c0e2984859e3b2170b7fa8520d5 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 11 May 2022 14:05:30 +0200 Subject: s390/boot: workaround llvm IAS bug For at least the mvc and clc instructions llvm's integrated assembler can generate incorrect code. In particular this happens with decompressor boot code. The reason seems to be that relocations for the second displacement of each instruction are at incorrect locations (-/+: gas vs llvm IAS): mvc __LC_IO_NEW_PSW(16),.Lnewpsw results in 4: d2 0f 01 f0 00 00 mvc 496(16,%r0),0 - 8: R_390_12 .head.text+0x10 + 6: R_390_12 .head.text+0x10 and clc 0(3,%r4),.L_hdr results in 258: d5 02 40 00 00 00 clc 0(3,%r4),0 - 25c: R_390_12 .head.text+0x324 + 25a: R_390_12 .head.text+0x324 Workaround this by writing the code in a different way. 
Tested-by: Nathan Chancellor Tested-by: Nick Desaulniers Link: https://github.com/llvm/llvm-project/issues/55411 Link: https://lore.kernel.org/r/20220511120532.2228616-7-hca@linux.ibm.com Signed-off-by: Heiko Carstens --- arch/s390/boot/head.S | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S index 2ced90172680..8402e1cd133b 100644 --- a/arch/s390/boot/head.S +++ b/arch/s390/boot/head.S @@ -42,7 +42,8 @@ ipl_start: # subroutine to wait for end I/O # .Lirqwait: - mvc __LC_IO_NEW_PSW(16),.Lnewpsw # set up IO interrupt psw + larl %r13,.Lnewpsw # set up IO interrupt psw + mvc __LC_IO_NEW_PSW(16),0(%r13) lpsw .Lwaitpsw .Lioint: br %r14 @@ -155,9 +156,11 @@ ipl_start: lr %r2,%r3 .Lnotrunc: l %r4,.Linitrd - clc 0(3,%r4),.L_hdr # if it is HDRx + larl %r13,.L_hdr + clc 0(3,%r4),0(%r13) # if it is HDRx bz .Lagain1 # skip dataset header - clc 0(3,%r4),.L_eof # if it is EOFx + larl %r13,.L_eof + clc 0(3,%r4),0(%r13) # if it is EOFx bz .Lagain1 # skip dateset trailer lr %r5,%r2 @@ -181,9 +184,11 @@ ipl_start: .Lrdcont: l %r2,.Linitrd - clc 0(3,%r2),.L_hdr # skip HDRx and EOFx + larl %r13,.L_hdr # skip HDRx and EOFx + clc 0(3,%r2),0(%r13) bz .Lagain2 - clc 0(3,%r2),.L_eof + larl %r13,.L_eof + clc 0(3,%r2),0(%r13) bz .Lagain2 # @@ -260,20 +265,23 @@ SYM_CODE_START_LOCAL(startup_normal) .fill 16,4,0x0 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs sam64 # switch to 64 bit addressing mode - basr %r13,0 # get base -.LPG0: - mvc __LC_EXT_NEW_PSW(16),.Lext_new_psw-.LPG0(%r13) - mvc __LC_PGM_NEW_PSW(16),.Lpgm_new_psw-.LPG0(%r13) - mvc __LC_IO_NEW_PSW(16),.Lio_new_psw-.LPG0(%r13) + larl %r13,.Lext_new_psw + mvc __LC_EXT_NEW_PSW(16),0(%r13) + larl %r13,.Lpgm_new_psw + mvc __LC_PGM_NEW_PSW(16),0(%r13) + larl %r13,.Lio_new_psw + mvc __LC_IO_NEW_PSW(16),0(%r13) xc 0x200(256),0x200 # partially clear lowcore xc 0x300(256),0x300 xc 0xe00(256),0xe00 xc 0xf00(256),0xf00 - lctlg %c0,%c15,.Lctl-.LPG0(%r13) # load control registers + larl %r13,.Lctl + lctlg %c0,%c15,0(%r13) # load control registers stcke __LC_BOOT_CLOCK mvc __LC_LAST_UPDATE_CLOCK(8),__LC_BOOT_CLOCK+1 - spt 6f-.LPG0(%r13) - mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) + larl %r13,6f + spt 0(%r13) + mvc __LC_LAST_UPDATE_TIMER(8),0(%r13) larl %r15,_stack_end-STACK_FRAME_OVERHEAD brasl %r14,sclp_early_setup_buffer brasl %r14,verify_facilities -- cgit v1.2.3 From bb31074db95f735004203b307e63e2e0d4ef9c26 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 11 May 2022 14:05:31 +0200 Subject: s390/boot: do not emit debug info for assembly with llvm's IAS Commit ee6d777d3e93 ("s390/decompressor: support extra debug flags") added extra debug flags, in particular debug info is created, depending on config options. With llvm's IAS this causes this compile warning: arch/s390/boot/head.S:38:1: warning: DWARF2 only supports one section per compilation unit .section ".head.text","ax" ^ This is a known problem and was addressed with commit b8a9092330da ("Kbuild: do not emit debug info for assembly with LLVM_IAS=1"). Just do the same for s390 to get rid of this warning. 
Tested-by: Nathan Chancellor Tested-by: Nick Desaulniers Link: https://lore.kernel.org/r/20220511120532.2228616-8-hca@linux.ibm.com Signed-off-by: Heiko Carstens --- arch/s390/Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/s390/Makefile b/arch/s390/Makefile index e441b60b1812..12037c9e3802 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -20,7 +20,9 @@ LDFLAGS_vmlinux := -pie endif aflags_dwarf := -Wa,-gdwarf-2 KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__ +ifndef CONFIG_AS_IS_LLVM KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf)) +endif KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain -- cgit v1.2.3 From 8218827b73c6e41029438a2d3cc573286beee914 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 11 May 2022 14:05:32 +0200 Subject: scripts/min-tool-version.sh: raise minimum clang version to 14.0.0 for s390 Before version 14.0.0 llvm's integrated assembler fails to handle some displacement variants: arch/s390/purgatory/head.S:108:10: error: invalid operand for instruction lg %r11,kernel_type-.base_crash(%r13) Instead of working around this and given that this is already fixed raise the minimum clang version from 13.0.0 to 14.0.0. Acked-by: Nick Desaulniers Tested-by: Nathan Chancellor Tested-by: Nick Desaulniers Link: https://reviews.llvm.org/D113341 Link: https://lore.kernel.org/r/20220511120532.2228616-9-hca@linux.ibm.com Signed-off-by: Heiko Carstens --- scripts/min-tool-version.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/scripts/min-tool-version.sh b/scripts/min-tool-version.sh index 7c20252a90c6..250925aab101 100755 --- a/scripts/min-tool-version.sh +++ b/scripts/min-tool-version.sh @@ -24,9 +24,8 @@ icc) echo 16.0.3 ;; llvm) - # https://lore.kernel.org/r/YMtib5hKVyNknZt3@osiris/ if [ "$SRCARCH" = s390 ]; then - echo 13.0.0 + echo 14.0.0 else echo 11.0.0 fi -- cgit v1.2.3 From 94d3477897481b92874654455e263e0b1728acb5 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 16 May 2022 14:37:47 +0200 Subject: s390/head: get rid of 31 bit leftovers Get rid of old 31 bit leftovers within ipl code: - convert everything to pc relative code - use 64 bit addressing mode as early as possible - use 64 bit arithmetics wherever possible This way the code doesn't look as odd as before anymore. 
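As a small illustration of why the switch to 64-bit arithmetic matters (a user-space sketch, not from the patch; the values are arbitrary): 32-bit forms such as ahi only operate on the low word of a 64-bit register and silently truncate anything larger, while the aghi/agr forms used after this rewrite work on the full register.

#include <stdio.h>

int main(void)
{
	unsigned long a32 = 0xffffffffUL;	/* 4 GiB - 1 */
	unsigned long a64 = 0xffffffffUL;

	asm("ahi	%0,1" : "+d" (a32) : : "cc");	/* low word wraps to 0 */
	asm("aghi	%0,1" : "+d" (a64) : : "cc");	/* full 64-bit add */

	printf("ahi  result: 0x%lx\n", a32);	/* 0x0 */
	printf("aghi result: 0x%lx\n", a64);	/* 0x100000000 */
	return 0;
}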
Reviewed-by: Sven Schnelle Signed-off-by: Heiko Carstens --- arch/s390/boot/head.S | 275 +++++++++++++++++++++++--------------------------- 1 file changed, 126 insertions(+), 149 deletions(-) diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S index 8402e1cd133b..3f79b9efb803 100644 --- a/arch/s390/boot/head.S +++ b/arch/s390/boot/head.S @@ -29,202 +29,179 @@ #include #include "boot.h" -#define ARCH_OFFSET 4 - #define EP_OFFSET 0x10008 #define EP_STRING "S390EP" #define IPL_BS 0x730 __HEAD ipl_start: - j .Liplcont -# -# subroutine to wait for end I/O -# -.Lirqwait: - larl %r13,.Lnewpsw # set up IO interrupt psw - mvc __LC_IO_NEW_PSW(16),0(%r13) - lpsw .Lwaitpsw -.Lioint: - br %r14 - .align 8 -.Lnewpsw: - .quad 0x0000000080000000,.Lioint -.Lwaitpsw: - .long 0x020a0000,0x80000000+.Lioint - -# -# subroutine for loading cards from the reader -# -.Lloader: - la %r4,0(%r14) - la %r3,.Lorb # r2 = address of orb into r2 - la %r5,.Lirb # r4 = address of irb - la %r6,.Lccws - la %r7,20 -.Linit: - st %r2,4(%r6) # initialize CCW data addresses - la %r2,0x50(%r2) - la %r6,8(%r6) - bct 7,.Linit - - lctl %c6,%c6,.Lcr6 # set IO subclass mask - slr %r2,%r2 -.Lldlp: - ssch 0(%r3) # load chunk of 1600 bytes - bnz .Llderr -.Lwait4irq: - bas %r14,.Lirqwait - c %r1,__LC_SUBCHANNEL_ID # compare subchannel number - bne .Lwait4irq - tsch 0(%r5) - - slr %r0,%r0 - ic %r0,8(%r5) # get device status - chi %r0,8 # channel end ? - be .Lcont - chi %r0,12 # channel end + device end ? - be .Lcont - - l %r0,4(%r5) - s %r0,8(%r3) # r0/8 = number of ccws executed - mhi %r0,10 # *10 = number of bytes in ccws - lh %r3,10(%r5) # get residual count - sr %r0,%r3 # #ccws*80-residual=#bytes read - ar %r2,%r0 - - br %r4 # r2 contains the total size - -.Lcont: - ahi %r2,0x640 # add 0x640 to total size - la %r6,.Lccws - la %r7,20 -.Lincr: - l %r0,4(%r6) # update CCW data addresses - ahi %r0,0x640 - st %r0,4(%r6) - ahi %r6,8 - bct 7,.Lincr - - b .Lldlp -.Llderr: - lpsw .Lcrash - - .align 8 -.Lorb: .long 0x00000000,0x0080ff00,.Lccws -.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 -.Lcr6: .long 0xff000000 -.Lloadp:.long 0,0 - .align 8 -.Lcrash:.long 0x000a0000,0x00000000 - - .align 8 -.Lccws: .rept 19 - .long 0x02600050,0x00000000 - .endr - .long 0x02200050,0x00000000 - -.Liplcont: mvi __LC_AR_MODE_ID,1 # set esame flag slr %r0,%r0 # set cpuid to zero lhi %r1,2 # mode 2 = esame (dump) sigp %r1,%r0,0x12 # switch to esame mode - bras %r13,0f - .fill 16,4,0x0 -0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs - sam31 # switch to 31 bit addressing mode - lh %r1,__LC_SUBCHANNEL_ID # test if subchannel number - bct %r1,.Lnoload # is valid - l %r1,__LC_SUBCHANNEL_ID # load ipl subchannel number - la %r2,IPL_BS # load start address - bas %r14,.Lloader # load rest of ipl image - l %r12,.Lparm # pointer to parameter area - st %r1,IPL_DEVICE+ARCH_OFFSET-PARMAREA(%r12) # save ipl device number - + sam64 # switch to 64 bit addressing mode + lgh %r1,__LC_SUBCHANNEL_ID # test if subchannel number + brctg %r1,.Lnoload # is valid + llgf %r1,__LC_SUBCHANNEL_ID # load ipl subchannel number + lghi %r2,IPL_BS # load start address + bras %r14,.Lloader # load rest of ipl image + larl %r12,parmarea # pointer to parameter area + stg %r1,IPL_DEVICE-PARMAREA(%r12) # save ipl device number # # load parameter file from ipl device # .Lagain1: - l %r2,.Linitrd # ramdisk loc. is temp - bas %r14,.Lloader # load parameter file - ltr %r2,%r2 # got anything ? 
- bz .Lnopf - l %r3,MAX_COMMAND_LINE_SIZE+ARCH_OFFSET-PARMAREA(%r12) - ahi %r3,-1 - clr %r2,%r3 - bl .Lnotrunc - lr %r2,%r3 + larl %r2,_end # ramdisk loc. is temp + bras %r14,.Lloader # load parameter file + ltgr %r2,%r2 # got anything ? + jz .Lnopf + lg %r3,MAX_COMMAND_LINE_SIZE-PARMAREA(%r12) + aghi %r3,-1 + clgr %r2,%r3 + jl .Lnotrunc + lgr %r2,%r3 .Lnotrunc: - l %r4,.Linitrd + larl %r4,_end larl %r13,.L_hdr clc 0(3,%r4),0(%r13) # if it is HDRx - bz .Lagain1 # skip dataset header + jz .Lagain1 # skip dataset header larl %r13,.L_eof clc 0(3,%r4),0(%r13) # if it is EOFx - bz .Lagain1 # skip dateset trailer - - lr %r5,%r2 + jz .Lagain1 # skip dateset trailer + lgr %r5,%r2 la %r6,COMMAND_LINE-PARMAREA(%r12) - lr %r7,%r2 - ahi %r7,1 + lgr %r7,%r2 + aghi %r7,1 mvcl %r6,%r4 .Lnopf: - # # load ramdisk from ipl device # .Lagain2: - l %r2,.Linitrd # addr of ramdisk - st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) - bas %r14,.Lloader # load ramdisk - st %r2,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r12) # store size of rd - ltr %r2,%r2 - bnz .Lrdcont - st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # no ramdisk found + larl %r2,_end # addr of ramdisk + stg %r2,INITRD_START-PARMAREA(%r12) + bras %r14,.Lloader # load ramdisk + stg %r2,INITRD_SIZE-PARMAREA(%r12) # store size of rd + ltgr %r2,%r2 + jnz .Lrdcont + stg %r2,INITRD_START-PARMAREA(%r12) # no ramdisk found .Lrdcont: - l %r2,.Linitrd - + larl %r2,_end larl %r13,.L_hdr # skip HDRx and EOFx clc 0(3,%r2),0(%r13) - bz .Lagain2 + jz .Lagain2 larl %r13,.L_eof clc 0(3,%r2),0(%r13) - bz .Lagain2 - + jz .Lagain2 # # reset files in VM reader # - stidp .Lcpuid # store cpuid - tm .Lcpuid,0xff # running VM ? - bno .Lnoreset - la %r2,.Lreset - lhi %r3,26 + larl %r13,.Lcpuid + stidp 0(%r13) # store cpuid + tm 0(%r13),0xff # running VM ? + jno .Lnoreset + larl %r2,.Lreset + lghi %r3,26 diag %r2,%r3,8 - la %r5,.Lirb + larl %r5,.Lirb stsch 0(%r5) # check if irq is pending tm 30(%r5),0x0f # by verifying if any of the - bnz .Lwaitforirq # activity or status control + jnz .Lwaitforirq # activity or status control tm 31(%r5),0xff # bits is set in the schib - bz .Lnoreset + jz .Lnoreset .Lwaitforirq: - bas %r14,.Lirqwait # wait for IO interrupt + bras %r14,.Lirqwait # wait for IO interrupt c %r1,__LC_SUBCHANNEL_ID # compare subchannel number - bne .Lwaitforirq - la %r5,.Lirb + jne .Lwaitforirq + larl %r5,.Lirb tsch 0(%r5) .Lnoreset: - b .Lnoload - + j .Lnoload # # everything loaded, go for it # .Lnoload: - l %r1,.Lstartup - br %r1 + jg startup +# +# subroutine to wait for end I/O +# +.Lirqwait: + larl %r13,.Lnewpswmask # set up IO interrupt psw + mvc __LC_IO_NEW_PSW(8),0(%r13) + stg %r14,__LC_IO_NEW_PSW+8 + larl %r13,.Lwaitpsw + lpswe 0(%r13) +.Lioint: +# +# subroutine for loading cards from the reader +# +.Lloader: + lgr %r4,%r14 + larl %r3,.Lorb # r2 = address of orb into r2 + larl %r5,.Lirb # r4 = address of irb + larl %r6,.Lccws + lghi %r7,20 +.Linit: + st %r2,4(%r6) # initialize CCW data addresses + la %r2,0x50(%r2) + la %r6,8(%r6) + brctg %r7,.Linit + larl %r13,.Lcr6 + lctlg %c6,%c6,0(%r13) + xgr %r2,%r2 +.Lldlp: + ssch 0(%r3) # load chunk of 1600 bytes + jnz .Llderr +.Lwait4irq: + bras %r14,.Lirqwait + c %r1,__LC_SUBCHANNEL_ID # compare subchannel number + jne .Lwait4irq + tsch 0(%r5) + xgr %r0,%r0 + ic %r0,8(%r5) # get device status + cghi %r0,8 # channel end ? + je .Lcont + cghi %r0,12 # channel end + device end ? 
+ je .Lcont + llgf %r0,4(%r5) + sgf %r0,8(%r3) # r0/8 = number of ccws executed + mghi %r0,10 # *10 = number of bytes in ccws + llgh %r3,10(%r5) # get residual count + sgr %r0,%r3 # #ccws*80-residual=#bytes read + agr %r2,%r0 + br %r4 # r2 contains the total size +.Lcont: + aghi %r2,0x640 # add 0x640 to total size + larl %r6,.Lccws + lghi %r7,20 +.Lincr: + l %r0,4(%r6) # update CCW data addresses + aghi %r0,0x640 + st %r0,4(%r6) + aghi %r6,8 + brctg %r7,.Lincr + j .Lldlp +.Llderr: + larl %r13,.Lcrash + lpsw 0(%r13) -.Linitrd:.long _end # default address of initrd -.Lparm: .long PARMAREA -.Lstartup: .long startup + .align 8 +.Lwaitpsw: + .quad 0x0202000180000000,.Lioint +.Lnewpswmask: + .quad 0x0000000180000000 + .align 8 +.Lorb: .long 0x00000000,0x0080ff00,.Lccws +.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 + .align 8 +.Lcr6: .quad 0x00000000ff000000 + .align 8 +.Lcrash:.long 0x000a0000,0x00000000 + .align 8 +.Lccws: .rept 19 + .long 0x02600050,0x00000000 + .endr + .long 0x02200050,0x00000000 .Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40 .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6 .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold" -- cgit v1.2.3