s390 updates for 5.2-rc3

- Farewell Martin Schwidefsky: add Martin to CREDITS and remove him
  from MAINTAINERS

- Vasily Gorbik and Christian Borntraeger join as maintainers for s390

- Fix locking bug in ctr(aes) and ctr(des) s390 specific ciphers

- A rather large patch which fixes gcm-aes-s390 scatter gather handling

- Fix zcrypt wrong dispatching for control domain CPRBs

- Fix assignment of bus resources in PCI code

- Fix structure definition for set PCI function

- Fix one compile error and one compile warning seen when
  CONFIG_OPTIMIZE_INLINING is enabled

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJc8QIzAAoJECIOw3kbKW7C8+cP/iEuFF/YFKq896Zmd50wV1fL
0ASkqKfrwWNzQz+Y9c7WuIoGNghptr4zPPANkaRLkUCIZ2SsmvSLrggtAD0Ls4ut
vAoCXLH10rDdGWmiXHuDHOsmeQ/1RdbqW6ZfZEDhLNY7vYtCFfpOyAN0QEhGa7/F
NcAemkD9Q3uerATCr37mVcK3GrzzhcbGd9mVN5uqdq0ZLRDrI4K+JxWVisdBvzve
bnWCwUsgDYOhc1C1pMDD8IsWd+F3a7V+caDNWFhMgbRCPA9adNzf9fYuEH5ftI7U
W7bS45ZR5BX3pAHtOIg6s/l2W+cu3vGKuCYIutA2JpRqGM8IASEoUJwMZ54Fu/nF
Eh+zcfizxwREX7VjXKHd9oXZ41cocYdBIt8kCMe+gbj9zKzD716wzhiBVh3WCG6v
uBEx0nHMkHlKaTaNY3oUU5HP6t+zARw+uApkpA9EdNiM1pV6T/n9ySTnJHrU0NNo
3nYlCHzm1W9RLknbp+vmc9SbbEzWhhUpCDUi/5Ny8YsFwsddMixWMmg9DI8zJpJB
Qr6OSD05dThPPH7bWNcBbbt/NU0p/BaxgbVYewcHM/cln1tw2lDuOY0jjiIB3SV/
twhMoFx7fEZCpSsP8t27f33NA3UTEpHhr2KSNZZ2w2DGu6QE/QawgUHDT+VlFs5x
WiUkvArmRqlWL3Aaz6W8
=e7cv
-----END PGP SIGNATURE-----

Merge tag 's390-5.2-3' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 fixes from Heiko Carstens:

 - Farewell Martin Schwidefsky: add Martin to CREDITS and remove him
   from MAINTAINERS

 - Vasily Gorbik and Christian Borntraeger join as maintainers for s390

 - Fix locking bug in ctr(aes) and ctr(des) s390 specific ciphers

 - A rather large patch which fixes gcm-aes-s390 scatter gather handling

 - Fix zcrypt wrong dispatching for control domain CPRBs

 - Fix assignment of bus resources in PCI code

 - Fix structure definition for set PCI function

 - Fix one compile error and one compile warning seen when
   CONFIG_OPTIMIZE_INLINING is enabled

* tag 's390-5.2-3' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  MAINTAINERS: add Vasily Gorbik and Christian Borntraeger for s390
  MAINTAINERS: Farewell Martin Schwidefsky
  s390/crypto: fix possible sleep during spinlock aquired
  s390/crypto: fix gcm-aes-s390 selftest failures
  s390/zcrypt: Fix wrong dispatching for control domain CPRBs
  s390/pci: fix assignment of bus resources
  s390/pci: fix struct definition for set PCI function
  s390: mark __cpacf_check_opcode() and cpacf_query_func() as __always_inline
  s390: add unreachable() to dump_fault_info() to fix -Wmaybe-uninitialized
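The ctr(aes)/ctr(des) locking fix in this pull replaces the shared ctrblk spinlock with a mutex, because the counter-mode code can sleep while holding the lock and falls back to a small on-stack buffer when the lock is contended. A minimal userspace sketch of that trylock-with-fallback pattern, using pthreads and invented names (this is not code from the patch):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* Shared scratch buffer, analogous to the ctrblk page in the s390 drivers. */
static unsigned char shared_ctrblk[4096];
static pthread_mutex_t ctrblk_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the cipher work: stream data through a counter buffer.
 * In the kernel this step may sleep, which is why a mutex (not a
 * spinlock) has to protect the shared buffer. */
static void ctr_crypt(const unsigned char *in, unsigned char *out, size_t len,
		      unsigned char *ctrbuf, size_t ctrbuf_len)
{
	while (len > 0) {
		size_t n = len < ctrbuf_len ? len : ctrbuf_len;

		memcpy(ctrbuf, in, n);
		memcpy(out, ctrbuf, n);
		in += n;
		out += n;
		len -= n;
	}
}

void ctr_crypt_request(const unsigned char *in, unsigned char *out, size_t len)
{
	unsigned char local_ctrblk[64];	/* small per-request fallback buffer */
	int locked = (pthread_mutex_trylock(&ctrblk_lock) == 0);

	if (locked) {
		/* fast path: the large shared buffer, many blocks per call */
		ctr_crypt(in, out, len, shared_ctrblk, sizeof(shared_ctrblk));
		pthread_mutex_unlock(&ctrblk_lock);
	} else {
		/* contended: fall back to the small on-stack buffer */
		ctr_crypt(in, out, len, local_ctrblk, sizeof(local_ctrblk));
	}
}

int main(void)
{
	unsigned char msg[] = "counter mode demo";
	unsigned char out[sizeof(msg)];

	ctr_crypt_request(msg, out, sizeof(msg));
	printf("%s\n", out);
	return 0;
}

The point of the trylock is that a request never blocks waiting for the shared buffer; it only uses it opportunistically, as the aes_s390.c and des_s390.c hunks below show.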
commit 27a03b1a71

--- a/CREDITS
+++ b/CREDITS
@@ -3364,6 +3364,14 @@ S: Braunschweiger Strasse 79
 S: 31134 Hildesheim
 S: Germany
 
+N: Martin Schwidefsky
+D: Martin was the most significant contributor to the initial s390
+D: port of the Linux Kernel and later the maintainer of the s390
+D: architecture backend for almost two decades.
+D: He passed away in 2019, and will be greatly missed.
+S: Germany
+W: https://lwn.net/Articles/789028/
+
 N: Marcel Selhorst
 E: tpmdd@selhorst.net
 D: TPM driver
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3049,8 +3049,9 @@ S: Maintained
 F: arch/riscv/net/
 
 BPF JIT for S390
-M: Martin Schwidefsky <schwidefsky@de.ibm.com>
 M: Heiko Carstens <heiko.carstens@de.ibm.com>
+M: Vasily Gorbik <gor@linux.ibm.com>
+M: Christian Borntraeger <borntraeger@de.ibm.com>
 L: netdev@vger.kernel.org
 L: bpf@vger.kernel.org
 S: Maintained
@@ -13614,8 +13615,9 @@ S: Maintained
 F: drivers/video/fbdev/savage/
 
 S390
-M: Martin Schwidefsky <schwidefsky@de.ibm.com>
 M: Heiko Carstens <heiko.carstens@de.ibm.com>
+M: Vasily Gorbik <gor@linux.ibm.com>
+M: Christian Borntraeger <borntraeger@de.ibm.com>
 L: linux-s390@vger.kernel.org
 W: http://www.ibm.com/developerworks/linux/linux390/
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux.git
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -27,14 +27,14 @@
 #include <linux/module.h>
 #include <linux/cpufeature.h>
 #include <linux/init.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/fips.h>
 #include <linux/string.h>
 #include <crypto/xts.h>
 #include <asm/cpacf.h>
 
 static u8 *ctrblk;
-static DEFINE_SPINLOCK(ctrblk_lock);
+static DEFINE_MUTEX(ctrblk_lock);
 
 static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
 		    kma_functions;
@@ -698,7 +698,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
 	unsigned int n, nbytes;
 	int ret, locked;
 
-	locked = spin_trylock(&ctrblk_lock);
+	locked = mutex_trylock(&ctrblk_lock);
 
 	ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
 	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
@@ -716,7 +716,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
 		ret = blkcipher_walk_done(desc, walk, nbytes - n);
 	}
 	if (locked)
-		spin_unlock(&ctrblk_lock);
+		mutex_unlock(&ctrblk_lock);
 	/*
 	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
 	 */
@@ -826,19 +826,45 @@ static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
 	return 0;
 }
 
-static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
-			      unsigned int len)
+static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
+			   unsigned int len)
 {
 	memset(gw, 0, sizeof(*gw));
 	gw->walk_bytes_remain = len;
 	scatterwalk_start(&gw->walk, sg);
 }
 
-static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
+static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
+{
+	struct scatterlist *nextsg;
+
+	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
+	while (!gw->walk_bytes) {
+		nextsg = sg_next(gw->walk.sg);
+		if (!nextsg)
+			return 0;
+		scatterwalk_start(&gw->walk, nextsg);
+		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
+						   gw->walk_bytes_remain);
+	}
+	gw->walk_ptr = scatterwalk_map(&gw->walk);
+	return gw->walk_bytes;
+}
+
+static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
+					     unsigned int nbytes)
+{
+	gw->walk_bytes_remain -= nbytes;
+	scatterwalk_unmap(&gw->walk);
+	scatterwalk_advance(&gw->walk, nbytes);
+	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
+	gw->walk_ptr = NULL;
+}
+
+static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
 {
 	int n;
 
 	/* minbytesneeded <= AES_BLOCK_SIZE */
 	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
 		gw->ptr = gw->buf;
 		gw->nbytes = gw->buf_bytes;
@@ -851,13 +877,11 @@ static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
 		goto out;
 	}
 
-	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
-	if (!gw->walk_bytes) {
-		scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
-		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
-						   gw->walk_bytes_remain);
+	if (!_gcm_sg_clamp_and_map(gw)) {
+		gw->ptr = NULL;
+		gw->nbytes = 0;
+		goto out;
 	}
-	gw->walk_ptr = scatterwalk_map(&gw->walk);
 
 	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
 		gw->ptr = gw->walk_ptr;
@@ -869,51 +893,90 @@ static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
 		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
 		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
 		gw->buf_bytes += n;
-		gw->walk_bytes_remain -= n;
-		scatterwalk_unmap(&gw->walk);
-		scatterwalk_advance(&gw->walk, n);
-		scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
-
+		_gcm_sg_unmap_and_advance(gw, n);
 		if (gw->buf_bytes >= minbytesneeded) {
 			gw->ptr = gw->buf;
 			gw->nbytes = gw->buf_bytes;
 			goto out;
 		}
-
-		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
-						   gw->walk_bytes_remain);
-		if (!gw->walk_bytes) {
-			scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
-			gw->walk_bytes = scatterwalk_clamp(&gw->walk,
-							   gw->walk_bytes_remain);
+		if (!_gcm_sg_clamp_and_map(gw)) {
+			gw->ptr = NULL;
+			gw->nbytes = 0;
+			goto out;
 		}
-		gw->walk_ptr = scatterwalk_map(&gw->walk);
 	}
 
 out:
 	return gw->nbytes;
 }
 
-static void gcm_sg_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
+static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
 {
-	int n;
+	if (gw->walk_bytes_remain == 0) {
+		gw->ptr = NULL;
+		gw->nbytes = 0;
+		goto out;
+	}
 
+	if (!_gcm_sg_clamp_and_map(gw)) {
+		gw->ptr = NULL;
+		gw->nbytes = 0;
+		goto out;
+	}
+
+	if (gw->walk_bytes >= minbytesneeded) {
+		gw->ptr = gw->walk_ptr;
+		gw->nbytes = gw->walk_bytes;
+		goto out;
+	}
+
+	scatterwalk_unmap(&gw->walk);
+	gw->walk_ptr = NULL;
+
+	gw->ptr = gw->buf;
+	gw->nbytes = sizeof(gw->buf);
+
+out:
+	return gw->nbytes;
+}
+
+static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
+{
 	if (gw->ptr == NULL)
-		return;
+		return 0;
 
 	if (gw->ptr == gw->buf) {
-		n = gw->buf_bytes - bytesdone;
+		int n = gw->buf_bytes - bytesdone;
 		if (n > 0) {
 			memmove(gw->buf, gw->buf + bytesdone, n);
-			gw->buf_bytes -= n;
+			gw->buf_bytes = n;
 		} else
 			gw->buf_bytes = 0;
-	} else {
-		gw->walk_bytes_remain -= bytesdone;
-		scatterwalk_unmap(&gw->walk);
-		scatterwalk_advance(&gw->walk, bytesdone);
-		scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
-	}
+	} else
+		_gcm_sg_unmap_and_advance(gw, bytesdone);
+
+	return bytesdone;
+}
+
+static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
+{
+	int i, n;
+
+	if (gw->ptr == NULL)
+		return 0;
+
+	if (gw->ptr == gw->buf) {
+		for (i = 0; i < bytesdone; i += n) {
+			if (!_gcm_sg_clamp_and_map(gw))
+				return i;
+			n = min(gw->walk_bytes, bytesdone - i);
+			memcpy(gw->walk_ptr, gw->buf + i, n);
+			_gcm_sg_unmap_and_advance(gw, n);
+		}
+	} else
+		_gcm_sg_unmap_and_advance(gw, bytesdone);
+
+	return bytesdone;
 }
 
 static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
@@ -926,7 +989,7 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
 	unsigned int pclen = req->cryptlen;
 	int ret = 0;
 
-	unsigned int len, in_bytes, out_bytes,
+	unsigned int n, len, in_bytes, out_bytes,
 		     min_bytes, bytes, aad_bytes, pc_bytes;
 	struct gcm_sg_walk gw_in, gw_out;
 	u8 tag[GHASH_DIGEST_SIZE];
@@ -963,14 +1026,14 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
 	*(u32 *)(param.j0 + ivsize) = 1;
 	memcpy(param.k, ctx->key, ctx->key_len);
 
-	gcm_sg_walk_start(&gw_in, req->src, len);
-	gcm_sg_walk_start(&gw_out, req->dst, len);
+	gcm_walk_start(&gw_in, req->src, len);
+	gcm_walk_start(&gw_out, req->dst, len);
 
 	do {
 		min_bytes = min_t(unsigned int,
 				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
-		in_bytes = gcm_sg_walk_go(&gw_in, min_bytes);
-		out_bytes = gcm_sg_walk_go(&gw_out, min_bytes);
+		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
+		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
 		bytes = min(in_bytes, out_bytes);
 
 		if (aadlen + pclen <= bytes) {
@@ -997,8 +1060,11 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
 				  gw_in.ptr + aad_bytes, pc_bytes,
 				  gw_in.ptr, aad_bytes);
 
-		gcm_sg_walk_done(&gw_in, aad_bytes + pc_bytes);
-		gcm_sg_walk_done(&gw_out, aad_bytes + pc_bytes);
+		n = aad_bytes + pc_bytes;
+		if (gcm_in_walk_done(&gw_in, n) != n)
+			return -ENOMEM;
+		if (gcm_out_walk_done(&gw_out, n) != n)
+			return -ENOMEM;
 		aadlen -= aad_bytes;
 		pclen -= pc_bytes;
 	} while (aadlen + pclen > 0);
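The gcm-aes-s390 rework above funnels all scatterlist handling through _gcm_sg_clamp_and_map() and _gcm_sg_unmap_and_advance(), so the walk never hands out a zero-length chunk and never skips an empty scatterlist entry. A rough userspace analogy of that clamp-skip-advance loop, using plain iovec buffers and invented names rather than scatterlists (a sketch, not code from the patch):

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Walk state over an array of buffers, loosely mirroring struct gcm_sg_walk. */
struct sg_walk {
	const struct iovec *sg;		/* current segment */
	const struct iovec *end;	/* one past the last segment */
	size_t offset;			/* offset into the current segment */
	size_t bytes_remain;		/* total bytes still to deliver */
};

/* Clamp to the current segment, skip empty segments, return the chunk size. */
static size_t clamp_and_map(struct sg_walk *w, const unsigned char **ptr)
{
	while (w->bytes_remain) {
		size_t avail = w->sg < w->end ? w->sg->iov_len - w->offset : 0;

		if (avail) {
			size_t n = avail < w->bytes_remain ? avail : w->bytes_remain;

			*ptr = (const unsigned char *)w->sg->iov_base + w->offset;
			return n;
		}
		if (w->sg >= w->end)
			break;
		w->sg++;		/* segment exhausted: advance, like sg_next() */
		w->offset = 0;
	}
	return 0;
}

/* Consume nbytes from the walk, like _gcm_sg_unmap_and_advance(). */
static void unmap_and_advance(struct sg_walk *w, size_t n)
{
	w->offset += n;
	w->bytes_remain -= n;
}

int main(void)
{
	char a[] = "scatter", b[] = "", c[] = "-gather";
	struct iovec vec[] = {
		{ a, strlen(a) }, { b, 0 }, { c, strlen(c) },
	};
	struct sg_walk w = { vec, vec + 3, 0, strlen(a) + strlen(c) };
	const unsigned char *p;
	size_t n;

	while ((n = clamp_and_map(&w, &p)) > 0) {
		fwrite(p, 1, n, stdout);
		unmap_and_advance(&w, n);
	}
	putchar('\n');
	return 0;
}

Keeping the "find the next non-empty segment" step in one helper is what lets the input and output walks in the patch share the same invariant.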
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -14,6 +14,7 @@
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
 #include <linux/fips.h>
+#include <linux/mutex.h>
 #include <crypto/algapi.h>
 #include <crypto/des.h>
 #include <asm/cpacf.h>
@@ -21,7 +22,7 @@
 #define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
 
 static u8 *ctrblk;
-static DEFINE_SPINLOCK(ctrblk_lock);
+static DEFINE_MUTEX(ctrblk_lock);
 
 static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
 
@@ -374,7 +375,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
 	unsigned int n, nbytes;
 	int ret, locked;
 
-	locked = spin_trylock(&ctrblk_lock);
+	locked = mutex_trylock(&ctrblk_lock);
 
 	ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
 	while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
@@ -391,7 +392,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
 		ret = blkcipher_walk_done(desc, walk, nbytes - n);
 	}
 	if (locked)
-		spin_unlock(&ctrblk_lock);
+		mutex_unlock(&ctrblk_lock);
 	/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
 	if (nbytes) {
 		cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr,
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -160,8 +160,8 @@ struct ap_config_info {
 	unsigned char Nd;		/* max # of Domains - 1 */
 	unsigned char _reserved3[10];
 	unsigned int apm[8];		/* AP ID mask */
-	unsigned int aqm[8];		/* AP queue mask */
-	unsigned int adm[8];		/* AP domain mask */
+	unsigned int aqm[8];		/* AP (usage) queue mask */
+	unsigned int adm[8];		/* AP (control) domain mask */
 	unsigned char _reserved4[16];
 } __aligned(8);
 

--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -178,7 +178,7 @@ static inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
 		: "cc");
 }
 
-static inline int __cpacf_check_opcode(unsigned int opcode)
+static __always_inline int __cpacf_check_opcode(unsigned int opcode)
 {
 	switch (opcode) {
 	case CPACF_KMAC:
@@ -218,7 +218,7 @@ static inline int cpacf_test_func(cpacf_mask_t *mask, unsigned int func)
 	return (mask->bytes[func >> 3] & (0x80 >> (func & 7))) != 0;
 }
 
-static inline int cpacf_query_func(unsigned int opcode, unsigned int func)
+static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int func)
 {
 	cpacf_mask_t mask;
 
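The cpacf.h hunks above matter because these helpers only work when their opcode argument is a compile-time constant; with CONFIG_OPTIMIZE_INLINING the compiler is free to emit a plain "inline" function out of line, and the constant is lost. A small userspace sketch of the same effect, assuming the GCC/Clang always_inline attribute and invented names (build with -O2 to see the constant survive inlining; not kernel code):

#include <stdio.h>

/*
 * force_inline keeps the call a compile-time construct, so the argument
 * can still be recognised as a constant inside the callee.
 */
#define force_inline inline __attribute__((always_inline))

static force_inline int check_opcode(unsigned int opcode)
{
	/* With plain "inline" the compiler may emit this out of line,
	 * and then opcode is a runtime value rather than a constant. */
	return __builtin_constant_p(opcode);
}

static force_inline int query_func(unsigned int opcode, unsigned int func)
{
	/* stand-in for cpacf_query_func(): only meaningful for constant opcodes */
	return check_opcode(opcode) ? (int)((opcode + func) % 2) : -1;
}

int main(void)
{
	printf("constant opcode seen: %d\n", check_opcode(0xb91e));
	printf("query result: %d\n", query_func(0xb91e, 3));
	return 0;
}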
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -70,6 +70,17 @@ struct clp_rsp_list_pci {
 	struct clp_fh_list_entry fh_list[CLP_FH_LIST_NR_ENTRIES];
 } __packed;
 
+struct mio_info {
+	u32 valid : 6;
+	u32 : 26;
+	u32 : 32;
+	struct {
+		u64 wb;
+		u64 wt;
+	} addr[PCI_BAR_COUNT];
+	u32 reserved[6];
+} __packed;
+
 /* Query PCI function request */
 struct clp_req_query_pci {
 	struct clp_req_hdr hdr;
@@ -100,14 +111,7 @@ struct clp_rsp_query_pci {
 	u32 uid;			/* user defined id */
 	u8 util_str[CLP_UTIL_STR_LEN];	/* utility string */
 	u32 reserved2[16];
-	u32 mio_valid : 6;
-	u32 : 26;
-	u32 : 32;
-	struct {
-		u64 wb;
-		u64 wt;
-	} addr[PCI_BAR_COUNT];
-	u32 reserved3[6];
+	struct mio_info mio;
 } __packed;
 
 /* Query PCI function group request */
@@ -155,8 +159,9 @@ struct clp_req_set_pci {
 struct clp_rsp_set_pci {
 	struct clp_rsp_hdr hdr;
 	u32 fh;				/* function handle */
-	u32 reserved3;
-	u64 reserved4;
+	u32 reserved1;
+	u64 reserved2;
+	struct mio_info mio;
 } __packed;
 
 /* Combined request/response block structures used by clp insn */
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -85,7 +85,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
  * Find out which address space caused the exception.
  * Access register mode is impossible, ignore space == 3.
  */
-static inline enum fault_type get_fault_type(struct pt_regs *regs)
+static enum fault_type get_fault_type(struct pt_regs *regs)
 {
 	unsigned long trans_exc_code;
 
@@ -211,6 +211,8 @@ static void dump_fault_info(struct pt_regs *regs)
 		asce = S390_lowcore.kernel_asce;
 		pr_cont("kernel ");
 		break;
+	default:
+		unreachable();
 	}
 	pr_cont("ASCE.\n");
 	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -528,7 +528,10 @@ static int zpci_setup_bus_resources(struct zpci_dev *zdev,
 		if (zdev->bars[i].val & 4)
 			flags |= IORESOURCE_MEM_64;
 
-		addr = ZPCI_ADDR(entry);
+		if (static_branch_likely(&have_mio))
+			addr = (unsigned long) zdev->bars[i].mio_wb;
+		else
+			addr = ZPCI_ADDR(entry);
 		size = 1UL << zdev->bars[i].size;
 
 		res = __alloc_res(zdev, addr, size, flags);
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -165,11 +165,11 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
 	}
 	zdev->mio_capable = response->mio_addr_avail;
 	for (i = 0; i < PCI_BAR_COUNT; i++) {
-		if (!(response->mio_valid & (1 << (PCI_BAR_COUNT - i - 1))))
+		if (!(response->mio.valid & (1 << (PCI_BAR_COUNT - i - 1))))
 			continue;
 
-		zdev->bars[i].mio_wb = (void __iomem *) response->addr[i].wb;
-		zdev->bars[i].mio_wt = (void __iomem *) response->addr[i].wt;
+		zdev->bars[i].mio_wb = (void __iomem *) response->mio.addr[i].wb;
+		zdev->bars[i].mio_wt = (void __iomem *) response->mio.addr[i].wt;
 	}
 	return 0;
 }
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -254,19 +254,37 @@ static inline int ap_test_config_card_id(unsigned int id)
 }
 
 /*
- * ap_test_config_domain(): Test, whether an AP usage domain is configured.
+ * ap_test_config_usage_domain(): Test, whether an AP usage domain
+ * is configured.
  * @domain AP usage domain ID
  *
  * Returns 0 if the usage domain is not configured
  *	   1 if the usage domain is configured or
  *	     if the configuration information is not available
  */
-static inline int ap_test_config_domain(unsigned int domain)
+int ap_test_config_usage_domain(unsigned int domain)
 {
 	if (!ap_configuration)	/* QCI not supported */
 		return domain < 16;
 	return ap_test_config(ap_configuration->aqm, domain);
 }
+EXPORT_SYMBOL(ap_test_config_usage_domain);
+
+/*
+ * ap_test_config_ctrl_domain(): Test, whether an AP control domain
+ * is configured.
+ * @domain AP control domain ID
+ *
+ * Returns 1 if the control domain is configured
+ *	   0 in all other cases
+ */
+int ap_test_config_ctrl_domain(unsigned int domain)
+{
+	if (!ap_configuration)	/* QCI not supported */
+		return 0;
+	return ap_test_config(ap_configuration->adm, domain);
+}
+EXPORT_SYMBOL(ap_test_config_ctrl_domain);
 
 /**
  * ap_query_queue(): Check if an AP queue is available.
@@ -1267,7 +1285,7 @@ static void ap_select_domain(void)
 	best_domain = -1;
 	max_count = 0;
 	for (i = 0; i < AP_DOMAINS; i++) {
-		if (!ap_test_config_domain(i) ||
+		if (!ap_test_config_usage_domain(i) ||
 		    !test_bit_inv(i, ap_perms.aqm))
 			continue;
 		count = 0;
@@ -1442,7 +1460,7 @@ static void _ap_scan_bus_adapter(int id)
 					   (void *)(long) qid,
 					   __match_queue_device_with_qid);
 		aq = dev ? to_ap_queue(dev) : NULL;
-		if (!ap_test_config_domain(dom)) {
+		if (!ap_test_config_usage_domain(dom)) {
 			if (dev) {
 				/* Queue device exists but has been
 				 * removed from configuration.
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -251,6 +251,9 @@ void ap_wait(enum ap_wait wait);
 void ap_request_timeout(struct timer_list *t);
 void ap_bus_force_rescan(void);
 
+int ap_test_config_usage_domain(unsigned int domain);
+int ap_test_config_ctrl_domain(unsigned int domain);
+
 void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
 struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
 void ap_queue_prepare_remove(struct ap_queue *aq);
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -822,7 +822,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
 	struct ap_message ap_msg;
 	unsigned int weight, pref_weight;
 	unsigned int func_code;
-	unsigned short *domain;
+	unsigned short *domain, tdom;
 	int qid = 0, rc = -ENODEV;
 	struct module *mod;
 
@@ -834,6 +834,17 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
 	if (rc)
 		goto out;
 
+	/*
+	 * If a valid target domain is set and this domain is NOT a usage
+	 * domain but a control only domain, use the default domain as target.
+	 */
+	tdom = *domain;
+	if (tdom >= 0 && tdom < AP_DOMAINS &&
+	    !ap_test_config_usage_domain(tdom) &&
+	    ap_test_config_ctrl_domain(tdom) &&
+	    ap_domain_index >= 0)
+		tdom = ap_domain_index;
+
 	pref_zc = NULL;
 	pref_zq = NULL;
 	spin_lock(&zcrypt_list_lock);
@@ -856,8 +867,8 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
 		/* check if device is online and eligible */
 		if (!zq->online ||
 		    !zq->ops->send_cprb ||
-		    ((*domain != (unsigned short) AUTOSELECT) &&
-		     (*domain != AP_QID_QUEUE(zq->queue->qid))))
+		    (tdom != (unsigned short) AUTOSELECT &&
+		     tdom != AP_QID_QUEUE(zq->queue->qid)))
 			continue;
 		/* check if device node has admission for this queue */
 		if (!zcrypt_check_queue(perms,
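The zcrypt change above redirects a CPRB whose target is a control-only domain to the default usage domain instead of letting it match no queue at all. A compact userspace sketch of that dispatch rule, with an invented toy configuration (not driver code):

#include <stdio.h>
#include <stdbool.h>

#define AP_DOMAINS	256
#define AUTOSELECT	0xFFFF

/* toy configuration: which domains carry usage (crypto) traffic
 * and which are (possibly control-only) control domains */
static bool usage_domain[AP_DOMAINS] = { [6] = true, [7] = true };
static bool ctrl_domain[AP_DOMAINS]  = { [6] = true, [7] = true, [8] = true };
static int default_domain = 6;

/* Mirror of the fixed rule: a control-only target domain is redirected
 * to the default usage domain; everything else is left alone. */
static unsigned int pick_target_domain(unsigned int requested)
{
	if (requested < AP_DOMAINS &&
	    !usage_domain[requested] &&
	    ctrl_domain[requested] &&
	    default_domain >= 0)
		return (unsigned int)default_domain;
	return requested;
}

int main(void)
{
	printf("request dom 7 -> dispatch to %u\n", pick_target_domain(7));
	printf("request dom 8 -> dispatch to %u\n", pick_target_domain(8));
	printf("autoselect    -> dispatch to %u\n", pick_target_domain(AUTOSELECT));
	return 0;
}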