forked from luck/tmp_suning_uos_patched
ca520cab25
Pull locking and atomic updates from Ingo Molnar:
 "Main changes in this cycle are:

   - Extend atomic primitives with coherent logic op primitives
     (atomic_{or,and,xor}()) and deprecate the old partial APIs
     (atomic_{set,clear}_mask())

     The old ops were incoherent with incompatible signatures across
     architectures and with incomplete support.  Now every architecture
     supports the primitives consistently (by Peter Zijlstra)

   - Generic support for 'relaxed atomics':

       - _acquire/release/relaxed() flavours of xchg(), cmpxchg() and
         {add,sub}_return()
       - atomic_read_acquire()
       - atomic_set_release()

     This came out of porting qwrlock code to arm64 (by Will Deacon)

   - Clean up the fragile static_key APIs that were causing repeat bugs,
     by introducing a new one:

       DEFINE_STATIC_KEY_TRUE(name);
       DEFINE_STATIC_KEY_FALSE(name);

     which define a key of different types with an initial true/false
     value.

     Then allow:

       static_branch_likely()
       static_branch_unlikely()

     to take a key of either type and emit the right instruction for the
     case.  To be able to know the 'type' of the static key we encode it
     in the jump entry (by Peter Zijlstra)

   - Static key self-tests (by Jason Baron)

   - qrwlock optimizations (by Waiman Long)

   - small futex enhancements (by Davidlohr Bueso)

   - ... and misc other changes"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (63 commits)
  jump_label/x86: Work around asm build bug on older/backported GCCs
  locking, ARM, atomics: Define our SMP atomics in terms of _relaxed() operations
  locking, include/llist: Use linux/atomic.h instead of asm/cmpxchg.h
  locking/qrwlock: Make use of _{acquire|release|relaxed}() atomics
  locking/qrwlock: Implement queue_write_unlock() using smp_store_release()
  locking/lockref: Remove homebrew cmpxchg64_relaxed() macro definition
  locking, asm-generic: Add _{relaxed|acquire|release}() variants for 'atomic_long_t'
  locking, asm-generic: Rework atomic-long.h to avoid bulk code duplication
  locking/atomics: Add _{acquire|release|relaxed}() variants of some atomic operations
  locking, compiler.h: Cast away attributes in the WRITE_ONCE() magic
  locking/static_keys: Make verify_keys() static
  jump label, locking/static_keys: Update docs
  locking/static_keys: Provide a selftest
  jump_label: Provide a self-test
  s390/uaccess, locking/static_keys: employ static_branch_likely()
  x86, tsc, locking/static_keys: Employ static_branch_likely()
  locking/static_keys: Add selftest
  locking/static_keys: Add a new static_key interface
  locking/static_keys: Rework update logic
  locking/static_keys: Add static_key_{en,dis}able() helpers
  ...
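For reference, a minimal sketch of the new static-key interface described above, roughly as kernel code would use it after this merge. The key name and the fast_path()/slow_path() helpers are illustrative only, not taken from this tree; on s390, flipping such a key is what ultimately lands in the arch_jump_label_transform() code in the file below.

  #include <linux/jump_label.h>

  /* Hypothetical helpers, declared only to make the sketch self-contained. */
  extern void fast_path(void);
  extern void slow_path(void);

  /* Illustrative key; defaults to false, so the branch starts out as a nop. */
  static DEFINE_STATIC_KEY_FALSE(use_fast_path);

  void do_work(void)
  {
          if (static_branch_unlikely(&use_fast_path))
                  fast_path();
          else
                  slow_path();
  }

  void switch_to_fast_path(void)
  {
          /* Rewrites every branch site; on s390 that is a 6-byte brcl patch. */
          static_branch_enable(&use_fast_path);
  }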
107 lines
2.4 KiB
C
/*
 * Jump label s390 support
 *
 * Copyright IBM Corp. 2011
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <linux/jump_label.h>
#include <asm/ipl.h>

#ifdef HAVE_JUMP_LABEL

/* A 6-byte brcl instruction: opcode/mask halfword plus signed offset in halfwords. */
struct insn {
        u16 opcode;
        s32 offset;
} __packed;

struct insn_args {
        struct jump_entry *entry;
        enum jump_label_type type;
};

static void jump_label_make_nop(struct jump_entry *entry, struct insn *insn)
{
        /* brcl 0,0 */
        insn->opcode = 0xc004;
        insn->offset = 0;
}

static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
{
        /* brcl 15,offset - the relative offset is encoded in halfwords */
        insn->opcode = 0xc0f4;
        insn->offset = (entry->target - entry->code) >> 1;
}

static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
                           struct insn *new)
{
        unsigned char *ipc = (unsigned char *)entry->code;
        unsigned char *ipe = (unsigned char *)expected;
        unsigned char *ipn = (unsigned char *)new;

        pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
        pr_emerg("Found:    %6ph\n", ipc);
        pr_emerg("Expected: %6ph\n", ipe);
        pr_emerg("New:      %6ph\n", ipn);
        panic("Corrupted kernel text");
}

/* The nop emitted at each jump label site at build time. */
static struct insn orignop = {
        .opcode = 0xc004,
        .offset = JUMP_LABEL_NOP_OFFSET >> 1,
};

static void __jump_label_transform(struct jump_entry *entry,
                                   enum jump_label_type type,
                                   int init)
{
        struct insn old, new;

        if (type == JUMP_LABEL_JMP) {
                jump_label_make_nop(entry, &old);
                jump_label_make_branch(entry, &new);
        } else {
                jump_label_make_branch(entry, &old);
                jump_label_make_nop(entry, &new);
        }
        /* Verify the instruction currently at the site before replacing it. */
        if (init) {
                if (memcmp((void *)entry->code, &orignop, sizeof(orignop)))
                        jump_label_bug(entry, &orignop, &new);
        } else {
                if (memcmp((void *)entry->code, &old, sizeof(old)))
                        jump_label_bug(entry, &old, &new);
        }
        s390_kernel_write((void *)entry->code, &new, sizeof(new));
}

static int __sm_arch_jump_label_transform(void *data)
{
        struct insn_args *args = data;

        __jump_label_transform(args->entry, args->type, 0);
        return 0;
}

void arch_jump_label_transform(struct jump_entry *entry,
                               enum jump_label_type type)
{
        struct insn_args args;

        args.entry = entry;
        args.type = type;

        /* Patch the instruction with all other CPUs stopped. */
        stop_machine(__sm_arch_jump_label_transform, &args, NULL);
}

/* Init-time variant: the site is still expected to hold the build-time nop. */
void arch_jump_label_transform_static(struct jump_entry *entry,
                                      enum jump_label_type type)
{
        __jump_label_transform(entry, type, 1);
}

#endif