Skip to content

Commit

Permalink
Merge branches 'doc.2017.08.17a', 'fixes.2017.08.17a', 'hotplug.2017.…
Browse files Browse the repository at this point in the history
…07.25b', 'misc.2017.08.17a', 'spin_unlock_wait_no.2017.08.17a', 'srcu.2017.07.27c' and 'torture.2017.07.24c' into HEAD

doc.2017.08.17a: Documentation updates.
fixes.2017.08.17a: RCU fixes.
hotplug.2017.07.25b: CPU-hotplug updates.
misc.2017.08.17a: Miscellaneous fixes outside of RCU (give or take conflicts).
spin_unlock_wait_no.2017.08.17a: Remove spin_unlock_wait().
srcu.2017.07.27c: SRCU updates.
torture.2017.07.24c: Torture-test updates.
  • Loading branch information
paulmck committed Aug 17, 2017
7 parents 850bf6d + 16c0b10 + 09efeee + 22e4ebb + 952111d + 35732cf + f34c858 commit 656e7c0
Show file tree
Hide file tree
Showing 75 changed files with 898 additions and 1,214 deletions.
2 changes: 1 addition & 1 deletion MAINTAINERS
Original file line number Diff line number Diff line change
Expand Up @@ -8621,7 +8621,7 @@ M: Mathieu Desnoyers <[email protected]>
M: "Paul E. McKenney" <[email protected]>
L: [email protected]
S: Supported
F: kernel/membarrier.c
F: kernel/sched/membarrier.c
F: include/uapi/linux/membarrier.h

MEMORY MANAGEMENT
Expand Down
5 changes: 0 additions & 5 deletions arch/alpha/include/asm/spinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -16,11 +16,6 @@
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_is_locked(x) ((x)->lock != 0)

/*
 * Busy-wait until this lock is released: spin re-reading lock->lock
 * until it reads as zero (per arch_spin_is_locked() above, nonzero
 * means locked), with acquire semantics on the final read.
 */
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
smp_cond_load_acquire(&lock->lock, !VAL);
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
return lock.lock == 0;
Expand Down
5 changes: 0 additions & 5 deletions arch/arc/include/asm/spinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -16,11 +16,6 @@
#define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

/*
 * Busy-wait until this lock is released: spin re-reading lock->slock
 * until it no longer compares nonzero (!VAL), i.e. reads as zero, with
 * acquire semantics on the final read.
 * NOTE(review): on ARC the unlocked value is __ARCH_SPIN_LOCK_UNLOCKED__
 * (see arch_spin_is_locked above); this wait condition assumes that
 * value is 0 -- confirm against the arch_spinlock_t definition.
 */
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
smp_cond_load_acquire(&lock->slock, !VAL);
}

#ifdef CONFIG_ARC_HAS_LLSC

static inline void arch_spin_lock(arch_spinlock_t *lock)
Expand Down
16 changes: 0 additions & 16 deletions arch/arm/include/asm/spinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -52,22 +52,6 @@ static inline void dsb_sev(void)
* memory.
*/

/*
 * Wait for the current holder (if any) of this ticket lock to release it.
 *
 * Snapshot the owner field, then spin until either the lock is free
 * (owner == next) or the owner has changed since the snapshot -- a changed
 * owner means at least one unlock has occurred, which is all this API
 * promises.  wfe() parks the CPU between polls; the paired sev in the
 * unlock path (see dsb_sev() above) wakes it.
 */
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
u16 owner = READ_ONCE(lock->tickets.owner);

for (;;) {
/* Re-read the whole lock word each iteration. */
arch_spinlock_t tmp = READ_ONCE(*lock);

if (tmp.tickets.owner == tmp.tickets.next ||
tmp.tickets.owner != owner)
break;

wfe();
}
/* Upgrade the control dependency on the loop exit to acquire ordering. */
smp_acquire__after_ctrl_dep();
}

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
Expand Down
58 changes: 5 additions & 53 deletions arch/arm64/include/asm/spinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -26,58 +26,6 @@
* The memory barriers are implicit with the load-acquire and store-release
* instructions.
*/
/*
 * Wait for the current holder (if any) of this ticket lock to release it.
 *
 * The asm loop uses wfe/sevl to sleep between polls and ldaxr to take an
 * exclusive, acquiring load of the lock word.  It exits when either the
 * lock reads as free (owner == next halves equal) or the owner field has
 * advanced past the snapshot taken below (an unlock->lock transition
 * happened).  In the lock-free case it writes the unlocked value back
 * (stxr on LL/SC, cas on LSE) to serialise against concurrent lockers.
 */
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
unsigned int tmp;
arch_spinlock_t lockval;
u32 owner;

/*
* Ensure prior spin_lock operations to other locks have completed
* on this CPU before we test whether "lock" is locked.
*/
smp_mb();
owner = READ_ONCE(lock->owner) << 16;

asm volatile(
" sevl\n"
"1: wfe\n"
"2: ldaxr %w0, %2\n"
/* Is the lock free? */
" eor %w1, %w0, %w0, ror #16\n"
" cbz %w1, 3f\n"
/* Lock taken -- has there been a subsequent unlock->lock transition? */
" eor %w1, %w3, %w0, lsl #16\n"
" cbz %w1, 1b\n"
/*
* The owner has been updated, so there was an unlock->lock
* transition that we missed. That means we can rely on the
* store-release of the unlock operation paired with the
* load-acquire of the lock operation to publish any of our
* previous stores to the new lock owner and therefore don't
* need to bother with the writeback below.
*/
" b 4f\n"
"3:\n"
/*
* Serialise against any concurrent lockers by writing back the
* unlocked lock value
*/
ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
" stxr %w1, %w0, %2\n"
__nops(2),
/* LSE atomics */
" mov %w1, %w0\n"
" cas %w0, %w0, %2\n"
" eor %w1, %w1, %w0\n")
/* Somebody else wrote to the lock, GOTO 10 and reload the value */
" cbnz %w1, 2b\n"
"4:"
: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
: "r" (owner)
: "memory");
}

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

Expand Down Expand Up @@ -176,7 +124,11 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)

/* Return nonzero iff the ticket lock is currently held. */
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
/*
* Ensure prior spin_lock operations to other locks have completed
* on this CPU before we test whether "lock" is locked.
*/
smp_mb(); /* ^^^ */
return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

Expand Down
2 changes: 2 additions & 0 deletions arch/arm64/kernel/process.c
Original file line number Diff line number Diff line change
Expand Up @@ -360,6 +360,8 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
/*
* Complete any pending TLB or cache maintenance on this CPU in case
* the thread migrates to a different CPU.
* This full barrier is also required by the membarrier system
* call.
*/
dsb(ish);

Expand Down
5 changes: 0 additions & 5 deletions arch/blackfin/include/asm/spinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -48,11 +48,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
__raw_spin_unlock_asm(&lock->lock);
}

/*
 * Busy-wait until this lock is released: spin re-reading lock->lock
 * until it reads as zero (!VAL), with acquire semantics on the final
 * read.
 */
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
smp_cond_load_acquire(&lock->lock, !VAL);
}

static inline int arch_read_can_lock(arch_rwlock_t *rw)
{
return __raw_uncached_fetch_asm(&rw->lock) > 0;
Expand Down
39 changes: 21 additions & 18 deletions arch/blackfin/kernel/module.c
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,6 @@
* Licensed under the GPL-2 or later
*/

#define pr_fmt(fmt) "module %s: " fmt, mod->name

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
Expand All @@ -16,6 +14,11 @@
#include <asm/cacheflush.h>
#include <linux/uaccess.h>

/*
 * Log an error/debug message prefixed with the module's name.
 * (mod) is parenthesized to be safe against arbitrary argument
 * expressions; ##__VA_ARGS__ drops the trailing comma when the
 * call supplies no variadic arguments.
 */
#define mod_err(mod, fmt, ...) \
pr_err("module %s: " fmt, (mod)->name, ##__VA_ARGS__)
#define mod_debug(mod, fmt, ...) \
pr_debug("module %s: " fmt, (mod)->name, ##__VA_ARGS__)

/* Transfer the section to the L1 memory */
int
module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
Expand Down Expand Up @@ -44,7 +47,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l1_inst_sram_alloc(s->sh_size);
mod->arch.text_l1 = dest;
if (dest == NULL) {
pr_err("L1 inst memory allocation failed\n");
mod_err(mod, "L1 inst memory allocation failed\n");
return -1;
}
dma_memcpy(dest, (void *)s->sh_addr, s->sh_size);
Expand All @@ -56,7 +59,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l1_data_sram_alloc(s->sh_size);
mod->arch.data_a_l1 = dest;
if (dest == NULL) {
pr_err("L1 data memory allocation failed\n");
mod_err(mod, "L1 data memory allocation failed\n");
return -1;
}
memcpy(dest, (void *)s->sh_addr, s->sh_size);
Expand All @@ -68,7 +71,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l1_data_sram_zalloc(s->sh_size);
mod->arch.bss_a_l1 = dest;
if (dest == NULL) {
pr_err("L1 data memory allocation failed\n");
mod_err(mod, "L1 data memory allocation failed\n");
return -1;
}

Expand All @@ -77,7 +80,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l1_data_B_sram_alloc(s->sh_size);
mod->arch.data_b_l1 = dest;
if (dest == NULL) {
pr_err("L1 data memory allocation failed\n");
mod_err(mod, "L1 data memory allocation failed\n");
return -1;
}
memcpy(dest, (void *)s->sh_addr, s->sh_size);
Expand All @@ -87,7 +90,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l1_data_B_sram_alloc(s->sh_size);
mod->arch.bss_b_l1 = dest;
if (dest == NULL) {
pr_err("L1 data memory allocation failed\n");
mod_err(mod, "L1 data memory allocation failed\n");
return -1;
}
memset(dest, 0, s->sh_size);
Expand All @@ -99,7 +102,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l2_sram_alloc(s->sh_size);
mod->arch.text_l2 = dest;
if (dest == NULL) {
pr_err("L2 SRAM allocation failed\n");
mod_err(mod, "L2 SRAM allocation failed\n");
return -1;
}
memcpy(dest, (void *)s->sh_addr, s->sh_size);
Expand All @@ -111,7 +114,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l2_sram_alloc(s->sh_size);
mod->arch.data_l2 = dest;
if (dest == NULL) {
pr_err("L2 SRAM allocation failed\n");
mod_err(mod, "L2 SRAM allocation failed\n");
return -1;
}
memcpy(dest, (void *)s->sh_addr, s->sh_size);
Expand All @@ -123,7 +126,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l2_sram_zalloc(s->sh_size);
mod->arch.bss_l2 = dest;
if (dest == NULL) {
pr_err("L2 SRAM allocation failed\n");
mod_err(mod, "L2 SRAM allocation failed\n");
return -1;
}

Expand Down Expand Up @@ -157,8 +160,8 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
Elf32_Sym *sym;
unsigned long location, value, size;

pr_debug("applying relocate section %u to %u\n",
relsec, sechdrs[relsec].sh_info);
mod_debug(mod, "applying relocate section %u to %u\n",
relsec, sechdrs[relsec].sh_info);

for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
Expand All @@ -174,14 +177,14 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,

#ifdef CONFIG_SMP
if (location >= COREB_L1_DATA_A_START) {
pr_err("cannot relocate in L1: %u (SMP kernel)\n",
mod_err(mod, "cannot relocate in L1: %u (SMP kernel)\n",
ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
#endif

pr_debug("location is %lx, value is %lx type is %d\n",
location, value, ELF32_R_TYPE(rel[i].r_info));
mod_debug(mod, "location is %lx, value is %lx type is %d\n",
location, value, ELF32_R_TYPE(rel[i].r_info));

switch (ELF32_R_TYPE(rel[i].r_info)) {

Expand All @@ -200,12 +203,12 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
case R_BFIN_PCREL12_JUMP:
case R_BFIN_PCREL12_JUMP_S:
case R_BFIN_PCREL10:
pr_err("unsupported relocation: %u (no -mlong-calls?)\n",
mod_err(mod, "unsupported relocation: %u (no -mlong-calls?)\n",
ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;

default:
pr_err("unknown relocation: %u\n",
mod_err(mod, "unknown relocation: %u\n",
ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
Expand All @@ -222,7 +225,7 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
isram_memcpy((void *)location, &value, size);
break;
default:
pr_err("invalid relocation for %#lx\n", location);
mod_err(mod, "invalid relocation for %#lx\n", location);
return -ENOEXEC;
}
}
Expand Down
5 changes: 0 additions & 5 deletions arch/hexagon/include/asm/spinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -179,11 +179,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
*/
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

/*
 * Busy-wait until this lock is released: spin re-reading lock->lock
 * until it reads as zero (per arch_spin_is_locked() below, nonzero
 * means locked), with acquire semantics on the final read.
 */
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
smp_cond_load_acquire(&lock->lock, !VAL);
}

#define arch_spin_is_locked(x) ((x)->lock != 0)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
Expand Down
21 changes: 0 additions & 21 deletions arch/ia64/include/asm/spinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -76,22 +76,6 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
ACCESS_ONCE(*p) = (tmp + 2) & ~1;
}

/*
 * Spin until this ticket lock is observed free: poll the lock word with a
 * speculative check load (ld4.c.nc, after invalidating the ALAT with
 * ia64_invala) until the now-serving and next-ticket fields match.
 *
 * NOTE(review): the trailing smp_acquire__after_ctrl_dep() is unreachable
 * dead code -- the for(;;) loop has no break and exits only via the
 * "return" inside it, so this function provides no acquire ordering on
 * exit.  Flagging rather than fixing, as callers' ordering expectations
 * are not visible here.
 */
static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
{
int *p = (int *)&lock->lock, ticket;

ia64_invala();

for (;;) {
asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
return;
cpu_relax();
}

smp_acquire__after_ctrl_dep();
}

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
long tmp = ACCESS_ONCE(lock->lock);
Expand Down Expand Up @@ -143,11 +127,6 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
arch_spin_lock(lock);
}

/* Thin wrapper: delegate to the ticket-lock implementation above. */
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
__ticket_spin_unlock_wait(lock);
}

#define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
#define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0)

Expand Down
5 changes: 0 additions & 5 deletions arch/m32r/include/asm/spinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -30,11 +30,6 @@
#define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

/*
 * Busy-wait until this lock is released: spin re-reading lock->slock
 * until it is positive (per arch_spin_is_locked() above, slock <= 0
 * means locked), with acquire semantics on the final read.
 */
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
smp_cond_load_acquire(&lock->slock, VAL > 0);
}

/**
* arch_spin_trylock - Try spin lock and return a result
* @lock: Pointer to the lock variable
Expand Down
5 changes: 0 additions & 5 deletions arch/metag/include/asm/spinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,11 +15,6 @@
* locked.
*/

/*
 * Busy-wait until this lock is released: spin re-reading lock->lock
 * until it reads as zero (!VAL), with acquire semantics on the final
 * read.
 */
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
smp_cond_load_acquire(&lock->lock, !VAL);
}

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
Expand Down
5 changes: 0 additions & 5 deletions arch/mn10300/include/asm/spinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -26,11 +26,6 @@

#define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) != 0)

/*
 * Busy-wait until this lock is released: spin re-reading lock->slock
 * until it reads as zero (per arch_spin_is_locked() above, nonzero
 * means locked), with acquire semantics on the final read.
 */
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
smp_cond_load_acquire(&lock->slock, !VAL);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
asm volatile(
Expand Down
7 changes: 0 additions & 7 deletions arch/parisc/include/asm/spinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,13 +14,6 @@ static inline int arch_spin_is_locked(arch_spinlock_t *x)

#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)

/*
 * Busy-wait until this lock is released: locate the ldcw-aligned lock
 * word and spin until it reads nonzero (VAL), with acquire semantics on
 * the final read.
 * NOTE(review): the wait condition implies nonzero == unlocked here
 * (the inverse of most arches) -- consistent with parisc's ldcw-based
 * locking, but confirm against arch_spin_is_locked() above.
 */
static inline void arch_spin_unlock_wait(arch_spinlock_t *x)
{
volatile unsigned int *a = __ldcw_align(x);

smp_cond_load_acquire(a, VAL);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *x,
unsigned long flags)
{
Expand Down
Loading

0 comments on commit 656e7c0

Please sign in to comment.