Diffstat (limited to 'arch/arc')
-rw-r--r--  arch/arc/Makefile               |  4
-rw-r--r--  arch/arc/include/asm/atomic.h   | 99
-rw-r--r--  arch/arc/include/asm/pgtable.h  |  2
-rw-r--r--  arch/arc/include/asm/spinlock.h |  7
-rw-r--r--  arch/arc/kernel/setup.c         |  1
-rw-r--r--  arch/arc/mm/dma.c               |  3
-rw-r--r--  arch/arc/mm/fault.c             |  2
-rw-r--r--  arch/arc/mm/ioremap.c           |  2
8 files changed, 99 insertions(+), 21 deletions(-)
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 85814e74677d..601ed173080b 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -74,9 +74,7 @@ endif
 ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
 # Generic build system uses -O2, we want -O3
 # Note: No need to add to cflags-y as that happens anyways
-#
-# Disable the false maybe-uninitialized warings gcc spits out at -O3
-ARCH_CFLAGS += -O3 $(call cc-disable-warning,maybe-uninitialized,)
+ARCH_CFLAGS += -O3
 endif
 
 # small data is default for elf32 tool-chain. If not usable, disable it
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index dd683995bc9d..4e3c1b6b0806 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -67,6 +67,33 @@ static inline int atomic_##op##_return(int i, atomic_t *v)      \
 	return val;                                             \
 }
 
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)                       \
+static inline int atomic_fetch_##op(int i, atomic_t *v)         \
+{                                                               \
+	unsigned int val, orig;                                 \
+                                                                \
+	/*                                                      \
+	 * Explicit full memory barrier needed before/after as  \
+	 * LLOCK/SCOND thmeselves don't provide any such semantics \
+	 */                                                     \
+	smp_mb();                                               \
+                                                                \
+	__asm__ __volatile__(                                   \
+	"1:	llock   %[orig], [%[ctr]]		\n"     \
+	"	" #asm_op " %[val], %[orig], %[i]	\n"     \
+	"	scond   %[val], [%[ctr]]		\n"     \
+	"						\n"     \
+	: [val]	"=&r"	(val),                                  \
+	  [orig] "=&r" (orig)                                   \
+	: [ctr]	"r"	(&v->counter),                          \
+	  [i]	"ir"	(i)                                     \
+	: "cc");                                                \
+                                                                \
+	smp_mb();                                               \
+                                                                \
+	return orig;                                            \
+}
+
 #else	/* !CONFIG_ARC_HAS_LLSC */
 
 #ifndef CONFIG_SMP
@@ -129,25 +156,44 @@ static inline int atomic_##op##_return(int i, atomic_t *v)    \
 	return temp;                                            \
 }
 
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)                       \
+static inline int atomic_fetch_##op(int i, atomic_t *v)         \
+{                                                               \
+	unsigned long flags;                                    \
+	unsigned long orig;                                     \
+                                                                \
+	/*                                                      \
+	 * spin lock/unlock provides the needed smp_mb() before/after \
+	 */                                                     \
+	atomic_ops_lock(flags);                                 \
+	orig = v->counter;                                      \
+	v->counter c_op i;                                      \
+	atomic_ops_unlock(flags);                               \
+                                                                \
+	return orig;                                            \
+}
+
 #endif /* !CONFIG_ARC_HAS_LLSC */
 
 #define ATOMIC_OPS(op, c_op, asm_op)                            \
 	ATOMIC_OP(op, c_op, asm_op)                             \
-	ATOMIC_OP_RETURN(op, c_op, asm_op)
+	ATOMIC_OP_RETURN(op, c_op, asm_op)                      \
+	ATOMIC_FETCH_OP(op, c_op, asm_op)
 
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
 #define atomic_andnot atomic_andnot
 
-ATOMIC_OP(and, &=, and)
-ATOMIC_OP(andnot, &= ~, bic)
-ATOMIC_OP(or, |=, or)
-ATOMIC_OP(xor, ^=, xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op)                            \
+	ATOMIC_OP(op, c_op, asm_op)                             \
+	ATOMIC_FETCH_OP(op, c_op, asm_op)
 
-#undef SCOND_FAIL_RETRY_VAR_DEF
-#undef SCOND_FAIL_RETRY_ASM
-#undef SCOND_FAIL_RETRY_VARS
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(andnot, &= ~, bic)
+ATOMIC_OPS(or, |=, or)
+ATOMIC_OPS(xor, ^=, xor)
 
 #else /* CONFIG_ARC_PLAT_EZNPS */
 
@@ -208,22 +254,51 @@ static inline int atomic_##op##_return(int i, atomic_t *v)    \
 	return temp;                                            \
 }
 
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)                       \
+static inline int atomic_fetch_##op(int i, atomic_t *v)         \
+{                                                               \
+	unsigned int temp = i;                                  \
+                                                                \
+	/* Explicit full memory barrier needed before/after */  \
+	smp_mb();                                               \
+                                                                \
+	__asm__ __volatile__(                                   \
+	"	mov r2, %0\n"                                   \
+	"	mov r3, %1\n"                                   \
+	"	.word %2\n"                                     \
+	"	mov %0, r2"                                     \
+	: "+r"(temp)                                            \
+	: "r"(&v->counter), "i"(asm_op)                         \
+	: "r2", "r3", "memory");                                \
+                                                                \
+	smp_mb();                                               \
+                                                                \
+	return temp;                                            \
+}
+
 #define ATOMIC_OPS(op, c_op, asm_op)                            \
 	ATOMIC_OP(op, c_op, asm_op)                             \
-	ATOMIC_OP_RETURN(op, c_op, asm_op)
+	ATOMIC_OP_RETURN(op, c_op, asm_op)                      \
+	ATOMIC_FETCH_OP(op, c_op, asm_op)
 
 ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
 #define atomic_sub(i, v) atomic_add(-(i), (v))
 #define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
 
-ATOMIC_OP(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op)                            \
+	ATOMIC_OP(op, c_op, asm_op)                             \
+	ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
 #define atomic_andnot(mask, v) atomic_and(~(mask), (v))
-ATOMIC_OP(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
-ATOMIC_OP(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
+ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
+ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
 
 #endif /* CONFIG_ARC_PLAT_EZNPS */
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
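The atomic_fetch_*() routines added above return the value the counter held before the operation, whereas the existing atomic_*_return() routines return the value after it. The following userspace sketch contrasts the two conventions with GCC/Clang __atomic builtins; it is an illustration only, not the kernel implementation, and the full smp_mb() pairs in the patch play the role that __ATOMIC_SEQ_CST plays here.

/* Userspace sketch: fetch-op vs. op-return conventions */
#include <stdio.h>

static int counter = 10;

int main(void)
{
	/* fetch-op: returns the old value (10); counter becomes 13 */
	int orig = __atomic_fetch_add(&counter, 3, __ATOMIC_SEQ_CST);

	/* op-return: returns the new value (18); counter becomes 18 */
	int now = __atomic_add_fetch(&counter, 5, __ATOMIC_SEQ_CST);

	printf("orig=%d now=%d counter=%d\n", orig, now, counter);
	return 0;
}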
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 858f98ef7f1b..0f92d97432a2 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -110,7 +110,7 @@
 #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
 
 /* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
 
 /* More Abbrevaited helpers */
 #define PAGE_U_NONE     __pgprot(___DEF)
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index cded4a9b5438..233d5ffe6ec7 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -15,8 +15,11 @@
 
 #define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
 #define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
-#define arch_spin_unlock_wait(x) \
-	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->slock, !VAL);
+}
 
 #ifdef CONFIG_ARC_HAS_LLSC
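The spinlock.h change replaces the open-coded poll loop with smp_cond_load_acquire(&lock->slock, !VAL), which spins until the loaded lock word satisfies the condition (reads as zero, i.e. unlocked) and then guarantees acquire ordering, so accesses after the wait cannot be hoisted above it. Below is a rough C11 model of that behavior; the helper name is invented for illustration and this is not the kernel's implementation, which builds on a control dependency plus an acquire barrier.

/* Userspace model of "spin until the lock word reads zero, then acquire" */
#include <stdatomic.h>

static unsigned int cond_load_acquire_until_zero(_Atomic unsigned int *p)
{
	unsigned int val;

	for (;;) {
		/* cheap relaxed poll while the lock is still held */
		val = atomic_load_explicit(p, memory_order_relaxed);
		if (!val)
			break;
		/* the kernel would cpu_relax() here */
	}
	/* upgrade to acquire ordering, as the _acquire suffix promises */
	atomic_thread_fence(memory_order_acquire);
	return val;
}

int main(void)
{
	_Atomic unsigned int slock = 0;	/* already unlocked: returns at once */
	return (int)cond_load_acquire_until_zero(&slock);
}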
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 2ee7a4d758a8..5f8c0b47045a 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/cpu.h>
 #include <linux/of_fdt.h>
+#include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/cache.h>
 #include <asm/sections.h>
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 73d7e4c75b7d..ab74b5d9186c 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -92,7 +92,8 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
-	struct page *page = virt_to_page(dma_handle);
+	phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);
+	struct page *page = virt_to_page(paddr);
 	int is_non_coh = 1;
 
 	is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) ||
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index af63f4a13e60..e94e5aa33985 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -137,7 +137,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	/* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
 	if (unlikely(fatal_signal_pending(current))) {
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index 49b8abd1115c..f52b7db67fd3 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(ioremap);
 /*
  * ioremap with access flags
  * Cache semantics wise it is same as ioremap - "forced" uncached.
- * However unline vanilla ioremap which bypasses ARC MMU for addresses in
+ * However unlike vanilla ioremap which bypasses ARC MMU for addresses in
 * ARC hardware uncached region, this one still goes thru the MMU as caller
 * might need finer access control (R/W/X)
 */
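The pgtable.h one-liner earlier in this patch is subtler than it looks: pte_modify() keeps exactly the bits in _PAGE_CHG_MASK and rebuilds the rest from the new protection, so leaving _PAGE_SPECIAL out of the mask meant a special mapping silently lost that bit whenever its protections changed. A self-contained sketch of that masking follows; the bit values and the simplified pte_modify() shape are invented for illustration (ARC's real layout differs).

/* Sketch: why _PAGE_SPECIAL must be in _PAGE_CHG_MASK (made-up bit values) */
#include <stdio.h>

#define PAGE_MASK	0xfffff000u	/* hypothetical: physical frame bits */
#define _PAGE_ACCESSED	0x00000004u	/* hypothetical flag values */
#define _PAGE_DIRTY	0x00000008u
#define _PAGE_SPECIAL	0x00000040u

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)

/* pte_modify() shape: keep the CHG_MASK bits, take the rest from newprot */
static unsigned int pte_modify(unsigned int pte, unsigned int newprot)
{
	return (pte & _PAGE_CHG_MASK) | (newprot & ~_PAGE_CHG_MASK);
}

int main(void)
{
	unsigned int pte = 0x12345000u | _PAGE_SPECIAL | _PAGE_DIRTY;
	unsigned int out = pte_modify(pte, 0x3u /* hypothetical prot bits */);

	/* _PAGE_SPECIAL survives only because it is in _PAGE_CHG_MASK */
	printf("special kept: %s\n", (out & _PAGE_SPECIAL) ? "yes" : "no");
	return 0;
}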