Commit 6d0a5984 authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "Fix typos in user-visible resctrl parameters, and also fix assembly
  constraint bugs that might result in miscompilation"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/asm: Use stricter assembly constraints in bitops
  x86/resctrl: Fix typos in the mba_sc mount option
parents 122c215b 5b77e95d
...@@ -36,16 +36,17 @@ ...@@ -36,16 +36,17 @@
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/ */
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x)) #define RLONG_ADDR(x) "m" (*(volatile long *) (x))
#define WBYTE_ADDR(x) "+m" (*(volatile char *) (x))
#define ADDR BITOP_ADDR(addr) #define ADDR RLONG_ADDR(addr)
/* /*
* We do the locked ops that don't return the old value as * We do the locked ops that don't return the old value as
* a mask operation on a byte. * a mask operation on a byte.
*/ */
#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3)) #define CONST_MASK_ADDR(nr, addr) WBYTE_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr) (1 << ((nr) & 7)) #define CONST_MASK(nr) (1 << ((nr) & 7))
/** /**
...@@ -73,7 +74,7 @@ set_bit(long nr, volatile unsigned long *addr) ...@@ -73,7 +74,7 @@ set_bit(long nr, volatile unsigned long *addr)
: "memory"); : "memory");
} else { } else {
asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0" asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
: BITOP_ADDR(addr) : "Ir" (nr) : "memory"); : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
} }
} }
...@@ -88,7 +89,7 @@ set_bit(long nr, volatile unsigned long *addr) ...@@ -88,7 +89,7 @@ set_bit(long nr, volatile unsigned long *addr)
*/ */
static __always_inline void __set_bit(long nr, volatile unsigned long *addr) static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
{ {
asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory"); asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
} }
/** /**
...@@ -110,8 +111,7 @@ clear_bit(long nr, volatile unsigned long *addr) ...@@ -110,8 +111,7 @@ clear_bit(long nr, volatile unsigned long *addr)
: "iq" ((u8)~CONST_MASK(nr))); : "iq" ((u8)~CONST_MASK(nr)));
} else { } else {
asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0" asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
: BITOP_ADDR(addr) : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
: "Ir" (nr));
} }
} }
...@@ -131,7 +131,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad ...@@ -131,7 +131,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad
static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
{ {
asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr)); asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
} }
static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
...@@ -139,7 +139,7 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile ...@@ -139,7 +139,7 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
bool negative; bool negative;
asm volatile(LOCK_PREFIX "andb %2,%1" asm volatile(LOCK_PREFIX "andb %2,%1"
CC_SET(s) CC_SET(s)
: CC_OUT(s) (negative), ADDR : CC_OUT(s) (negative), WBYTE_ADDR(addr)
: "ir" ((char) ~(1 << nr)) : "memory"); : "ir" ((char) ~(1 << nr)) : "memory");
return negative; return negative;
} }
...@@ -155,13 +155,9 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile ...@@ -155,13 +155,9 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
* __clear_bit() is non-atomic and implies release semantics before the memory * __clear_bit() is non-atomic and implies release semantics before the memory
* operation. It can be used for an unlock if no other CPUs can concurrently * operation. It can be used for an unlock if no other CPUs can concurrently
* modify other bits in the word. * modify other bits in the word.
*
* No memory barrier is required here, because x86 cannot reorder stores past
* older loads. Same principle as spin_unlock.
*/ */
static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{ {
barrier();
__clear_bit(nr, addr); __clear_bit(nr, addr);
} }
...@@ -176,7 +172,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long * ...@@ -176,7 +172,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *
*/ */
static __always_inline void __change_bit(long nr, volatile unsigned long *addr) static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
{ {
asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr)); asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
} }
/** /**
...@@ -196,8 +192,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr) ...@@ -196,8 +192,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
: "iq" ((u8)CONST_MASK(nr))); : "iq" ((u8)CONST_MASK(nr)));
} else { } else {
asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0" asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
: BITOP_ADDR(addr) : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
: "Ir" (nr));
} }
} }
...@@ -242,8 +237,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long * ...@@ -242,8 +237,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
asm(__ASM_SIZE(bts) " %2,%1" asm(__ASM_SIZE(bts) " %2,%1"
CC_SET(c) CC_SET(c)
: CC_OUT(c) (oldbit), ADDR : CC_OUT(c) (oldbit)
: "Ir" (nr)); : ADDR, "Ir" (nr) : "memory");
return oldbit; return oldbit;
} }
...@@ -282,8 +277,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long ...@@ -282,8 +277,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long
asm volatile(__ASM_SIZE(btr) " %2,%1" asm volatile(__ASM_SIZE(btr) " %2,%1"
CC_SET(c) CC_SET(c)
: CC_OUT(c) (oldbit), ADDR : CC_OUT(c) (oldbit)
: "Ir" (nr)); : ADDR, "Ir" (nr) : "memory");
return oldbit; return oldbit;
} }
...@@ -294,8 +289,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon ...@@ -294,8 +289,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
asm volatile(__ASM_SIZE(btc) " %2,%1" asm volatile(__ASM_SIZE(btc) " %2,%1"
CC_SET(c) CC_SET(c)
: CC_OUT(c) (oldbit), ADDR : CC_OUT(c) (oldbit)
: "Ir" (nr) : "memory"); : ADDR, "Ir" (nr) : "memory");
return oldbit; return oldbit;
} }
...@@ -326,7 +321,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l ...@@ -326,7 +321,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
asm volatile(__ASM_SIZE(bt) " %2,%1" asm volatile(__ASM_SIZE(bt) " %2,%1"
CC_SET(c) CC_SET(c)
: CC_OUT(c) (oldbit) : CC_OUT(c) (oldbit)
: "m" (*(unsigned long *)addr), "Ir" (nr)); : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
return oldbit; return oldbit;
} }
......
...@@ -2039,14 +2039,14 @@ static int rdt_get_tree(struct fs_context *fc) ...@@ -2039,14 +2039,14 @@ static int rdt_get_tree(struct fs_context *fc)
enum rdt_param { enum rdt_param {
Opt_cdp, Opt_cdp,
Opt_cdpl2, Opt_cdpl2,
Opt_mba_mpbs, Opt_mba_mbps,
nr__rdt_params nr__rdt_params
}; };
static const struct fs_parameter_spec rdt_param_specs[] = { static const struct fs_parameter_spec rdt_param_specs[] = {
fsparam_flag("cdp", Opt_cdp), fsparam_flag("cdp", Opt_cdp),
fsparam_flag("cdpl2", Opt_cdpl2), fsparam_flag("cdpl2", Opt_cdpl2),
fsparam_flag("mba_mpbs", Opt_mba_mpbs), fsparam_flag("mba_MBps", Opt_mba_mbps),
{} {}
}; };
...@@ -2072,7 +2072,7 @@ static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param) ...@@ -2072,7 +2072,7 @@ static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
case Opt_cdpl2: case Opt_cdpl2:
ctx->enable_cdpl2 = true; ctx->enable_cdpl2 = true;
return 0; return 0;
case Opt_mba_mpbs: case Opt_mba_mbps:
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return -EINVAL; return -EINVAL;
ctx->enable_mba_mbps = true; ctx->enable_mba_mbps = true;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment