Guest User

meltdown

a guest
Jan 3rd, 2018
578
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 196.26 KB | None | 0 0
  1. diff -upr linux-3.10.0-693.11.1.el7/arch/arm64/include/asm/barrier.h linux-3.10.0-693.11.6.el7/arch/arm64/include/asm/barrier.h
  2. --- linux-3.10.0-693.11.1.el7/arch/arm64/include/asm/barrier.h 2017-10-27 11:14:15.000000000 +0200
  3. +++ linux-3.10.0-693.11.6.el7/arch/arm64/include/asm/barrier.h 2017-12-28 19:59:43.000000000 +0100
  4. @@ -44,6 +44,8 @@
  5. #define smp_wmb() asm volatile("dmb ishst" : : : "memory")
  6. #endif
  7.  
  8. +#define gmb() do { } while (0)
  9. +
  10. #define read_barrier_depends() do { } while(0)
  11. #define smp_read_barrier_depends() do { } while(0)
  12.  
  13. diff -upr linux-3.10.0-693.11.1.el7/arch/powerpc/include/asm/barrier.h linux-3.10.0-693.11.6.el7/arch/powerpc/include/asm/barrier.h
  14. --- linux-3.10.0-693.11.1.el7/arch/powerpc/include/asm/barrier.h 2017-10-27 11:14:15.000000000 +0200
  15. +++ linux-3.10.0-693.11.6.el7/arch/powerpc/include/asm/barrier.h 2017-12-28 19:59:43.000000000 +0100
  16. @@ -34,6 +34,8 @@
  17. #define rmb() __asm__ __volatile__ ("sync" : : : "memory")
  18. #define wmb() __asm__ __volatile__ ("sync" : : : "memory")
  19.  
  20. +#define gmb() do { } while (0)
  21. +
  22. #define set_mb(var, value) do { var = value; mb(); } while (0)
  23.  
  24. #ifdef __SUBARCH_HAS_LWSYNC
  25. diff -upr linux-3.10.0-693.11.1.el7/arch/s390/include/asm/barrier.h linux-3.10.0-693.11.6.el7/arch/s390/include/asm/barrier.h
  26. --- linux-3.10.0-693.11.1.el7/arch/s390/include/asm/barrier.h 2017-10-27 11:14:15.000000000 +0200
  27. +++ linux-3.10.0-693.11.6.el7/arch/s390/include/asm/barrier.h 2017-12-28 19:59:43.000000000 +0100
  28. @@ -30,6 +30,8 @@
  29. #define smp_rmb() rmb()
  30. #define smp_wmb() wmb()
  31.  
  32. +#define gmb() do { } while (0)
  33. +
  34. #define read_barrier_depends() do { } while (0)
  35. #define smp_read_barrier_depends() do { } while (0)
  36.  
  37. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/boot/compressed/misc.h linux-3.10.0-693.11.6.el7/arch/x86/boot/compressed/misc.h
  38. --- linux-3.10.0-693.11.1.el7/arch/x86/boot/compressed/misc.h 2017-10-27 11:14:15.000000000 +0200
  39. +++ linux-3.10.0-693.11.6.el7/arch/x86/boot/compressed/misc.h 2017-12-28 19:59:43.000000000 +0100
  40. @@ -7,6 +7,7 @@
  41. * we just keep it from happening
  42. */
  43. #undef CONFIG_PARAVIRT
  44. +#undef CONFIG_KAISER
  45. #ifdef CONFIG_X86_32
  46. #define _ASM_X86_DESC_H 1
  47. #endif
  48. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/events/intel/ds.c linux-3.10.0-693.11.6.el7/arch/x86/events/intel/ds.c
  49. --- linux-3.10.0-693.11.1.el7/arch/x86/events/intel/ds.c 2017-10-27 11:14:15.000000000 +0200
  50. +++ linux-3.10.0-693.11.6.el7/arch/x86/events/intel/ds.c 2017-12-28 19:59:43.000000000 +0100
  51. @@ -1,12 +1,16 @@
  52. #include <linux/bitops.h>
  53. #include <linux/types.h>
  54. #include <linux/slab.h>
  55. +#include <linux/kaiser.h>
  56.  
  57. #include <asm/perf_event.h>
  58. #include <asm/insn.h>
  59.  
  60. #include "../perf_event.h"
  61.  
  62. +static
  63. +DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(struct debug_store, cpu_debug_store);
  64. +
  65. /* The size of a BTS record in bytes: */
  66. #define BTS_RECORD_SIZE 24
  67.  
  68. @@ -268,6 +272,31 @@ void fini_debug_store_on_cpu(int cpu)
  69.  
  70. static DEFINE_PER_CPU(void *, insn_buffer);
  71.  
  72. +static void *dsalloc(size_t size, gfp_t flags, int node)
  73. +{
  74. + unsigned int order = get_order(size);
  75. + struct page *page;
  76. + unsigned long addr;
  77. +
  78. + page = alloc_pages_node(node, flags | __GFP_ZERO, order);
  79. + if (!page)
  80. + return NULL;
  81. + addr = (unsigned long)page_address(page);
  82. + if (kaiser_add_mapping(addr, size, __PAGE_KERNEL | _PAGE_GLOBAL) < 0) {
  83. + __free_pages(page, order);
  84. + addr = 0;
  85. + }
  86. + return (void *)addr;
  87. +}
  88. +
  89. +static void dsfree(const void *buffer, size_t size)
  90. +{
  91. + if (!buffer)
  92. + return;
  93. + kaiser_remove_mapping((unsigned long)buffer, size);
  94. + free_pages((unsigned long)buffer, get_order(size));
  95. +}
  96. +
  97. static int alloc_pebs_buffer(int cpu)
  98. {
  99. struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
  100. @@ -278,7 +307,7 @@ static int alloc_pebs_buffer(int cpu)
  101. if (!x86_pmu.pebs)
  102. return 0;
  103.  
  104. - buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node);
  105. + buffer = dsalloc(x86_pmu.pebs_buffer_size, GFP_KERNEL, node);
  106. if (unlikely(!buffer))
  107. return -ENOMEM;
  108.  
  109. @@ -289,7 +318,7 @@ static int alloc_pebs_buffer(int cpu)
  110. if (x86_pmu.intel_cap.pebs_format < 2) {
  111. ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
  112. if (!ibuffer) {
  113. - kfree(buffer);
  114. + dsfree(buffer, x86_pmu.pebs_buffer_size);
  115. return -ENOMEM;
  116. }
  117. per_cpu(insn_buffer, cpu) = ibuffer;
  118. @@ -315,7 +344,8 @@ static void release_pebs_buffer(int cpu)
  119. kfree(per_cpu(insn_buffer, cpu));
  120. per_cpu(insn_buffer, cpu) = NULL;
  121.  
  122. - kfree((void *)(unsigned long)ds->pebs_buffer_base);
  123. + dsfree((void *)(unsigned long)ds->pebs_buffer_base,
  124. + x86_pmu.pebs_buffer_size);
  125. ds->pebs_buffer_base = 0;
  126. }
  127.  
  128. @@ -329,7 +359,7 @@ static int alloc_bts_buffer(int cpu)
  129. if (!x86_pmu.bts)
  130. return 0;
  131.  
  132. - buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
  133. + buffer = dsalloc(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
  134. if (unlikely(!buffer)) {
  135. WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
  136. return -ENOMEM;
  137. @@ -355,19 +385,15 @@ static void release_bts_buffer(int cpu)
  138. if (!ds || !x86_pmu.bts)
  139. return;
  140.  
  141. - kfree((void *)(unsigned long)ds->bts_buffer_base);
  142. + dsfree((void *)(unsigned long)ds->bts_buffer_base, BTS_BUFFER_SIZE);
  143. ds->bts_buffer_base = 0;
  144. }
  145.  
  146. static int alloc_ds_buffer(int cpu)
  147. {
  148. - int node = cpu_to_node(cpu);
  149. - struct debug_store *ds;
  150. -
  151. - ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node);
  152. - if (unlikely(!ds))
  153. - return -ENOMEM;
  154. + struct debug_store *ds = per_cpu_ptr(&cpu_debug_store, cpu);
  155.  
  156. + memset(ds, 0, sizeof(*ds));
  157. per_cpu(cpu_hw_events, cpu).ds = ds;
  158.  
  159. return 0;
  160. @@ -381,7 +407,6 @@ static void release_ds_buffer(int cpu)
  161. return;
  162.  
  163. per_cpu(cpu_hw_events, cpu).ds = NULL;
  164. - kfree(ds);
  165. }
  166.  
  167. void release_ds_buffers(void)
  168. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/events/intel/uncore.c linux-3.10.0-693.11.6.el7/arch/x86/events/intel/uncore.c
  169. --- linux-3.10.0-693.11.1.el7/arch/x86/events/intel/uncore.c 2017-10-27 11:14:15.000000000 +0200
  170. +++ linux-3.10.0-693.11.6.el7/arch/x86/events/intel/uncore.c 2017-12-28 19:59:43.000000000 +0100
  171. @@ -840,7 +840,7 @@ static int __init uncore_type_init(struc
  172. pmus[i].type = type;
  173. pmus[i].boxes = kzalloc(size, GFP_KERNEL);
  174. if (!pmus[i].boxes)
  175. - return -ENOMEM;
  176. + goto err;
  177. }
  178.  
  179. type->pmus = pmus;
  180. @@ -854,7 +854,7 @@ static int __init uncore_type_init(struc
  181. attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
  182. sizeof(*attr_group), GFP_KERNEL);
  183. if (!attr_group)
  184. - return -ENOMEM;
  185. + goto err;
  186.  
  187. attrs = (struct attribute **)(attr_group + 1);
  188. attr_group->name = "events";
  189. @@ -867,7 +867,15 @@ static int __init uncore_type_init(struc
  190. }
  191.  
  192. type->pmu_group = &uncore_pmu_attr_group;
  193. +
  194. return 0;
  195. +
  196. +err:
  197. + for (i = 0; i < type->num_boxes; i++)
  198. + kfree(pmus[i].boxes);
  199. + kfree(pmus);
  200. +
  201. + return -ENOMEM;
  202. }
  203.  
  204. static int __init
  205. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/ia32/ia32entry.S linux-3.10.0-693.11.6.el7/arch/x86/ia32/ia32entry.S
  206. --- linux-3.10.0-693.11.1.el7/arch/x86/ia32/ia32entry.S 2017-10-27 11:14:15.000000000 +0200
  207. +++ linux-3.10.0-693.11.6.el7/arch/x86/ia32/ia32entry.S 2017-12-28 19:59:43.000000000 +0100
  208. @@ -15,8 +15,10 @@
  209. #include <asm/irqflags.h>
  210. #include <asm/asm.h>
  211. #include <asm/smap.h>
  212. +#include <asm/spec_ctrl.h>
  213. #include <linux/linkage.h>
  214. #include <linux/err.h>
  215. +#include "../kernel/kaiser.h"
  216.  
  217. /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
  218. #include <linux/elf-em.h>
  219. @@ -121,8 +123,12 @@ ENTRY(ia32_sysenter_target)
  220. CFI_DEF_CFA rsp,0
  221. CFI_REGISTER rsp,rbp
  222. SWAPGS_UNSAFE_STACK
  223. + SWITCH_TO_KERNEL_CR3
  224. movq PER_CPU_VAR(kernel_stack), %rsp
  225. addq $(KERNEL_STACK_OFFSET),%rsp
  226. + ENABLE_IBRS
  227. + CLEAR_R8_TO_R15
  228. + STUFF_RSB /* no ret allowed before stuffing the RSB */
  229. /*
  230. * No need to follow this irqs on/off section: the syscall
  231. * disabled irqs, here we enable it straight after entry:
  232. @@ -167,6 +173,7 @@ sysenter_dispatch:
  233. testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
  234. jnz sysexit_audit
  235. sysexit_from_sys_call:
  236. + DISABLE_IBRS
  237. andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
  238. /* clear IF, that popfq doesn't enable interrupts early */
  239. andl $~0x200,EFLAGS-R11(%rsp)
  240. @@ -182,6 +189,7 @@ sysexit_from_sys_call:
  241. popq_cfi %rcx /* User %esp */
  242. CFI_REGISTER rsp,rcx
  243. TRACE_IRQS_ON
  244. + SWITCH_TO_USER_CR3
  245. ENABLE_INTERRUPTS_SYSEXIT32
  246.  
  247. #ifdef CONFIG_AUDITSYSCALL
  248. @@ -280,14 +288,16 @@ ENTRY(ia32_cstar_target)
  249. CFI_REGISTER rip,rcx
  250. /*CFI_REGISTER rflags,r11*/
  251. SWAPGS_UNSAFE_STACK
  252. + SWITCH_TO_KERNEL_CR3
  253. movl %esp,%r8d
  254. CFI_REGISTER rsp,r8
  255. movq PER_CPU_VAR(kernel_stack),%rsp
  256. + ENABLE_IBRS
  257. + STUFF_RSB /* no ret allowed before stuffing the RSB */
  258. /*
  259. * No need to follow this irqs on/off section: the syscall
  260. * disabled irqs and here we enable it straight after entry:
  261. */
  262. - ENABLE_INTERRUPTS(CLBR_NONE)
  263. SAVE_ARGS 8,0,0
  264. movl %eax,%eax /* zero extension */
  265. movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
  266. @@ -300,11 +310,14 @@ ENTRY(ia32_cstar_target)
  267. movq %r11,EFLAGS-ARGOFFSET(%rsp)
  268. /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
  269. movq %r8,RSP-ARGOFFSET(%rsp)
  270. + CLEAR_R8_TO_R15
  271. + ENABLE_INTERRUPTS(CLBR_NONE)
  272. CFI_REL_OFFSET rsp,RSP-ARGOFFSET
  273. /* no need to do an access_ok check here because r8 has been
  274. 32bit zero extended */
  275. /* hardware stack frame is complete now */
  276. ASM_STAC
  277. + movq RSP-ARGOFFSET(%rsp), %r8
  278. 1: movl (%r8),%r9d
  279. _ASM_EXTABLE(1b,ia32_badarg)
  280. ASM_CLAC
  281. @@ -324,6 +337,7 @@ cstar_dispatch:
  282. testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
  283. jnz sysretl_audit
  284. sysretl_from_sys_call:
  285. + DISABLE_IBRS
  286. andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
  287. RESTORE_ARGS 0,-ARG_SKIP,0,0,0
  288. movl RIP-ARGOFFSET(%rsp),%ecx
  289. @@ -336,6 +350,7 @@ sysretl_from_sys_call:
  290. TRACE_IRQS_ON
  291. movl RSP-ARGOFFSET(%rsp),%esp
  292. CFI_RESTORE rsp
  293. + SWITCH_TO_USER_CR3
  294. USERGS_SYSRET32
  295.  
  296. #ifdef CONFIG_AUDITSYSCALL
  297. @@ -408,6 +423,47 @@ ENTRY(ia32_syscall)
  298. PARAVIRT_ADJUST_EXCEPTION_FRAME
  299. ASM_CLAC /* Do this early to minimize exposure */
  300. SWAPGS
  301. + SWITCH_TO_KERNEL_CR3
  302. +
  303. + /*
  304. + * We could remove the boundary checks, but this is more flexible
  305. + * in case we need to change TSS_sp0 during context switch and
  306. + * point it to the real kernel stack if booted in Xen PV.
  307. + */
  308. + cld
  309. + movq %rax, %r8
  310. + movq %rcx, %r9
  311. + /* Check to see if we're on the trampoline stack. */
  312. + movq PER_CPU_VAR(init_tss + TSS_sp0), %rcx
  313. + cmpq %rcx, %rsp
  314. + ja 1f
  315. + leaq -TSS_stack_size(%rcx), %rax
  316. + cmpq %rsp, %rax
  317. + ja 1f
  318. + /*
  319. + * We're on the trampoline stack. Copy the trampoline stack's
  320. + * contents to the kernel task stack and switch to it.
  321. + */
  322. + pushq %rdi
  323. + pushq %rsi
  324. + subq %rsp, %rcx
  325. + movq PER_CPU_VAR(kernel_stack), %rdi
  326. + addq $KERNEL_STACK_OFFSET, %rdi
  327. + subq %rcx, %rdi
  328. + movq %rdi, %rax
  329. + movq %rsp, %rsi
  330. + rep movsb
  331. + movq %rax, %rsp
  332. + popq %rsi
  333. + popq %rdi
  334. +1:
  335. + movq %r8, %rax
  336. + movq %r9, %rcx
  337. +
  338. + ENABLE_IBRS
  339. + CLEAR_R8_TO_R15
  340. + STUFF_RSB /* no ret allowed before stuffing the RSB */
  341. +
  342. /*
  343. * No need to follow this irqs on/off section: the syscall
  344. * disabled irqs and here we enable it straight after entry:
  345. @@ -415,7 +471,6 @@ ENTRY(ia32_syscall)
  346. ENABLE_INTERRUPTS(CLBR_NONE)
  347. movl %eax,%eax
  348. pushq_cfi %rax
  349. - cld
  350. /* note the registers are not zero extended to the sf.
  351. this could be a problem. */
  352. SAVE_ARGS 0,1,0
  353. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/include/asm/barrier.h linux-3.10.0-693.11.6.el7/arch/x86/include/asm/barrier.h
  354. --- linux-3.10.0-693.11.1.el7/arch/x86/include/asm/barrier.h 2017-10-27 11:14:15.000000000 +0200
  355. +++ linux-3.10.0-693.11.6.el7/arch/x86/include/asm/barrier.h 2017-12-28 19:59:43.000000000 +0100
  356. @@ -24,6 +24,8 @@
  357. #define wmb() asm volatile("sfence" ::: "memory")
  358. #endif
  359.  
  360. +#define gmb() alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC)
  361. +
  362. #ifdef CONFIG_X86_PPRO_FENCE
  363. #define dma_rmb() rmb()
  364. #else
  365. @@ -100,7 +102,6 @@ do { \
  366. */
  367. static __always_inline void rdtsc_barrier(void)
  368. {
  369. - alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
  370. alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
  371. }
  372.  
  373. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/include/asm/cpufeature.h linux-3.10.0-693.11.6.el7/arch/x86/include/asm/cpufeature.h
  374. --- linux-3.10.0-693.11.1.el7/arch/x86/include/asm/cpufeature.h 2017-10-27 11:14:15.000000000 +0200
  375. +++ linux-3.10.0-693.11.6.el7/arch/x86/include/asm/cpufeature.h 2017-12-28 19:59:43.000000000 +0100
  376. @@ -93,7 +93,7 @@
  377. #define X86_FEATURE_SYSCALL32 (3*32+14) /* "" syscall in ia32 userspace */
  378. #define X86_FEATURE_SYSENTER32 (3*32+15) /* "" sysenter in ia32 userspace */
  379. #define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well */
  380. -#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* "" Mfence synchronizes RDTSC */
  381. +
  382. #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */
  383. #define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */
  384. #define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
  385. @@ -191,6 +191,7 @@
  386. #define X86_FEATURE_CAT_L3 (7*32+ 4) /* Cache Allocation Technology L3 */
  387. #define X86_FEATURE_CAT_L2 (7*32+ 5) /* Cache Allocation Technology L2 */
  388. #define X86_FEATURE_CDP_L3 (7*32+ 6) /* Code and Data Prioritization L3 */
  389. +#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
  390.  
  391. #define X86_FEATURE_HW_PSTATE (7*32+ 8) /* AMD HW-PState */
  392. #define X86_FEATURE_PROC_FEEDBACK (7*32+ 9) /* AMD ProcFeedbackInterface */
  393. @@ -198,7 +199,8 @@
  394. #define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
  395. #define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
  396. #define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
  397. -#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
  398. +#define X86_FEATURE_SPEC_CTRL ( 7*32+19) /* Control Speculation Control */
  399. +#define X86_FEATURE_IBPB_SUPPORT ( 7*32+20) /* Indirect Branch Prediction Barrier Support */
  400.  
  401. /* Virtualization flags: Linux defined, word 8 */
  402. #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
  403. @@ -284,6 +286,7 @@
  404.  
  405. /* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
  406. #define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
  407. +#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
  408.  
  409. /*
  410. * BUG word(s)
  411. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/include/asm/desc.h linux-3.10.0-693.11.6.el7/arch/x86/include/asm/desc.h
  412. --- linux-3.10.0-693.11.1.el7/arch/x86/include/asm/desc.h 2017-10-27 11:14:15.000000000 +0200
  413. +++ linux-3.10.0-693.11.6.el7/arch/x86/include/asm/desc.h 2017-12-28 19:59:43.000000000 +0100
  414. @@ -43,7 +43,7 @@ struct gdt_page {
  415. struct desc_struct gdt[GDT_ENTRIES];
  416. } __attribute__((aligned(PAGE_SIZE)));
  417.  
  418. -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
  419. +DECLARE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(struct gdt_page, gdt_page);
  420.  
  421. static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
  422. {
  423. Only in linux-3.10.0-693.11.6.el7/arch/x86/include/asm: kaiser.h
  424. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/include/asm/kvm_host.h linux-3.10.0-693.11.6.el7/arch/x86/include/asm/kvm_host.h
  425. --- linux-3.10.0-693.11.1.el7/arch/x86/include/asm/kvm_host.h 2017-10-27 11:14:15.000000000 +0200
  426. +++ linux-3.10.0-693.11.6.el7/arch/x86/include/asm/kvm_host.h 2017-12-28 19:59:43.000000000 +0100
  427. @@ -32,6 +32,7 @@
  428. #include <asm/msr-index.h>
  429. #include <asm/asm.h>
  430. #include <asm/kvm_page_track.h>
  431. +#include <asm/spec_ctrl.h>
  432.  
  433. #define KVM_MAX_VCPUS 384
  434. #define KVM_SOFT_MAX_VCPUS 384
  435. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/include/asm/mmu_context.h linux-3.10.0-693.11.6.el7/arch/x86/include/asm/mmu_context.h
  436. --- linux-3.10.0-693.11.1.el7/arch/x86/include/asm/mmu_context.h 2017-10-27 11:14:15.000000000 +0200
  437. +++ linux-3.10.0-693.11.6.el7/arch/x86/include/asm/mmu_context.h 2017-12-28 19:59:43.000000000 +0100
  438. @@ -7,6 +7,7 @@
  439. #include <asm/tlbflush.h>
  440. #include <asm/paravirt.h>
  441. #include <asm/mpx.h>
  442. +#include <asm/spec_ctrl.h>
  443. #ifndef CONFIG_PARAVIRT
  444. #include <asm-generic/mm_hooks.h>
  445.  
  446. @@ -31,6 +32,11 @@ static inline void enter_lazy_tlb(struct
  447. #endif
  448. }
  449.  
  450. +static inline void load_cr3(pgd_t *pgdir)
  451. +{
  452. + __load_cr3(__pa(pgdir));
  453. +}
  454. +
  455. static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
  456. struct task_struct *tsk)
  457. {
  458. @@ -43,6 +49,12 @@ static inline void switch_mm(struct mm_s
  459. #endif
  460. cpumask_set_cpu(cpu, mm_cpumask(next));
  461.  
  462. +#ifndef CONFIG_PREEMPT_RCU
  463. + spec_ctrl_ibpb_if_different_creds(tsk);
  464. +#else
  465. + spec_ctrl_ibpb();
  466. +#endif
  467. +
  468. /*
  469. * Re-load page tables.
  470. *
  471. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/include/asm/msr.h linux-3.10.0-693.11.6.el7/arch/x86/include/asm/msr.h
  472. --- linux-3.10.0-693.11.1.el7/arch/x86/include/asm/msr.h 2017-10-27 11:14:15.000000000 +0200
  473. +++ linux-3.10.0-693.11.6.el7/arch/x86/include/asm/msr.h 2017-12-28 19:59:43.000000000 +0100
  474. @@ -154,7 +154,6 @@ static __always_inline unsigned long lon
  475. * that some other imaginary CPU is updating continuously with a
  476. * time stamp.
  477. */
  478. - alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
  479. alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
  480.  
  481. return rdtsc();
  482. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/include/asm/msr-index.h linux-3.10.0-693.11.6.el7/arch/x86/include/asm/msr-index.h
  483. --- linux-3.10.0-693.11.1.el7/arch/x86/include/asm/msr-index.h 2017-10-27 11:14:15.000000000 +0200
  484. +++ linux-3.10.0-693.11.6.el7/arch/x86/include/asm/msr-index.h 2017-12-28 19:59:43.000000000 +0100
  485. @@ -32,6 +32,10 @@
  486. #define EFER_FFXSR (1<<_EFER_FFXSR)
  487.  
  488. /* Intel MSRs. Some also available on other CPUs */
  489. +
  490. +#define MSR_IA32_SPEC_CTRL 0x00000048
  491. +#define MSR_IA32_PRED_CMD 0x00000049
  492. +
  493. #define MSR_IA32_PERFCTR0 0x000000c1
  494. #define MSR_IA32_PERFCTR1 0x000000c2
  495. #define MSR_FSB_FREQ 0x000000cd
  496. @@ -316,6 +320,9 @@
  497. #define MSR_F15H_PERF_CTR 0xc0010201
  498. #define MSR_F15H_NB_PERF_CTL 0xc0010240
  499. #define MSR_F15H_NB_PERF_CTR 0xc0010241
  500. +#define MSR_F15H_PTSC 0xc0010280
  501. +#define MSR_F15H_IC_CFG 0xc0011021
  502. +#define MSR_F15H_IC_CFG_DIS_IND BIT_ULL(14)
  503.  
  504. /* Fam 10h MSRs */
  505. #define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
  506. @@ -325,6 +332,8 @@
  507. #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
  508. #define FAM10H_MMIO_CONF_BASE_SHIFT 20
  509. #define MSR_FAM10H_NODE_ID 0xc001100c
  510. +#define MSR_F10H_DECFG 0xc0011029
  511. +#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
  512.  
  513. /* K8 MSRs */
  514. #define MSR_K8_TOP_MEM1 0xc001001a
  515. @@ -406,6 +415,8 @@
  516. #define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1)
  517. #define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
  518. #define FEATURE_CONTROL_LMCE (1<<20)
  519. +#define FEATURE_ENABLE_IBRS (1<<0)
  520. +#define FEATURE_SET_IBPB (1<<0)
  521.  
  522. #define MSR_IA32_APICBASE 0x0000001b
  523. #define MSR_IA32_APICBASE_BSP (1<<8)
  524. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/include/asm/mwait.h linux-3.10.0-693.11.6.el7/arch/x86/include/asm/mwait.h
  525. --- linux-3.10.0-693.11.1.el7/arch/x86/include/asm/mwait.h 2017-10-27 11:14:15.000000000 +0200
  526. +++ linux-3.10.0-693.11.6.el7/arch/x86/include/asm/mwait.h 2017-12-28 19:59:43.000000000 +0100
  527. @@ -2,6 +2,7 @@
  528. #define _ASM_X86_MWAIT_H
  529.  
  530. #include <linux/sched.h>
  531. +#include <asm/spec_ctrl.h>
  532.  
  533. #define MWAIT_SUBSTATE_MASK 0xf
  534. #define MWAIT_CSTATE_MASK 0xf
  535. @@ -46,9 +47,16 @@ static inline void mwait_idle_with_hints
  536. if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
  537. clflush((void *)&current_thread_info()->flags);
  538.  
  539. + /*
  540. + * IRQs must be disabled here and nmi uses the
  541. + * save_paranoid model which always enables ibrs on
  542. + * exception entry before any indirect jump can run.
  543. + */
  544. + spec_ctrl_disable_ibrs();
  545. __monitor((void *)&current_thread_info()->flags, 0, 0);
  546. if (!need_resched())
  547. __mwait(eax, ecx);
  548. + spec_ctrl_enable_ibrs();
  549. }
  550. __current_clr_polling();
  551. }
  552. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/include/asm/pgtable_64.h linux-3.10.0-693.11.6.el7/arch/x86/include/asm/pgtable_64.h
  553. --- linux-3.10.0-693.11.1.el7/arch/x86/include/asm/pgtable_64.h 2017-10-27 11:14:15.000000000 +0200
  554. +++ linux-3.10.0-693.11.6.el7/arch/x86/include/asm/pgtable_64.h 2017-12-28 19:59:43.000000000 +0100
  555. @@ -2,6 +2,7 @@
  556. #define _ASM_X86_PGTABLE_64_H
  557.  
  558. #include <linux/const.h>
  559. +#include <linux/kaiser.h>
  560. #include <asm/pgtable_64_types.h>
  561.  
  562. #ifndef __ASSEMBLY__
  563. @@ -112,10 +113,159 @@ static inline void native_pud_clear(pud_
  564. native_set_pud(pud, native_make_pud(0));
  565. }
  566.  
  567. +#ifdef CONFIG_KAISER
  568. +/*
  569. + * All top-level KAISER page tables are order-1 pages (8k-aligned
  570. + * and 8k in size). The kernel one is at the beginning 4k and
  571. + * the user (shadow) one is in the last 4k. To switch between
  572. + * them, you just need to flip the 12th bit in their addresses.
  573. + */
  574. +#define KAISER_PGTABLE_SWITCH_BIT PAGE_SHIFT
  575. +
  576. +/*
  577. + * This generates better code than the inline assembly in
  578. + * __set_bit().
  579. + */
  580. +static inline void *ptr_set_bit(void *ptr, int bit)
  581. +{
  582. + unsigned long __ptr = (unsigned long)ptr;
  583. +
  584. + __ptr |= (1<<bit);
  585. + return (void *)__ptr;
  586. +}
  587. +static inline void *ptr_clear_bit(void *ptr, int bit)
  588. +{
  589. + unsigned long __ptr = (unsigned long)ptr;
  590. +
  591. + __ptr &= ~(1<<bit);
  592. + return (void *)__ptr;
  593. +}
  594. +
  595. +static inline pgd_t *kernel_to_shadow_pgdp(pgd_t *pgdp)
  596. +{
  597. + return ptr_set_bit(pgdp, KAISER_PGTABLE_SWITCH_BIT);
  598. +}
  599. +static inline pgd_t *shadow_to_kernel_pgdp(pgd_t *pgdp)
  600. +{
  601. + return ptr_clear_bit(pgdp, KAISER_PGTABLE_SWITCH_BIT);
  602. +}
  603. +#endif /* CONFIG_KAISER */
  604. +
  605. +/*
  606. + * Page table pages are page-aligned. The lower half of the top
  607. + * level is used for userspace and the top half for the kernel.
  608. + *
  609. + * Returns true for parts of the PGD that map userspace and
  610. + * false for the parts that map the kernel.
  611. + */
  612. +static inline bool pgdp_maps_userspace(void *__ptr)
  613. +{
  614. + unsigned long ptr = (unsigned long)__ptr;
  615. +
  616. + return (ptr & ~PAGE_MASK) < (PAGE_SIZE / 2);
  617. +}
  618. +
  619. +/*
  620. + * Does this PGD allow access from userspace?
  621. + */
  622. +static inline bool pgd_userspace_access(pgd_t pgd)
  623. +{
  624. + return pgd.pgd & _PAGE_USER;
  625. +}
  626. +
  627. +static inline void kaiser_poison_pgd(pgd_t *pgd)
  628. +{
  629. + if (pgd->pgd & _PAGE_PRESENT && __supported_pte_mask & _PAGE_NX)
  630. + pgd->pgd |= _PAGE_NX;
  631. +}
  632. +
  633. +static inline void kaiser_unpoison_pgd(pgd_t *pgd)
  634. +{
  635. + if (pgd->pgd & _PAGE_PRESENT && __supported_pte_mask & _PAGE_NX)
  636. + pgd->pgd &= ~_PAGE_NX;
  637. +}
  638. +
  639. +static inline void kaiser_poison_pgd_atomic(pgd_t *pgd)
  640. +{
  641. + BUILD_BUG_ON(_PAGE_NX == 0);
  642. + if (pgd->pgd & _PAGE_PRESENT && __supported_pte_mask & _PAGE_NX)
  643. + set_bit(_PAGE_BIT_NX, &pgd->pgd);
  644. +}
  645. +
  646. +static inline void kaiser_unpoison_pgd_atomic(pgd_t *pgd)
  647. +{
  648. + if (pgd->pgd & _PAGE_PRESENT && __supported_pte_mask & _PAGE_NX)
  649. + clear_bit(_PAGE_BIT_NX, &pgd->pgd);
  650. +}
  651. +
  652. +/*
  653. + * Take a PGD location (pgdp) and a pgd value that needs
  654. + * to be set there. Populates the shadow and returns
  655. + * the resulting PGD that must be set in the kernel copy
  656. + * of the page tables.
  657. + */
  658. +static inline pgd_t kaiser_set_shadow_pgd(pgd_t *pgdp, pgd_t pgd)
  659. +{
  660. +#ifdef CONFIG_KAISER
  661. + if (pgd_userspace_access(pgd)) {
  662. + if (pgdp_maps_userspace(pgdp)) {
  663. + VM_WARN_ON_ONCE(!is_kaiser_pgd(pgdp));
  664. + /*
  665. + * The user/shadow page tables get the full
  666. + * PGD, accessible from userspace:
  667. + */
  668. + kernel_to_shadow_pgdp(pgdp)->pgd = pgd.pgd;
  669. + /*
  670. + * For the copy of the pgd that the kernel
  671. + * uses, make it unusable to userspace. This
  672. + * ensures if we get out to userspace with the
  673. + * wrong CR3 value, userspace will crash
  674. + * instead of running.
  675. + */
  676. + if (kaiser_active())
  677. + kaiser_poison_pgd(&pgd);
  678. + }
  679. + } else if (pgd_userspace_access(*pgdp)) {
  680. + /*
  681. + * We are clearing a _PAGE_USER PGD for which we
  682. + * presumably populated the shadow. We must now
  683. + * clear the shadow PGD entry.
  684. + */
  685. + if (pgdp_maps_userspace(pgdp)) {
  686. + VM_WARN_ON_ONCE(!is_kaiser_pgd(pgdp));
  687. + kernel_to_shadow_pgdp(pgdp)->pgd = pgd.pgd;
  688. + } else {
  689. + /*
  690. + * Attempted to clear a _PAGE_USER PGD which
  691. + * is in the kernel portion of the address
  692. + * space. PGDs are pre-populated and we
  693. + * never clear them.
  694. + */
  695. + WARN_ON_ONCE(1);
  696. + }
  697. + } else {
  698. + /*
  699. + * _PAGE_USER was not set in either the PGD being set
  700. + * or cleared. All kernel PGDs should be
  701. + * pre-populated so this should never happen after
  702. + * boot.
  703. + */
  704. + VM_WARN_ON_ONCE(system_state == SYSTEM_RUNNING &&
  705. + is_kaiser_pgd(pgdp));
  706. + }
  707. +#endif
  708. + /* return the copy of the PGD we want the kernel to use: */
  709. + return pgd;
  710. +}
  711. +
  712. static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
  713. {
  714. mm_track_pgd(pgdp);
  715. +#ifdef CONFIG_KAISER
  716. + *pgdp = kaiser_set_shadow_pgd(pgdp, pgd);
  717. +#else /* CONFIG_KAISER */
  718. *pgdp = pgd;
  719. +#endif
  720. }
  721.  
  722. static inline void native_pgd_clear(pgd_t *pgd)
  723. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/include/asm/pgtable.h linux-3.10.0-693.11.6.el7/arch/x86/include/asm/pgtable.h
  724. --- linux-3.10.0-693.11.1.el7/arch/x86/include/asm/pgtable.h 2017-10-27 11:14:15.000000000 +0200
  725. +++ linux-3.10.0-693.11.6.el7/arch/x86/include/asm/pgtable.h 2017-12-28 19:59:43.000000000 +0100
  726. @@ -660,7 +660,12 @@ static inline pud_t *pud_offset(pgd_t *p
  727.  
  728. static inline int pgd_bad(pgd_t pgd)
  729. {
  730. - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
  731. + unsigned long ignore_flags = _PAGE_USER;
  732. +
  733. + if (IS_ENABLED(CONFIG_KAISER))
  734. + ignore_flags |= _PAGE_NX;
  735. +
  736. + return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
  737. }
  738.  
  739. static inline int pgd_none(pgd_t pgd)
  740. @@ -886,6 +891,11 @@ static inline void pmdp_set_wrprotect(st
  741. static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
  742. {
  743. memcpy(dst, src, count * sizeof(pgd_t));
  744. +#ifdef CONFIG_KAISER
  745. + /* Clone the shadow pgd part as well */
  746. + memcpy(kernel_to_shadow_pgdp(dst), kernel_to_shadow_pgdp(src),
  747. + count * sizeof(pgd_t));
  748. +#endif
  749. }
  750.  
  751. #define PTE_SHIFT ilog2(PTRS_PER_PTE)
  752. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/include/asm/pgtable_types.h linux-3.10.0-693.11.6.el7/arch/x86/include/asm/pgtable_types.h
  753. --- linux-3.10.0-693.11.1.el7/arch/x86/include/asm/pgtable_types.h 2017-10-27 11:14:15.000000000 +0200
  754. +++ linux-3.10.0-693.11.6.el7/arch/x86/include/asm/pgtable_types.h 2017-12-28 19:59:43.000000000 +0100
  755. @@ -175,6 +175,17 @@
  756. #define _PAGE_CACHE_UC_MINUS (_PAGE_PCD)
  757. #define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT)
  758.  
  759. +/* The ASID is the lower 12 bits of CR3 */
  760. +#define X86_CR3_PCID_ASID_MASK (_AC((1<<12)-1, UL))
  761. +
  762. +/* Mask for all the PCID-related bits in CR3: */
  763. +#define X86_CR3_PCID_MASK (X86_CR3_PCID_NOFLUSH | X86_CR3_PCID_ASID_MASK)
  764. +
  765. +/* Make sure this is only usable in KAISER #ifdef'd code: */
  766. +#ifdef CONFIG_KAISER
  767. +#define X86_CR3_KAISER_SWITCH_BIT 11
  768. +#endif
  769. +
  770. /*
  771. * The cache modes defined here are used to translate between pure SW usage
  772. * and the HW defined cache mode bits and/or PAT entries.
  773. @@ -214,8 +225,20 @@ enum page_cache_mode {
  774. #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
  775. _PAGE_ACCESSED)
  776.  
  777. +/*
  778. + * Disable global pages for anything using the default
  779. + * __PAGE_KERNEL* macros. PGE will still be enabled
  780. + * and _PAGE_GLOBAL may still be used carefully.
  781. + */
  782. +#ifdef CONFIG_KAISER
  783. +#define __PAGE_KERNEL_GLOBAL 0
  784. +#else
  785. +#define __PAGE_KERNEL_GLOBAL _PAGE_GLOBAL
  786. +#endif
  787. +
  788. #define __PAGE_KERNEL_EXEC \
  789. - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
  790. + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | \
  791. + __PAGE_KERNEL_GLOBAL)
  792. #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
  793.  
  794. #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
  795. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/include/asm/processor.h linux-3.10.0-693.11.6.el7/arch/x86/include/asm/processor.h
  796. --- linux-3.10.0-693.11.1.el7/arch/x86/include/asm/processor.h 2017-10-27 11:14:15.000000000 +0200
  797. +++ linux-3.10.0-693.11.6.el7/arch/x86/include/asm/processor.h 2017-12-28 19:59:43.000000000 +0100
  798. @@ -29,6 +29,7 @@ struct mm_struct;
  799. #include <linux/math64.h>
  800. #include <linux/err.h>
  801. #include <linux/irqflags.h>
  802. +#include <linux/magic.h>
  803.  
  804. #include <linux/rh_kabi.h>
  805.  
  806. @@ -226,11 +227,6 @@ static inline void native_cpuid(unsigned
  807. : "memory");
  808. }
  809.  
  810. -static inline void load_cr3(pgd_t *pgdir)
  811. -{
  812. - write_cr3(__pa(pgdir));
  813. -}
  814. -
  815. #ifdef CONFIG_X86_32
  816. /* This is the TSS defined by the hardware. */
  817. struct x86_hw_tss {
  818. @@ -306,11 +302,30 @@ struct tss_struct {
  819. /*
  820. * .. and then another 0x100 bytes for the emergency kernel stack:
  821. */
  822. + RH_KABI_FILL_HOLE(unsigned long stack_canary)
  823. unsigned long stack[64];
  824.  
  825. -} ____cacheline_aligned;
  826. + /*
  827. + *
  828. + * The Intel SDM says (Volume 3, 7.2.1):
  829. + *
  830. + * Avoid placing a page boundary in the part of the TSS that the
  831. + * processor reads during a task switch (the first 104 bytes). The
  832. + * processor may not correctly perform address translations if a
  833. + * boundary occurs in this area. During a task switch, the processor
  834. + * reads and writes into the first 104 bytes of each TSS (using
  835. + * contiguous physical addresses beginning with the physical address
  836. + * of the first byte of the TSS). So, after TSS access begins, if
  837. + * part of the 104 bytes is not physically contiguous, the processor
  838. + * will access incorrect information without generating a page-fault
  839. + * exception.
  840. + *
  841. + * There are also a lot of errata involving the TSS spanning a page
  842. + * boundary. Assert that we're not doing that.
  843. + */
  844. +} __attribute__((__aligned__(PAGE_SIZE)));
  845.  
  846. -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
  847. +DECLARE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(struct tss_struct, init_tss);
  848.  
  849. /*
  850. * Save the original ist values for checking stack pointers during debugging
  851. @@ -615,8 +630,13 @@ static inline void set_in_cr4(unsigned l
  852. unsigned long cr4;
  853.  
  854. mmu_cr4_features |= mask;
  855. - if (trampoline_cr4_features)
  856. - *trampoline_cr4_features = mmu_cr4_features;
  857. + if (trampoline_cr4_features) {
  858. + /*
  859. + * Mask off features that don't work outside long mode (just
  860. + * PCIDE for now).
  861. + */
  862. + *trampoline_cr4_features = mmu_cr4_features & ~X86_CR4_PCIDE;
  863. + }
  864. cr4 = read_cr4();
  865. cr4 |= mask;
  866. write_cr4(cr4);
  867. @@ -937,7 +957,8 @@ extern unsigned long thread_saved_pc(str
  868. }
  869.  
  870. #define INIT_TSS { \
  871. - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
  872. + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack), \
  873. + .stack_canary = STACK_END_MAGIC, \
  874. }
  875.  
  876. /*
  877. Only in linux-3.10.0-693.11.6.el7/arch/x86/include/asm: spec_ctrl.h
  878. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/include/asm/stacktrace.h linux-3.10.0-693.11.6.el7/arch/x86/include/asm/stacktrace.h
  879. --- linux-3.10.0-693.11.1.el7/arch/x86/include/asm/stacktrace.h 2017-10-27 11:14:15.000000000 +0200
  880. +++ linux-3.10.0-693.11.6.el7/arch/x86/include/asm/stacktrace.h 2017-12-28 19:59:43.000000000 +0100
  881. @@ -9,8 +9,6 @@
  882. #include <linux/uaccess.h>
  883. #include <linux/ptrace.h>
  884.  
  885. -extern int kstack_depth_to_print;
  886. -
  887. struct thread_info;
  888. struct stacktrace_ops;
  889.  
  890. @@ -85,10 +83,6 @@ extern void
  891. show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
  892. unsigned long *stack, unsigned long bp, char *log_lvl);
  893.  
  894. -extern void
  895. -show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
  896. - unsigned long *sp, unsigned long bp, char *log_lvl);
  897. -
  898. extern unsigned int code_bytes;
  899.  
  900. /* The form of the top of the frame on the stack */
  901. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/include/asm/thread_info.h linux-3.10.0-693.11.6.el7/arch/x86/include/asm/thread_info.h
  902. --- linux-3.10.0-693.11.1.el7/arch/x86/include/asm/thread_info.h 2017-10-27 11:14:15.000000000 +0200
  903. +++ linux-3.10.0-693.11.6.el7/arch/x86/include/asm/thread_info.h 2017-12-28 19:59:43.000000000 +0100
  904. @@ -204,6 +204,8 @@ static inline struct thread_info *curren
  905. #ifndef __ASSEMBLY__
  906. DECLARE_PER_CPU(unsigned long, kernel_stack);
  907. DECLARE_PER_CPU(unsigned long, __kernel_stack_70__);
  908. +DECLARE_PER_CPU_USER_MAPPED(unsigned int, kaiser_enabled_pcp);
  909. +DECLARE_PER_CPU_USER_MAPPED(unsigned int, spec_ctrl_pcp);
  910.  
  911. static inline struct thread_info *current_thread_info(void)
  912. {
  913. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/include/asm/tlbflush.h linux-3.10.0-693.11.6.el7/arch/x86/include/asm/tlbflush.h
  914. --- linux-3.10.0-693.11.1.el7/arch/x86/include/asm/tlbflush.h 2017-10-27 11:14:15.000000000 +0200
  915. +++ linux-3.10.0-693.11.6.el7/arch/x86/include/asm/tlbflush.h 2017-12-28 19:59:43.000000000 +0100
  916. @@ -15,9 +15,104 @@
  917. #define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
  918. #endif
  919.  
  920. +static inline void __invpcid(unsigned long pcid, unsigned long addr,
  921. + unsigned long type)
  922. +{
  923. + struct { u64 d[2]; } desc = { { pcid, addr } };
  924. +
  925. + /*
  926. + * The memory clobber is because the whole point is to invalidate
  927. + * stale TLB entries and, especially if we're flushing global
  928. + * mappings, we don't want the compiler to reorder any subsequent
  929. + * memory accesses before the TLB flush.
  930. + *
  931. + * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
  932. + * invpcid (%rcx), %rax in long mode.
  933. + */
  934. + asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
  935. + : : "m" (desc), "a" (type), "c" (&desc) : "memory");
  936. +}
  937. +
  938. +#define INVPCID_TYPE_INDIV_ADDR 0
  939. +#define INVPCID_TYPE_SINGLE_CTXT 1
  940. +#define INVPCID_TYPE_ALL_INCL_GLOBAL 2
  941. +#define INVPCID_TYPE_ALL_NON_GLOBAL 3
  942. +
  943. +/* Flush all mappings for a given pcid and addr, not including globals. */
  944. +static inline void invpcid_flush_one(unsigned long pcid,
  945. + unsigned long addr)
  946. +{
  947. + __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
  948. +}
  949. +
  950. +/* Flush all mappings for a given PCID, not including globals. */
  951. +static inline void invpcid_flush_single_context(unsigned long pcid)
  952. +{
  953. + __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
  954. +}
  955. +
  956. +/* Flush all mappings, including globals, for all PCIDs. */
  957. +static inline void invpcid_flush_all(void)
  958. +{
  959. + __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
  960. +}
  961. +
  962. +/* Flush all mappings for all PCIDs except globals. */
  963. +static inline void invpcid_flush_all_nonglobals(void)
  964. +{
  965. + __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
  966. +}
  967. +
  968. +#ifdef CONFIG_KAISER
  969. +static __always_inline void __load_cr3(unsigned long cr3)
  970. +{
  971. + if (static_cpu_has(X86_FEATURE_PCID) && kaiser_active()) {
  972. + unsigned long shadow_cr3;
  973. + VM_WARN_ON(cr3 & KAISER_SHADOW_PCID_ASID);
  974. + VM_WARN_ON(cr3 & (1<<KAISER_PGTABLE_SWITCH_BIT));
  975. + VM_WARN_ON(cr3 & X86_CR3_PCID_NOFLUSH);
  976. +
  977. + if (this_cpu_has(X86_FEATURE_INVPCID_SINGLE)) {
  978. + invpcid_flush_single_context(KAISER_SHADOW_PCID_ASID);
  979. + write_cr3(cr3);
  980. + return;
  981. + }
  982. +
  983. + shadow_cr3 = cr3 | (1<<KAISER_PGTABLE_SWITCH_BIT) |
  984. + KAISER_SHADOW_PCID_ASID;
  985. + asm volatile("\tjmp 1f\n\t"
  986. + "2:\n\t"
  987. + ".section .entry.text, \"ax\"\n\t"
  988. + "1:\n\t"
  989. + "pushf\n\t"
  990. + "cli\n\t"
  991. + "movq %0, %%cr3\n\t"
  992. + "movq %1, %%cr3\n\t"
  993. + "popf\n\t"
  994. + "jmp 2b\n\t"
  995. + ".previous" : :
  996. + "r" (shadow_cr3), "r" (cr3) :
  997. + "memory");
  998. + } else
  999. + write_cr3(cr3);
  1000. +}
  1001. +#else /* CONFIG_KAISER */
  1002. +static __always_inline void __load_cr3(unsigned long cr3)
  1003. +{
  1004. + write_cr3(cr3);
  1005. +}
  1006. +#endif /* CONFIG_KAISER */
  1007. +
  1008. static inline void __native_flush_tlb(void)
  1009. {
  1010. - native_write_cr3(native_read_cr3());
  1011. + if (!static_cpu_has(X86_FEATURE_INVPCID)) {
  1012. + __load_cr3(native_read_cr3());
  1013. + return;
  1014. + }
  1015. + /*
  1016. + * Note, this works with CR4.PCIDE=0 or 1.
  1017. + */
  1018. + invpcid_flush_all_nonglobals();
  1019. }
  1020.  
  1021. static inline void __native_flush_tlb_global_irq_disabled(void)
  1022. @@ -25,9 +120,24 @@ static inline void __native_flush_tlb_gl
  1023. unsigned long cr4;
  1024.  
  1025. cr4 = native_read_cr4();
  1026. - /* clear PGE */
  1027. - native_write_cr4(cr4 & ~X86_CR4_PGE);
  1028. - /* write old PGE again and flush TLBs */
  1029. + /*
  1030. + * This function is only called on systems that support X86_CR4_PGE
  1031. + * and where we expect X86_CR4_PGE to be set. Warn if we are called
  1032. + * without PGE set.
  1033. + */
  1034. + WARN_ON_ONCE(!(cr4 & X86_CR4_PGE));
  1035. +
  1036. + /*
  1037. + * Architecturally, any _change_ to X86_CR4_PGE will fully flush
  1038. + * all entries. Make sure that we _change_ the bit, regardless of
  1039. + * whether we had X86_CR4_PGE set in the first place.
  1040. + *
  1041. + * Note that just toggling PGE *also* flushes all entries from all
  1042. + * PCIDs, regardless of the state of X86_CR4_PCIDE.
  1043. + */
  1044. + native_write_cr4(cr4 ^ X86_CR4_PGE);
  1045. +
  1046. + /* Put original CR4 value back: */
  1047. native_write_cr4(cr4);
  1048. }
  1049.  
  1050. @@ -35,6 +145,17 @@ static inline void __native_flush_tlb_gl
  1051. {
  1052. unsigned long flags;
  1053.  
  1054. + if (static_cpu_has(X86_FEATURE_INVPCID)) {
  1055. + /*
  1056. + * Using INVPCID is considerably faster than a pair of writes
  1057. + * to CR4 sandwiched inside an IRQ flag save/restore.
  1058. + *
  1059. + * Note, this works with CR4.PCIDE=0 or 1.
  1060. + */
  1061. + invpcid_flush_all();
  1062. + return;
  1063. + }
  1064. +
  1065. /*
  1066. * Read-modify-write to CR4 - protect it from preemption and
  1067. * from interrupts. (Use the raw variant because this code can
  1068. @@ -49,7 +170,56 @@ static inline void __native_flush_tlb_gl
  1069.  
  1070. static inline void __native_flush_tlb_single(unsigned long addr)
  1071. {
  1072. - asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
  1073. +#ifdef CONFIG_KAISER
  1074. + unsigned long cr3, shadow_cr3;
  1075. +
  1076. + /* Flush the address out of both PCIDs. */
  1077. + /*
  1078. + * An optimization here might be to determine addresses
  1079. + * that are only kernel-mapped and only flush the kernel
  1080. + * ASID. But, userspace flushes are probably much more
  1081. + * important performance-wise.
  1082. + *
  1083. + * Make sure to do only a single invpcid when KAISER is
  1084. + * disabled and we have only a single ASID.
  1085. + */
  1086. + if (static_cpu_has(X86_FEATURE_PCID) && kaiser_active()) {
  1087. + /*
  1088. + * Some platforms #GP if we call invpcid(type=1/2) before
  1089. + * CR4.PCIDE=1. Just call invpcid in the case we are called
  1090. + * early.
  1091. + */
  1092. + if (this_cpu_has(X86_FEATURE_INVPCID_SINGLE)) {
  1093. + invpcid_flush_one(KAISER_SHADOW_PCID_ASID, addr);
  1094. + invpcid_flush_one(0, addr);
  1095. + return;
  1096. + }
  1097. +
  1098. + cr3 = native_read_cr3();
  1099. + VM_WARN_ON(cr3 & KAISER_SHADOW_PCID_ASID);
  1100. + VM_WARN_ON(cr3 & (1<<KAISER_PGTABLE_SWITCH_BIT));
  1101. + VM_WARN_ON(cr3 & X86_CR3_PCID_NOFLUSH);
  1102. + cr3 |= X86_CR3_PCID_NOFLUSH;
  1103. + shadow_cr3 = cr3 | (1<<KAISER_PGTABLE_SWITCH_BIT) |
  1104. + KAISER_SHADOW_PCID_ASID;
  1105. + asm volatile("\tjmp 1f\n\t"
  1106. + "2:\n\t"
  1107. + ".section .entry.text, \"ax\"\n\t"
  1108. + "1:\n\t"
  1109. + "pushf\n\t"
  1110. + "cli\n\t"
  1111. + "movq %0, %%cr3\n\t"
  1112. + "invlpg (%2)\n\t"
  1113. + "movq %1, %%cr3\n\t"
  1114. + "popf\n\t"
  1115. + "invlpg (%2)\n\t"
  1116. + "jmp 2b\n\t"
  1117. + ".previous" : :
  1118. + "r" (shadow_cr3), "r" (cr3), "r" (addr) :
  1119. + "memory");
  1120. + } else
  1121. +#endif
  1122. + asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
  1123. }
  1124.  
  1125. static inline void __flush_tlb_all(void)
  1126. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/include/uapi/asm/processor-flags.h linux-3.10.0-693.11.6.el7/arch/x86/include/uapi/asm/processor-flags.h
  1127. --- linux-3.10.0-693.11.1.el7/arch/x86/include/uapi/asm/processor-flags.h 2017-10-27 11:14:15.000000000 +0200
  1128. +++ linux-3.10.0-693.11.6.el7/arch/x86/include/uapi/asm/processor-flags.h 2017-12-28 19:59:43.000000000 +0100
  1129. @@ -79,7 +79,8 @@
  1130. #define X86_CR3_PWT _BITUL(X86_CR3_PWT_BIT)
  1131. #define X86_CR3_PCD_BIT 4 /* Page Cache Disable */
  1132. #define X86_CR3_PCD _BITUL(X86_CR3_PCD_BIT)
  1133. -#define X86_CR3_PCID_MASK _AC(0x00000fff,UL) /* PCID Mask */
  1134. +#define X86_CR3_PCID_NOFLUSH_BIT 63 /* Preserve old PCID */
  1135. +#define X86_CR3_PCID_NOFLUSH _BITULL(X86_CR3_PCID_NOFLUSH_BIT)
  1136.  
  1137. /*
  1138. * Intel CPU features in CR4
  1139. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/Kconfig linux-3.10.0-693.11.6.el7/arch/x86/Kconfig
  1140. --- linux-3.10.0-693.11.1.el7/arch/x86/Kconfig 2017-10-27 11:14:15.000000000 +0200
  1141. +++ linux-3.10.0-693.11.6.el7/arch/x86/Kconfig 2017-12-28 19:59:43.000000000 +0100
  1142. @@ -2001,6 +2001,14 @@ config COMPAT_VDSO
  1143. depends on X86_32 || IA32_EMULATION
  1144. ---help---
  1145. Map the 32-bit VDSO to the predictable old-style address too.
  1146. + # The VSYSCALL page comes from the kernel page tables
  1147. + # and is not available when KAISER is enabled.
  1148. + depends on ! KAISER
  1149. +
  1150. + When KAISER is enabled, the vsyscall area will become
  1151. + unreadable. This emulation option still works, but KAISER
  1152. + will make it harder to do things like trace code using the
  1153. + emulation.
  1154.  
  1155. Say N here if you are running a sufficiently recent glibc
  1156. version (2.3.3 or later), to remove the high-mapped
  1157. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/asm-offsets_64.c linux-3.10.0-693.11.6.el7/arch/x86/kernel/asm-offsets_64.c
  1158. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/asm-offsets_64.c 2017-10-27 11:14:15.000000000 +0200
  1159. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/asm-offsets_64.c 2017-12-28 19:59:43.000000000 +0100
  1160. @@ -78,6 +78,9 @@ int main(void)
  1161. #undef ENTRY
  1162.  
  1163. OFFSET(TSS_ist, tss_struct, x86_tss.ist);
  1164. + OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
  1165. + OFFSET(TSS_stack, tss_struct, stack);
  1166. + DEFINE(TSS_stack_size, sizeof(init_tss.stack));
  1167. BLANK();
  1168.  
  1169. DEFINE(__NR_syscall_max, sizeof(syscalls_64) - 1);
  1170. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/cpu/amd.c linux-3.10.0-693.11.6.el7/arch/x86/kernel/cpu/amd.c
  1171. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/cpu/amd.c 2017-10-27 11:14:15.000000000 +0200
  1172. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/cpu/amd.c 2017-12-28 19:59:43.000000000 +0100
  1173. @@ -704,8 +704,17 @@ static void init_amd(struct cpuinfo_x86
  1174. set_cpu_cap(c, X86_FEATURE_K8);
  1175.  
  1176. if (cpu_has_xmm2) {
  1177. - /* MFENCE stops RDTSC speculation */
  1178. - set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
  1179. + /*
  1180. + * Use LFENCE for execution serialization. On some families
  1181. + * LFENCE is already serialized and the MSR is not available,
  1182. + * but msr_set_bit() uses rdmsrl_safe() and wrmsrl_safe().
  1183. + */
  1184. + if (c->x86 > 0xf)
  1185. + msr_set_bit(MSR_F10H_DECFG,
  1186. + MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
  1187. +
  1188. + /* LFENCE with MSR_F10H_DECFG[1]=1 stops RDTSC speculation */
  1189. + set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
  1190. }
  1191.  
  1192. #ifdef CONFIG_X86_64
  1193. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/cpu/common.c linux-3.10.0-693.11.6.el7/arch/x86/kernel/cpu/common.c
  1194. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/cpu/common.c 2017-10-27 11:14:15.000000000 +0200
  1195. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/cpu/common.c 2017-12-28 19:59:43.000000000 +0100
  1196. @@ -42,6 +42,7 @@
  1197. #include <asm/pat.h>
  1198. #include <asm/microcode.h>
  1199. #include <asm/microcode_intel.h>
  1200. +#include <asm/kaiser.h>
  1201.  
  1202. #ifdef CONFIG_X86_LOCAL_APIC
  1203. #include <asm/uv/uv.h>
  1204. @@ -93,7 +94,7 @@ static const struct cpu_dev default_cpu
  1205.  
  1206. static const struct cpu_dev *this_cpu = &default_cpu;
  1207.  
  1208. -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
  1209. +DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(struct gdt_page, gdt_page) = { .gdt = {
  1210. #ifdef CONFIG_X86_64
  1211. /*
  1212. * We need valid kernel segments for data and code in long mode too
  1213. @@ -202,6 +203,40 @@ static int __init x86_mpx_setup(char *s)
  1214. }
  1215. __setup("nompx", x86_mpx_setup);
  1216.  
  1217. +#ifdef CONFIG_X86_64
  1218. +static int __init x86_nopcid_setup(char *s)
  1219. +{
  1220. + /* nopcid doesn't accept parameters */
  1221. + if (s)
  1222. + return -EINVAL;
  1223. +
  1224. + /* do not emit a message if the feature is not present */
  1225. + if (!boot_cpu_has(X86_FEATURE_PCID))
  1226. + return 0;
  1227. +
  1228. + setup_clear_cpu_cap(X86_FEATURE_PCID);
  1229. + pr_info("nopcid: PCID feature disabled\n");
  1230. + return 0;
  1231. +}
  1232. +early_param("nopcid", x86_nopcid_setup);
  1233. +
  1234. +static int __init x86_noinvpcid_setup(char *s)
  1235. +{
  1236. + /* noinvpcid doesn't accept parameters */
  1237. + if (s)
  1238. + return -EINVAL;
  1239. +
  1240. + /* do not emit a message if the feature is not present */
  1241. + if (!boot_cpu_has(X86_FEATURE_INVPCID))
  1242. + return 0;
  1243. +
  1244. + setup_clear_cpu_cap(X86_FEATURE_INVPCID);
  1245. + pr_info("noinvpcid: INVPCID feature disabled\n");
  1246. + return 0;
  1247. +}
  1248. +early_param("noinvpcid", x86_noinvpcid_setup);
  1249. +#endif
  1250. +
  1251. #ifdef CONFIG_X86_32
  1252. static int cachesize_override = -1;
  1253. static int disable_x86_serial_nr = 1;
  1254. @@ -1002,6 +1037,8 @@ static void identify_cpu(struct cpuinfo_
  1255. setup_smep(c);
  1256. setup_smap(c);
  1257.  
  1258. + spec_ctrl_init(c);
  1259. +
  1260. /*
  1261. * The vendor-specific functions might have changed features.
  1262. * Now we do "generic changes."
  1263. @@ -1227,6 +1264,10 @@ DEFINE_PER_CPU(char *, irq_stack_ptr) =
  1264. init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
  1265.  
  1266. DEFINE_PER_CPU(unsigned int, irq_count) = -1;
  1267. +DEFINE_PER_CPU_USER_MAPPED(unsigned int, kaiser_enabled_pcp) ____cacheline_aligned;
  1268. +DEFINE_PER_CPU_USER_MAPPED(unsigned int, spec_ctrl_pcp);
  1269. +EXPORT_PER_CPU_SYMBOL_GPL(spec_ctrl_pcp);
  1270. +DEFINE_PER_CPU_USER_MAPPED(unsigned long, kaiser_scratch);
  1271.  
  1272. DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
  1273.  
  1274. @@ -1241,7 +1282,7 @@ static const unsigned int exception_stac
  1275. [DEBUG_STACK - 1] = DEBUG_STKSZ
  1276. };
  1277.  
  1278. -static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
  1279. +DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(char, exception_stacks
  1280. [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
  1281.  
  1282. /* May not be marked __init: used by software suspend */
  1283. @@ -1443,7 +1484,9 @@ void cpu_init(void)
  1284. BUG_ON(me->mm);
  1285. enter_lazy_tlb(&init_mm, me);
  1286.  
  1287. - load_sp0(t, &current->thread);
  1288. + __this_cpu_write(init_tss.x86_tss.sp0,
  1289. + (unsigned long) t + offsetofend(struct tss_struct,
  1290. + stack));
  1291. set_tss_desc(cpu, t);
  1292. load_TR_desc();
  1293. load_LDT(&init_mm.context);
  1294. @@ -1455,6 +1498,8 @@ void cpu_init(void)
  1295.  
  1296. if (is_uv_system())
  1297. uv_cpu_init();
  1298. +
  1299. + WARN_ON((unsigned long) &t->x86_tss & ~PAGE_MASK);
  1300. }
  1301.  
  1302. #else
  1303. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/cpu/microcode/core.c linux-3.10.0-693.11.6.el7/arch/x86/kernel/cpu/microcode/core.c
  1304. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/cpu/microcode/core.c 2017-10-27 11:14:15.000000000 +0200
  1305. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/cpu/microcode/core.c 2017-12-28 19:59:43.000000000 +0100
  1306. @@ -39,6 +39,7 @@
  1307. #include <asm/microcode.h>
  1308. #include <asm/processor.h>
  1309. #include <asm/cmdline.h>
  1310. +#include <asm/spec_ctrl.h>
  1311.  
  1312. #define MICROCODE_VERSION "2.01"
  1313.  
  1314. @@ -318,8 +319,10 @@ static ssize_t microcode_write(struct fi
  1315. if (do_microcode_update(buf, len) == 0)
  1316. ret = (ssize_t)len;
  1317.  
  1318. - if (ret > 0)
  1319. + if (ret > 0) {
  1320. perf_check_microcode();
  1321. + spec_ctrl_rescan_cpuid();
  1322. + }
  1323.  
  1324. mutex_unlock(&microcode_mutex);
  1325. put_online_cpus();
  1326. @@ -410,8 +413,10 @@ static ssize_t reload_store(struct devic
  1327. if (!ret)
  1328. ret = tmp_ret;
  1329. }
  1330. - if (!ret)
  1331. + if (!ret) {
  1332. perf_check_microcode();
  1333. + spec_ctrl_rescan_cpuid();
  1334. + }
  1335. mutex_unlock(&microcode_mutex);
  1336. put_online_cpus();
  1337.  
  1338. @@ -645,8 +650,10 @@ int __init microcode_init(void)
  1339. mutex_lock(&microcode_mutex);
  1340.  
  1341. error = subsys_interface_register(&mc_cpu_interface);
  1342. - if (!error)
  1343. + if (!error) {
  1344. perf_check_microcode();
  1345. + spec_ctrl_rescan_cpuid();
  1346. + }
  1347. mutex_unlock(&microcode_mutex);
  1348. put_online_cpus();
  1349.  
  1350. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/cpu/scattered.c linux-3.10.0-693.11.6.el7/arch/x86/kernel/cpu/scattered.c
  1351. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/cpu/scattered.c 2017-10-27 11:14:15.000000000 +0200
  1352. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/cpu/scattered.c 2017-12-28 19:59:43.000000000 +0100
  1353. @@ -22,6 +22,7 @@ static const struct cpuid_bit cpuid_bits
  1354. { X86_FEATURE_INTEL_PT, CPUID_EBX,25, 0x00000007, 0 },
  1355. { X86_FEATURE_AVX512_4VNNIW, CPUID_EDX, 2, 0x00000007, 0 },
  1356. { X86_FEATURE_AVX512_4FMAPS, CPUID_EDX, 3, 0x00000007, 0 },
  1357. + { X86_FEATURE_SPEC_CTRL, CPUID_EDX, 26, 0x00000007, 0 },
  1358. { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
  1359. { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
  1360. { X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 },
  1361. @@ -31,6 +32,7 @@ static const struct cpuid_bit cpuid_bits
  1362. { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
  1363. { X86_FEATURE_PROC_FEEDBACK, CPUID_EDX,11, 0x80000007, 0 },
  1364. { X86_FEATURE_AVIC, CPUID_EDX,13, 0x8000000a, 0 },
  1365. + { X86_FEATURE_IBPB_SUPPORT, CPUID_EBX,12, 0x80000008, 0 },
  1366. { 0, 0, 0, 0 }
  1367. };
  1368.  
  1369. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/dumpstack_32.c linux-3.10.0-693.11.6.el7/arch/x86/kernel/dumpstack_32.c
  1370. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/dumpstack_32.c 2017-10-27 11:14:15.000000000 +0200
  1371. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/dumpstack_32.c 2017-12-28 19:59:43.000000000 +0100
  1372. @@ -54,34 +54,6 @@ void dump_trace(struct task_struct *task
  1373. }
  1374. EXPORT_SYMBOL(dump_trace);
  1375.  
  1376. -void
  1377. -show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
  1378. - unsigned long *sp, unsigned long bp, char *log_lvl)
  1379. -{
  1380. - unsigned long *stack;
  1381. - int i;
  1382. -
  1383. - if (sp == NULL) {
  1384. - if (task)
  1385. - sp = (unsigned long *)task->thread.sp;
  1386. - else
  1387. - sp = (unsigned long *)&sp;
  1388. - }
  1389. -
  1390. - stack = sp;
  1391. - for (i = 0; i < kstack_depth_to_print; i++) {
  1392. - if (kstack_end(stack))
  1393. - break;
  1394. - if (i && ((i % STACKSLOTS_PER_LINE) == 0))
  1395. - pr_cont("\n");
  1396. - pr_cont(" %08lx", *stack++);
  1397. - touch_nmi_watchdog();
  1398. - }
  1399. - pr_cont("\n");
  1400. - show_trace_log_lvl(task, regs, sp, bp, log_lvl);
  1401. -}
  1402. -
  1403. -
  1404. void show_regs(struct pt_regs *regs)
  1405. {
  1406. int i;
  1407. @@ -99,8 +71,7 @@ void show_regs(struct pt_regs *regs)
  1408. unsigned char c;
  1409. u8 *ip;
  1410.  
  1411. - pr_emerg("Stack:\n");
  1412. - show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
  1413. + show_trace_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
  1414.  
  1415. pr_emerg("Code:");
  1416.  
  1417. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/dumpstack_64.c linux-3.10.0-693.11.6.el7/arch/x86/kernel/dumpstack_64.c
  1418. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/dumpstack_64.c 2017-10-27 11:14:15.000000000 +0200
  1419. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/dumpstack_64.c 2017-12-28 19:59:43.000000000 +0100
  1420. @@ -195,55 +195,6 @@ void dump_trace(struct task_struct *task
  1421. }
  1422. EXPORT_SYMBOL(dump_trace);
  1423.  
  1424. -void
  1425. -show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
  1426. - unsigned long *sp, unsigned long bp, char *log_lvl)
  1427. -{
  1428. - unsigned long *irq_stack_end;
  1429. - unsigned long *irq_stack;
  1430. - unsigned long *stack;
  1431. - int cpu;
  1432. - int i;
  1433. -
  1434. - preempt_disable();
  1435. - cpu = smp_processor_id();
  1436. -
  1437. - irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
  1438. - irq_stack = (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);
  1439. -
  1440. - /*
  1441. - * Debugging aid: "show_stack(NULL, NULL);" prints the
  1442. - * back trace for this cpu:
  1443. - */
  1444. - if (sp == NULL) {
  1445. - if (task)
  1446. - sp = (unsigned long *)task->thread.sp;
  1447. - else
  1448. - sp = (unsigned long *)&sp;
  1449. - }
  1450. -
  1451. - stack = sp;
  1452. - for (i = 0; i < kstack_depth_to_print; i++) {
  1453. - if (stack >= irq_stack && stack <= irq_stack_end) {
  1454. - if (stack == irq_stack_end) {
  1455. - stack = (unsigned long *) (irq_stack_end[-1]);
  1456. - pr_cont(" <EOI> ");
  1457. - }
  1458. - } else {
  1459. - if (((long) stack & (THREAD_SIZE-1)) == 0)
  1460. - break;
  1461. - }
  1462. - if (i && ((i % STACKSLOTS_PER_LINE) == 0))
  1463. - pr_cont("\n");
  1464. - pr_cont(" %016lx", *stack++);
  1465. - touch_nmi_watchdog();
  1466. - }
  1467. - preempt_enable();
  1468. -
  1469. - pr_cont("\n");
  1470. - show_trace_log_lvl(task, regs, sp, bp, log_lvl);
  1471. -}
  1472. -
  1473. void show_regs(struct pt_regs *regs)
  1474. {
  1475. int i;
  1476. @@ -263,8 +214,7 @@ void show_regs(struct pt_regs *regs)
  1477. unsigned char c;
  1478. u8 *ip;
  1479.  
  1480. - printk(KERN_DEFAULT "Stack:\n");
  1481. - show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
  1482. + show_trace_log_lvl(NULL, regs, (unsigned long *)sp,
  1483. 0, KERN_DEFAULT);
  1484.  
  1485. printk(KERN_DEFAULT "Code: ");
  1486. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/dumpstack.c linux-3.10.0-693.11.6.el7/arch/x86/kernel/dumpstack.c
  1487. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/dumpstack.c 2017-10-27 11:14:15.000000000 +0200
  1488. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/dumpstack.c 2017-12-28 19:59:43.000000000 +0100
  1489. @@ -22,7 +22,6 @@
  1490. int panic_on_unrecovered_nmi;
  1491. int panic_on_io_nmi;
  1492. unsigned int code_bytes = 64;
  1493. -int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
  1494. static int die_counter;
  1495.  
  1496. void printk_address(unsigned long address, int reliable)
  1497. @@ -188,7 +187,7 @@ void show_stack(struct task_struct *task
  1498. bp = stack_frame(current, NULL);
  1499. }
  1500.  
  1501. - show_stack_log_lvl(task, NULL, sp, bp, "");
  1502. + show_trace_log_lvl(task, NULL, sp, bp, "");
  1503. }
  1504.  
  1505. static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
  1506. @@ -304,22 +303,6 @@ void die(const char *str, struct pt_regs
  1507. oops_end(flags, regs, sig);
  1508. }
  1509.  
  1510. -static int __init kstack_setup(char *s)
  1511. -{
  1512. - ssize_t ret;
  1513. - unsigned long val;
  1514. -
  1515. - if (!s)
  1516. - return -EINVAL;
  1517. -
  1518. - ret = kstrtoul(s, 0, &val);
  1519. - if (ret)
  1520. - return ret;
  1521. - kstack_depth_to_print = val;
  1522. - return 0;
  1523. -}
  1524. -early_param("kstack", kstack_setup);
  1525. -
  1526. static int __init code_bytes_setup(char *s)
  1527. {
  1528. ssize_t ret;
  1529. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/entry_64.S linux-3.10.0-693.11.6.el7/arch/x86/kernel/entry_64.S
  1530. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/entry_64.S 2017-10-27 11:14:15.000000000 +0200
  1531. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/entry_64.S 2017-12-28 19:59:43.000000000 +0100
  1532. @@ -57,7 +57,9 @@
  1533. #include <asm/asm.h>
  1534. #include <asm/context_tracking.h>
  1535. #include <asm/smap.h>
  1536. +#include <asm/spec_ctrl.h>
  1537. #include <linux/err.h>
  1538. +#include "kaiser.h"
  1539.  
  1540. /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
  1541. #include <linux/elf-em.h>
  1542. @@ -262,6 +264,34 @@ ENDPROC(native_usergs_sysret64)
  1543. testl $3, CS-RBP(%rsi)
  1544. je 1f
  1545. SWAPGS
  1546. + SWITCH_TO_KERNEL_CR3
  1547. + ENABLE_IBRS_CLOBBER /* no indirect jump allowed before IBRS */
  1548. + movq PER_CPU_VAR(kernel_stack), %rsp
  1549. + STUFF_RSB /* no ret allowed before stuffing the RSB */
  1550. + movq %rsi, %rsp
  1551. +1:
  1552. + /* Check to see if we're on the trampoline stack. */
  1553. + movq PER_CPU_VAR(init_tss + TSS_sp0), %rcx
  1554. + cmpq %rcx, %rsp
  1555. + ja 1f
  1556. + leaq -TSS_stack_size(%rcx), %rax
  1557. + cmpq %rsp, %rax
  1558. + ja 1f
  1559. + /*
  1560. + * We're on the trampoline stack. Copy the trampoline stack's
  1561. + * contents to the kernel task stack and switch to it.
  1562. + */
  1563. + pushq %rdi
  1564. + subq %rsp, %rcx
  1565. + movq PER_CPU_VAR(kernel_stack), %rdi
  1566. + addq $KERNEL_STACK_OFFSET, %rdi
  1567. + subq %rcx, %rdi
  1568. + movq %rdi, %rax
  1569. + movq %rsp, %rsi
  1570. + rep movsb
  1571. + movq %rax, %rsp
  1572. + popq %rdi
  1573. + movq %rsp, %rsi /* we earlier saved %rsp in %rsi, update it to new one */
  1574. /*
  1575. * irq_count is used to check if a CPU is already on an interrupt stack
  1576. * or not. While this is essentially redundant with preempt_count it is
  1577. @@ -325,7 +355,11 @@ ENTRY(save_paranoid)
  1578. js 1f /* negative -> in kernel */
  1579. SWAPGS
  1580. xorl %ebx,%ebx
  1581. -1: ret
  1582. +1:
  1583. + SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
  1584. + ENABLE_IBRS_SAVE_AND_CLOBBER save_reg=%r13d /* no indirect jump allowed before IBRS */
  1585. + STUFF_RSB /* no ret allowed before stuffing the RSB */
  1586. + ret
  1587. CFI_ENDPROC
  1588. END(save_paranoid)
  1589. .popsection
  1590. @@ -391,7 +425,7 @@ END(ret_from_fork)
  1591. * r8 arg4
  1592. * r9 arg5
  1593. * r11 eflags for syscall/sysret, temporary for C
  1594. - * r12-r15,rbp,rbx saved by C code, not touched.
  1595. + * r12-r15,rbp,rbx saved by C code, not used.
  1596. *
  1597. * Interrupts are off on entry.
  1598. * Only called from user space.
  1599. @@ -418,18 +452,23 @@ ENTRY(system_call)
  1600. */
  1601. GLOBAL(system_call_after_swapgs)
  1602.  
  1603. + SWITCH_TO_KERNEL_CR3
  1604. movq %rsp,PER_CPU_VAR(old_rsp)
  1605. movq PER_CPU_VAR(kernel_stack),%rsp
  1606. + ENABLE_IBRS /* no indirect jump allowed before IBRS */
  1607. + STUFF_RSB /* no ret allowed before stuffing the RSB */
  1608. /*
  1609. * No need to follow this irqs off/on section - it's straight
  1610. * and short:
  1611. */
  1612. ENABLE_INTERRUPTS(CLBR_NONE)
  1613. SAVE_ARGS 8,0
  1614. - movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
  1615. - movq %rcx,RIP-ARGOFFSET(%rsp)
  1616. - CFI_REL_OFFSET rip,RIP-ARGOFFSET
  1617. - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
  1618. + SAVE_REST
  1619. + CLEAR_EXTRA_REGS
  1620. + movq %rax,ORIG_RAX(%rsp)
  1621. + movq %rcx,RIP(%rsp)
  1622. + CFI_REL_OFFSET rip,RIP
  1623. + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP)
  1624. jnz tracesys
  1625. system_call_fastpath:
  1626. #if __SYSCALL_MASK == ~0
  1627. @@ -441,7 +480,7 @@ system_call_fastpath:
  1628. ja badsys
  1629. movq %r10,%rcx
  1630. call *sys_call_table(,%rax,8) # XXX: rip relative
  1631. - movq %rax,RAX-ARGOFFSET(%rsp)
  1632. + movq %rax,RAX(%rsp)
  1633. /*
  1634. * Syscall return path ending with SYSRET (fast path)
  1635. * Has incomplete stack frame and undefined top of stack.
  1636. @@ -453,7 +492,7 @@ sysret_check:
  1637. LOCKDEP_SYS_EXIT
  1638. DISABLE_INTERRUPTS(CLBR_NONE)
  1639. TRACE_IRQS_OFF
  1640. - movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
  1641. + movl TI_flags+THREAD_INFO(%rsp,RIP),%edx
  1642. andl %edi,%edx
  1643. jnz sysret_careful
  1644. CFI_REMEMBER_STATE
  1645. @@ -461,11 +500,14 @@ sysret_check:
  1646. * sysretq will re-enable interrupts:
  1647. */
  1648. TRACE_IRQS_ON
  1649. - movq RIP-ARGOFFSET(%rsp),%rcx
  1650. + DISABLE_IBRS_CLOBBER
  1651. + movq RIP(%rsp),%rcx
  1652. CFI_REGISTER rip,rcx
  1653. + RESTORE_REST
  1654. RESTORE_ARGS 1,-ARG_SKIP,0
  1655. /*CFI_REGISTER rflags,r11*/
  1656. movq PER_CPU_VAR(old_rsp), %rsp
  1657. + SWITCH_TO_USER_CR3
  1658. USERGS_SYSRET64
  1659.  
  1660. CFI_RESTORE_STATE
  1661. @@ -494,11 +536,11 @@ sysret_signal:
  1662. * These all wind up with the iret return path anyway,
  1663. * so just join that path right now.
  1664. */
  1665. - FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
  1666. + FIXUP_TOP_OF_STACK %r11
  1667. jmp int_check_syscall_exit_work
  1668.  
  1669. badsys:
  1670. - movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
  1671. + movq $-ENOSYS,RAX(%rsp)
  1672. jmp ret_from_sys_call
  1673.  
  1674. #ifdef CONFIG_AUDITSYSCALL
  1675. @@ -514,7 +556,7 @@ auditsys:
  1676. movq %rdi,%rsi /* 2nd arg: 1st syscall arg */
  1677. movq %rax,%rdi /* 1st arg: syscall number */
  1678. call __audit_syscall_entry
  1679. - LOAD_ARGS 0 /* reload call-clobbered registers */
  1680. + LOAD_ARGS ARGOFFSET /* reload call-clobbered registers */
  1681. jmp system_call_fastpath
  1682.  
  1683. /*
  1684. @@ -523,7 +565,7 @@ auditsys:
  1685. * masked off.
  1686. */
  1687. sysret_audit:
  1688. - movq RAX-ARGOFFSET(%rsp),%rsi /* second arg, syscall return value */
  1689. + movq RAX(%rsp),%rsi /* second arg, syscall return value */
  1690. cmpq $-MAX_ERRNO,%rsi /* is it < -MAX_ERRNO? */
  1691. setbe %al /* 1 if so, 0 if not */
  1692. movzbl %al,%edi /* zero-extend that into %edi */
  1693. @@ -535,10 +577,9 @@ sysret_audit:
  1694. /* Do syscall tracing */
  1695. tracesys:
  1696. #ifdef CONFIG_AUDITSYSCALL
  1697. - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
  1698. + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP)
  1699. jz auditsys
  1700. #endif
  1701. - SAVE_REST
  1702. movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
  1703. FIXUP_TOP_OF_STACK %rdi
  1704. movq %rsp,%rdi
  1705. @@ -549,18 +590,18 @@ tracesys:
  1706. * the value it wants us to use in the table lookup.
  1707. */
  1708. LOAD_ARGS ARGOFFSET, 1
  1709. - RESTORE_REST
  1710. #if __SYSCALL_MASK == ~0
  1711. cmpq $__NR_syscall_max,%rax
  1712. #else
  1713. andl $__SYSCALL_MASK,%eax
  1714. cmpl $__NR_syscall_max,%eax
  1715. #endif
  1716. - ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
  1717. + ja 1f /* RAX(%rsp) set to -ENOSYS above */
  1718. movq %r10,%rcx /* fixup for C */
  1719. call *sys_call_table(,%rax,8)
  1720. - movq %rax,RAX-ARGOFFSET(%rsp)
  1721. - /* Use IRET because user could have changed frame */
  1722. + movq %rax,RAX(%rsp)
  1723. +1: RESTORE_REST
  1724. +/* Use IRET because user could have changed frame */
  1725.  
  1726. /*
  1727. * Syscall return path ending with IRET.
  1728. @@ -599,8 +640,8 @@ int_careful:
  1729. int_very_careful:
  1730. TRACE_IRQS_ON
  1731. ENABLE_INTERRUPTS(CLBR_NONE)
  1732. -int_check_syscall_exit_work:
  1733. SAVE_REST
  1734. +int_check_syscall_exit_work:
  1735. /* Check for syscall exit trace */
  1736. testl $_TIF_WORK_SYSCALL_EXIT,%edx
  1737. jz int_signal
  1738. @@ -629,15 +670,11 @@ END(system_call)
  1739. .macro FORK_LIKE func
  1740. ENTRY(stub_\func)
  1741. CFI_STARTPROC
  1742. - popq %r11 /* save return address */
  1743. - PARTIAL_FRAME 0
  1744. - SAVE_REST
  1745. - pushq %r11 /* put it back on stack */
  1746. - FIXUP_TOP_OF_STACK %r11, 8
  1747. DEFAULT_FRAME 0 8 /* offset 8: return address */
  1748. + FIXUP_TOP_OF_STACK %r11, 8
  1749. call sys_\func
  1750. RESTORE_TOP_OF_STACK %r11, 8
  1751. - ret $REST_SKIP /* pop extended registers */
  1752. + ret
  1753. CFI_ENDPROC
  1754. END(stub_\func)
  1755. .endm
  1756. @@ -645,10 +682,10 @@ END(stub_\func)
  1757. .macro FIXED_FRAME label,func
  1758. ENTRY(\label)
  1759. CFI_STARTPROC
  1760. - PARTIAL_FRAME 0 8 /* offset 8: return address */
  1761. - FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
  1762. + DEFAULT_FRAME 0 8 /* offset 8: return address */
  1763. + FIXUP_TOP_OF_STACK %r11, 8
  1764. call \func
  1765. - RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
  1766. + RESTORE_TOP_OF_STACK %r11, 8
  1767. ret
  1768. CFI_ENDPROC
  1769. END(\label)
  1770. @@ -659,24 +696,10 @@ END(\label)
  1771. FORK_LIKE vfork
  1772. FIXED_FRAME stub_iopl, sys_iopl
  1773.  
  1774. -ENTRY(ptregscall_common)
  1775. - DEFAULT_FRAME 1 8 /* offset 8: return address */
  1776. - RESTORE_TOP_OF_STACK %r11, 8
  1777. - movq_cfi_restore R15+8, r15
  1778. - movq_cfi_restore R14+8, r14
  1779. - movq_cfi_restore R13+8, r13
  1780. - movq_cfi_restore R12+8, r12
  1781. - movq_cfi_restore RBP+8, rbp
  1782. - movq_cfi_restore RBX+8, rbx
  1783. - ret $REST_SKIP /* pop extended registers */
  1784. - CFI_ENDPROC
  1785. -END(ptregscall_common)
  1786. -
  1787. ENTRY(stub_execve)
  1788. CFI_STARTPROC
  1789. addq $8, %rsp
  1790. - PARTIAL_FRAME 0
  1791. - SAVE_REST
  1792. + DEFAULT_FRAME 0
  1793. FIXUP_TOP_OF_STACK %r11
  1794. call sys_execve
  1795. movq %rax,RAX(%rsp)
  1796. @@ -692,8 +715,7 @@ END(stub_execve)
  1797. ENTRY(stub_rt_sigreturn)
  1798. CFI_STARTPROC
  1799. addq $8, %rsp
  1800. - PARTIAL_FRAME 0
  1801. - SAVE_REST
  1802. + DEFAULT_FRAME 0
  1803. FIXUP_TOP_OF_STACK %r11
  1804. call sys_rt_sigreturn
  1805. movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
  1806. @@ -706,8 +728,7 @@ END(stub_rt_sigreturn)
  1807. ENTRY(stub_x32_rt_sigreturn)
  1808. CFI_STARTPROC
  1809. addq $8, %rsp
  1810. - PARTIAL_FRAME 0
  1811. - SAVE_REST
  1812. + DEFAULT_FRAME 0
  1813. FIXUP_TOP_OF_STACK %r11
  1814. call sys32_x32_rt_sigreturn
  1815. movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
  1816. @@ -719,8 +740,7 @@ END(stub_x32_rt_sigreturn)
  1817. ENTRY(stub_x32_execve)
  1818. CFI_STARTPROC
  1819. addq $8, %rsp
  1820. - PARTIAL_FRAME 0
  1821. - SAVE_REST
  1822. + DEFAULT_FRAME 0
  1823. FIXUP_TOP_OF_STACK %r11
  1824. call compat_sys_execve
  1825. RESTORE_TOP_OF_STACK %r11
  1826. @@ -840,8 +860,31 @@ retint_swapgs: /* return to user-space
  1827. */
  1828. DISABLE_INTERRUPTS(CLBR_ANY)
  1829. TRACE_IRQS_IRETQ
  1830. +
  1831. +retint_userspace_restore_args:
  1832. + DISABLE_IBRS_CLOBBER
  1833. + /*
  1834. + * Returning from an interrupt/NMI/exception to user space, currently
  1835. + * on the kernel task stack, which is not user-mapped. Copy the iret
  1836. + * frame to the trampoline stack and switch to it before returning.
  1837. + */
  1838. + RESTORE_ARGS 1,8,1
  1839. + pushq %rax
  1840. + movq %rsp, %rax
  1841. + movq PER_CPU_VAR(init_tss + TSS_sp0), %rsp
  1842. + pushq SS-ORIG_RAX(%rax)
  1843. + pushq RSP-ORIG_RAX(%rax)
  1844. + pushq EFLAGS-ORIG_RAX(%rax)
  1845. + pushq CS-ORIG_RAX(%rax)
  1846. + pushq RIP-ORIG_RAX(%rax)
  1847. + movq (%rax), %rax
  1848. + SWITCH_TO_USER_CR3
  1849. SWAPGS
  1850. - jmp restore_args
  1851. + jmp irq_return
  1852. +
  1853. +paranoid_userspace_restore_all:
  1854. + RESTORE_REST
  1855. + jmp retint_userspace_restore_args
  1856.  
  1857. retint_restore_args: /* return to kernel space */
  1858. DISABLE_INTERRUPTS(CLBR_ANY)
  1859. @@ -849,7 +892,6 @@ retint_restore_args: /* return to kernel
  1860. * The iretq could re-enable interrupts:
  1861. */
  1862. TRACE_IRQS_IRETQ
  1863. -restore_args:
  1864. RESTORE_ARGS 1,8,1
  1865.  
  1866. irq_return:
  1867. @@ -876,7 +918,16 @@ bad_iret:
  1868. */
  1869. pushq $0
  1870.  
  1871. + /*
  1872. + * If a kernel bug clears user CS bit and in turn we'll skip SWAPGS in
  1873. + * general_protection, skip the SWAPGS here as well so we won't hard reboot.
  1874. + * This increases robustness of bad_iret to kernel bugs as well.
  1875. + */
  1876. + testl $3, 8*2(%rsp)
  1877. + je 1f
  1878. SWAPGS
  1879. +1:
  1880. +
  1881. jmp general_protection
  1882.  
  1883. .previous
  1884. @@ -1340,23 +1391,27 @@ ENTRY(paranoid_exit)
  1885. DISABLE_INTERRUPTS(CLBR_NONE)
  1886. TRACE_IRQS_OFF_DEBUG
  1887. testl %ebx,%ebx /* swapgs needed? */
  1888. - jnz paranoid_restore
  1889. + jnz paranoid_noswapgs_restore
  1890. testl $3,CS(%rsp)
  1891. jnz paranoid_userspace
  1892. -paranoid_swapgs:
  1893. - TRACE_IRQS_IRETQ 0
  1894. - SWAPGS_UNSAFE_STACK
  1895. +paranoid_swapgs_restore:
  1896. + /* return to kernel with swapgs */
  1897. + RESTORE_IBRS_CLOBBER save_reg=%r13d
  1898. + RESTORE_CR3 scratch_reg=%rax save_reg=%r14
  1899. RESTORE_ALL 8
  1900. + SWAPGS
  1901. jmp irq_return
  1902. -paranoid_restore:
  1903. - TRACE_IRQS_IRETQ_DEBUG 0
  1904. +paranoid_noswapgs_restore:
  1905. + /* return to kernel */
  1906. + RESTORE_IBRS_CLOBBER save_reg=%r13d
  1907. + RESTORE_CR3 scratch_reg=%rax save_reg=%r14
  1908. RESTORE_ALL 8
  1909. jmp irq_return
  1910. paranoid_userspace:
  1911. GET_THREAD_INFO(%rcx)
  1912. movl TI_flags(%rcx),%ebx
  1913. andl $_TIF_WORK_MASK,%ebx
  1914. - jz paranoid_swapgs
  1915. + jz paranoid_userspace_restore_all
  1916. movq %rsp,%rdi /* &pt_regs */
  1917. call sync_regs
  1918. movq %rax,%rsp /* switch stack for scheduling */
  1919. @@ -1410,7 +1465,33 @@ ENTRY(error_entry)
  1920. je error_kernelspace
  1921. error_swapgs:
  1922. SWAPGS
  1923. + SWITCH_TO_KERNEL_CR3
  1924. + movq %rsp, %rsi
  1925. + movq PER_CPU_VAR(kernel_stack), %rsp
  1926. + ENABLE_IBRS_CLOBBER /* no indirect jump allowed before IBRS */
  1927. + STUFF_RSB /* no ret allowed before stuffing the RSB */
  1928. + movq %rsi, %rsp
  1929. error_sti:
  1930. + /* Check to see if we're on the trampoline stack. */
  1931. + movq PER_CPU_VAR(init_tss + TSS_sp0), %rcx
  1932. + cmpq %rcx, %rsp
  1933. + ja 1f
  1934. + leaq -TSS_stack_size(%rcx), %rax
  1935. + cmpq %rsp, %rax
  1936. + ja 1f
  1937. + /*
  1938. + * We're on the trampoline stack. Copy the trampoline stack's
  1939. + * contents to the kernel task stack and switch to it.
  1940. + */
  1941. + subq %rsp, %rcx
  1942. + movq PER_CPU_VAR(kernel_stack), %rdi
  1943. + addq $KERNEL_STACK_OFFSET, %rdi
  1944. + subq %rcx, %rdi
  1945. + movq %rdi, %rax
  1946. + movq %rsp, %rsi
  1947. + rep movsb
  1948. + movq %rax, %rsp
  1949. +1:
  1950. TRACE_IRQS_OFF
  1951. ret
  1952.  
  1953. @@ -1478,7 +1559,7 @@ ENTRY(nmi)
  1954. /*
  1955. * Fix up the exception frame if we're on Xen.
  1956. * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
  1957. - * one value to the stack on native, so it may clobber the rdx
  1958. + * one value to the stack on native, so it may clobber the rsi
  1959. * scratch slot, but it won't clobber any of the important
  1960. * slots past it.
  1961. *
  1962. @@ -1525,9 +1606,9 @@ ENTRY(nmi)
  1963. * other IST entries.
  1964. */
  1965.  
  1966. - /* Use %rdx as out temp variable throughout */
  1967. - pushq_cfi %rdx
  1968. - CFI_REL_OFFSET rdx, 0
  1969. + /* Use %rsi as out temp variable throughout */
  1970. + pushq_cfi %rsi
  1971. + CFI_REL_OFFSET rsi, 0
  1972.  
  1973. testb $3, CS-RIP+8(%rsp)
  1974. jz .Lnmi_from_kernel
  1975. @@ -1544,19 +1625,20 @@ ENTRY(nmi)
  1976. */
  1977.  
  1978. SWAPGS_UNSAFE_STACK
  1979. + SWITCH_TO_KERNEL_CR3
  1980. cld
  1981. - movq %rsp, %rdx
  1982. + movq %rsp, %rsi
  1983. movq PER_CPU_VAR(kernel_stack), %rsp
  1984. addq $KERNEL_STACK_OFFSET, %rsp
  1985. - pushq 5*8(%rdx) /* pt_regs->ss */
  1986. - pushq 4*8(%rdx) /* pt_regs->rsp */
  1987. - pushq 3*8(%rdx) /* pt_regs->flags */
  1988. - pushq 2*8(%rdx) /* pt_regs->cs */
  1989. - pushq 1*8(%rdx) /* pt_regs->rip */
  1990. + pushq 5*8(%rsi) /* pt_regs->ss */
  1991. + pushq 4*8(%rsi) /* pt_regs->rsp */
  1992. + pushq 3*8(%rsi) /* pt_regs->flags */
  1993. + pushq 2*8(%rsi) /* pt_regs->cs */
  1994. + pushq 1*8(%rsi) /* pt_regs->rip */
  1995. pushq $-1 /* pt_regs->orig_ax */
  1996. pushq %rdi /* pt_regs->di */
  1997. - pushq %rsi /* pt_regs->si */
  1998. - pushq (%rdx) /* pt_regs->dx */
  1999. + pushq (%rsi) /* pt_regs->si */
  2000. + pushq %rdx /* pt_regs->dx */
  2001. pushq %rcx /* pt_regs->cx */
  2002. pushq %rax /* pt_regs->ax */
  2003. pushq %r8 /* pt_regs->r8 */
  2004. @@ -1570,6 +1652,9 @@ ENTRY(nmi)
  2005. pushq %r14 /* pt_regs->r14 */
  2006. pushq %r15 /* pt_regs->r15 */
  2007.  
  2008. + ENABLE_IBRS_CLOBBER /* no indirect jump allowed before IBRS */
  2009. + STUFF_RSB /* no ret allowed before stuffing the RSB */
  2010. +
  2011. /*
  2012. * At this point we no longer need to worry about stack damage
  2013. * due to nesting -- we're on the normal thread stack and we're
  2014. @@ -1585,10 +1670,8 @@ ENTRY(nmi)
  2015. * work, because we don't want to enable interrupts. Fortunately,
  2016. * do_nmi doesn't modify pt_regs.
  2017. */
  2018. - SWAPGS
  2019. -
  2020. addq $6*8, %rsp /* skip bx, bp, and r12-r15 */
  2021. - jmp restore_args
  2022. + jmp retint_userspace_restore_args
  2023.  
  2024. .Lnmi_from_kernel:
  2025. /*
  2026. @@ -1600,7 +1683,7 @@ ENTRY(nmi)
  2027. * | original CS |
  2028. * | original RIP |
  2029. * +---------------------------------------------------------+
  2030. - * | temp storage for rdx |
  2031. + * | temp storage for rsi |
  2032. * +---------------------------------------------------------+
  2033. * | "NMI executing" variable |
  2034. * +---------------------------------------------------------+
  2035. @@ -1642,11 +1725,11 @@ ENTRY(nmi)
  2036. * about to about to call do_nmi anyway, so we can just
  2037. * resume the outer NMI.
  2038. */
  2039. - movq $repeat_nmi, %rdx
  2040. - cmpq 8(%rsp), %rdx
  2041. + movq $repeat_nmi, %rsi
  2042. + cmpq 8(%rsp), %rsi
  2043. ja 1f
  2044. - movq $end_repeat_nmi, %rdx
  2045. - cmpq 8(%rsp), %rdx
  2046. + movq $end_repeat_nmi, %rsi
  2047. + cmpq 8(%rsp), %rsi
  2048. ja nested_nmi_out
  2049. 1:
  2050.  
  2051. @@ -1670,8 +1753,8 @@ ENTRY(nmi)
  2052. * if it controls the kernel's RSP. We set DF before we clear
  2053. * "NMI executing".
  2054. */
  2055. - lea 6*8(%rsp), %rdx
  2056. - test_in_nmi rdx, 4*8(%rsp), nested_nmi, first_nmi
  2057. + lea 6*8(%rsp), %rsi
  2058. + test_in_nmi rsi, 4*8(%rsp), nested_nmi, first_nmi
  2059.  
  2060. /* Ah, it is within the NMI stack. */
  2061.  
  2062. @@ -1687,12 +1770,12 @@ nested_nmi:
  2063. * Modify the "iret" frame to point to repeat_nmi, forcing another
  2064. * iteration of NMI handling.
  2065. */
  2066. - leaq -1*8(%rsp), %rdx
  2067. - movq %rdx, %rsp
  2068. + leaq -1*8(%rsp), %rsi
  2069. + movq %rsi, %rsp
  2070. CFI_ADJUST_CFA_OFFSET 1*8
  2071. - leaq -10*8(%rsp), %rdx
  2072. + leaq -10*8(%rsp), %rsi
  2073. pushq_cfi $__KERNEL_DS
  2074. - pushq_cfi %rdx
  2075. + pushq_cfi %rsi
  2076. pushfq_cfi
  2077. pushq_cfi $__KERNEL_CS
  2078. pushq_cfi $repeat_nmi
  2079. @@ -1702,17 +1785,17 @@ nested_nmi:
  2080. CFI_ADJUST_CFA_OFFSET -6*8
  2081.  
  2082. nested_nmi_out:
  2083. - popq_cfi %rdx
  2084. - CFI_RESTORE rdx
  2085. + popq_cfi %rsi
  2086. + CFI_RESTORE rsi
  2087.  
  2088. /* We are returning to kernel mode, so this cannot result in a fault. */
  2089. INTERRUPT_RETURN
  2090.  
  2091. CFI_RESTORE_STATE
  2092. first_nmi:
  2093. - /* Restore rdx. */
  2094. - movq (%rsp), %rdx
  2095. - CFI_RESTORE rdx
  2096. + /* Restore rsi. */
  2097. + movq (%rsp), %rsi
  2098. + CFI_RESTORE rsi
  2099.  
  2100. /* Set "NMI executing" on the stack. */
  2101. pushq_cfi $1
  2102. @@ -1796,6 +1879,10 @@ end_repeat_nmi:
  2103. movq $-1,%rsi
  2104. call do_nmi
  2105.  
  2106. + RESTORE_IBRS_CLOBBER save_reg=%r13d
  2107. +
  2108. + RESTORE_CR3 scratch_reg=%rax save_reg=%r14
  2109. +
  2110. /* Did the NMI take a page fault? Restore cr2 if it did */
  2111. movq %cr2, %rcx
  2112. cmpq %rcx, %r12
  2113. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/head64.c linux-3.10.0-693.11.6.el7/arch/x86/kernel/head64.c
  2114. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/head64.c 2017-10-27 11:14:15.000000000 +0200
  2115. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/head64.c 2017-12-28 19:59:43.000000000 +0100
  2116. @@ -46,7 +46,7 @@ static void __init reset_early_page_tabl
  2117.  
  2118. next_early_pgt = 0;
  2119.  
  2120. - write_cr3(__pa(early_level4_pgt));
  2121. + __load_cr3(__pa(early_level4_pgt));
  2122. }
  2123.  
  2124. /* Create a new PMD entry */
  2125. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/head_64.S linux-3.10.0-693.11.6.el7/arch/x86/kernel/head_64.S
  2126. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/head_64.S 2017-10-27 11:14:15.000000000 +0200
  2127. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/head_64.S 2017-12-28 19:59:43.000000000 +0100
  2128. @@ -440,6 +440,27 @@ early_idt_ripmsg:
  2129. .balign PAGE_SIZE; \
  2130. GLOBAL(name)
  2131.  
  2132. +#ifdef CONFIG_KAISER
  2133. +/*
  2134. + * Each PGD needs to be 8k long and 8k aligned. We do not
  2135. + * ever go out to userspace with these, so we do not
  2136. + * strictly *need* the second page, but this allows us to
  2137. + * have a single set_pgd() implementation that does not
  2138. + * need to worry about whether it has 4k or 8k to work
  2139. + * with.
  2140. + *
  2141. + * This ensures PGDs are 8k long:
  2142. + */
  2143. +#define KAISER_USER_PGD_FILL 512
  2144. +/* This ensures they are 8k-aligned: */
  2145. +#define NEXT_PGD_PAGE(name) \
  2146. + .balign 2 * PAGE_SIZE; \
  2147. +GLOBAL(name)
  2148. +#else
  2149. +#define NEXT_PGD_PAGE(name) NEXT_PAGE(name)
  2150. +#define KAISER_USER_PGD_FILL 0
  2151. +#endif
  2152. +
  2153. /* Automate the creation of 1 to 1 mapping pmd entries */
  2154. #define PMDS(START, PERM, COUNT) \
  2155. i = 0 ; \
  2156. @@ -449,9 +470,10 @@ GLOBAL(name)
  2157. .endr
  2158.  
  2159. __INITDATA
  2160. -NEXT_PAGE(early_level4_pgt)
  2161. +NEXT_PGD_PAGE(early_level4_pgt)
  2162. .fill 511,8,0
  2163. .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
  2164. + .fill KAISER_USER_PGD_FILL,8,0
  2165.  
  2166. NEXT_PAGE(early_dynamic_pgts)
  2167. .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
  2168. @@ -459,16 +481,18 @@ NEXT_PAGE(early_dynamic_pgts)
  2169. .data
  2170.  
  2171. #ifndef CONFIG_XEN
  2172. -NEXT_PAGE(init_level4_pgt)
  2173. +NEXT_PGD_PAGE(init_level4_pgt)
  2174. .fill 512,8,0
  2175. + .fill KAISER_USER_PGD_FILL,8,0
  2176. #else
  2177. -NEXT_PAGE(init_level4_pgt)
  2178. +NEXT_PGD_PAGE(init_level4_pgt)
  2179. .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
  2180. .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
  2181. .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
  2182. .org init_level4_pgt + L4_START_KERNEL*8, 0
  2183. /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
  2184. .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
  2185. + .fill KAISER_USER_PGD_FILL,8,0
  2186.  
  2187. NEXT_PAGE(level3_ident_pgt)
  2188. .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
  2189. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/hpet.c linux-3.10.0-693.11.6.el7/arch/x86/kernel/hpet.c
  2190. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/hpet.c 2017-10-27 11:14:15.000000000 +0200
  2191. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/hpet.c 2017-12-28 19:59:43.000000000 +0100
  2192. @@ -11,6 +11,7 @@
  2193. #include <linux/cpu.h>
  2194. #include <linux/pm.h>
  2195. #include <linux/io.h>
  2196. +#include <linux/kaiser.h>
  2197.  
  2198. #include <asm/fixmap.h>
  2199. #include <asm/hpet.h>
  2200. @@ -76,6 +77,8 @@ static inline void hpet_set_mapping(void
  2201. hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
  2202. #ifdef CONFIG_X86_64
  2203. __set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VVAR_NOCACHE);
  2204. + kaiser_add_mapping(__fix_to_virt(VSYSCALL_HPET), PAGE_SIZE,
  2205. + __PAGE_KERNEL_VVAR_NOCACHE | _PAGE_GLOBAL);
  2206. #endif
  2207. }
  2208.  
  2209. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/irq_64.c linux-3.10.0-693.11.6.el7/arch/x86/kernel/irq_64.c
  2210. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/irq_64.c 2017-10-27 11:14:15.000000000 +0200
  2211. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/irq_64.c 2017-12-28 19:59:43.000000000 +0100
  2212. @@ -16,6 +16,7 @@
  2213. #include <linux/ftrace.h>
  2214. #include <linux/uaccess.h>
  2215. #include <linux/smp.h>
  2216. +#include <linux/magic.h>
  2217. #include <asm/io_apic.h>
  2218. #include <asm/idle.h>
  2219. #include <asm/apic.h>
  2220. @@ -46,6 +47,9 @@ static inline void stack_overflow_check(
  2221. u64 estack_top, estack_bottom;
  2222. u64 curbase = (u64)task_stack_page(current);
  2223.  
  2224. + if (WARN_ON(__this_cpu_read(init_tss.stack_canary) != STACK_END_MAGIC))
  2225. + __this_cpu_write(init_tss.stack_canary, STACK_END_MAGIC);
  2226. +
  2227. if (user_mode_vm(regs))
  2228. return;
  2229.  
  2230. Только в linux-3.10.0-693.11.6.el7/arch/x86/kernel: kaiser.h
  2231. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/ldt.c linux-3.10.0-693.11.6.el7/arch/x86/kernel/ldt.c
  2232. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/ldt.c 2017-10-27 11:14:15.000000000 +0200
  2233. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/ldt.c 2017-12-28 19:59:43.000000000 +0100
  2234. @@ -10,6 +10,7 @@
  2235. #include <linux/gfp.h>
  2236. #include <linux/sched.h>
  2237. #include <linux/string.h>
  2238. +#include <linux/kaiser.h>
  2239. #include <linux/mm.h>
  2240. #include <linux/smp.h>
  2241. #include <linux/vmalloc.h>
  2242. @@ -28,10 +29,19 @@ static void flush_ldt(void *current_mm)
  2243. }
  2244. #endif
  2245.  
  2246. +static void free_ldt(void *ldt, int size)
  2247. +{
  2248. + if (size * LDT_ENTRY_SIZE > PAGE_SIZE)
  2249. + vfree(ldt);
  2250. + else
  2251. + put_page(virt_to_page(ldt));
  2252. +}
  2253. +
  2254. static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
  2255. {
  2256. void *oldldt, *newldt;
  2257. int oldsize;
  2258. + int ret;
  2259.  
  2260. if (mincount <= pc->size)
  2261. return 0;
  2262. @@ -45,6 +55,13 @@ static int alloc_ldt(mm_context_t *pc, i
  2263.  
  2264. if (!newldt)
  2265. return -ENOMEM;
  2266. + ret = kaiser_add_mapping((unsigned long)newldt,
  2267. + mincount * LDT_ENTRY_SIZE,
  2268. + __PAGE_KERNEL | _PAGE_GLOBAL);
  2269. + if (ret) {
  2270. + free_ldt(newldt, mincount);
  2271. + return -ENOMEM;
  2272. + }
  2273.  
  2274. if (oldsize)
  2275. memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
  2276. @@ -76,11 +93,10 @@ static int alloc_ldt(mm_context_t *pc, i
  2277. #endif
  2278. }
  2279. if (oldsize) {
  2280. + kaiser_remove_mapping((unsigned long)oldldt,
  2281. + oldsize * LDT_ENTRY_SIZE);
  2282. paravirt_free_ldt(oldldt, oldsize);
  2283. - if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
  2284. - vfree(oldldt);
  2285. - else
  2286. - put_page(virt_to_page(oldldt));
  2287. + free_ldt(oldldt, oldsize);
  2288. }
  2289. return 0;
  2290. }
  2291. @@ -131,6 +147,8 @@ void destroy_context(struct mm_struct *m
  2292. if (mm == current->active_mm)
  2293. clear_LDT();
  2294. #endif
  2295. + kaiser_remove_mapping((unsigned long)mm->context.ldt,
  2296. + mm->context.size * LDT_ENTRY_SIZE);
  2297. paravirt_free_ldt(mm->context.ldt, mm->context.size);
  2298. if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
  2299. vfree(mm->context.ldt);
  2300. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/Makefile linux-3.10.0-693.11.6.el7/arch/x86/kernel/Makefile
  2301. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/Makefile 2017-10-27 11:14:15.000000000 +0200
  2302. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/Makefile 2017-12-28 19:59:43.000000000 +0100
  2303. @@ -120,3 +120,5 @@ ifeq ($(CONFIG_X86_64),y)
  2304. obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o
  2305. obj-y += vsmp_64.o
  2306. endif
  2307. +
  2308. +obj-y += spec_ctrl.o
  2309. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/paravirt_patch_64.c linux-3.10.0-693.11.6.el7/arch/x86/kernel/paravirt_patch_64.c
  2310. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/paravirt_patch_64.c 2017-10-27 11:14:15.000000000 +0200
  2311. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/paravirt_patch_64.c 2017-12-28 19:59:43.000000000 +0100
  2312. @@ -1,5 +1,6 @@
  2313. #include <asm/paravirt.h>
  2314. #include <asm/asm-offsets.h>
  2315. +#include <asm/processor.h>
  2316. #include <linux/stringify.h>
  2317.  
  2318. DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
  2319. @@ -66,7 +67,14 @@ unsigned native_patch(u8 type, u16 clobb
  2320. PATCH_SITE(pv_mmu_ops, read_cr3);
  2321. PATCH_SITE(pv_mmu_ops, write_cr3);
  2322. PATCH_SITE(pv_cpu_ops, clts);
  2323. - PATCH_SITE(pv_mmu_ops, flush_tlb_single);
  2324. + case PARAVIRT_PATCH(pv_mmu_ops.flush_tlb_single):
  2325. + if (!boot_cpu_has(X86_FEATURE_PCID)) {
  2326. + start = start_pv_mmu_ops_flush_tlb_single;
  2327. + end = end_pv_mmu_ops_flush_tlb_single;
  2328. + goto patch_site;
  2329. + } else {
  2330. + goto patch_default;
  2331. + }
  2332. PATCH_SITE(pv_cpu_ops, wbinvd);
  2333. #if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
  2334. case PARAVIRT_PATCH(pv_lock_ops.unlock_kick):
  2335. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/process_64.c linux-3.10.0-693.11.6.el7/arch/x86/kernel/process_64.c
  2336. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/process_64.c 2017-10-27 11:14:15.000000000 +0200
  2337. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/process_64.c 2017-12-28 19:59:43.000000000 +0100
  2338. @@ -280,9 +280,6 @@ __switch_to(struct task_struct *prev_p,
  2339.  
  2340. fpu = switch_fpu_prepare(prev_p, next_p, cpu);
  2341.  
  2342. - /* Reload esp0 and ss1. */
  2343. - load_sp0(tss, next);
  2344. -
  2345. /* We must save %fs and %gs before load_TLS() because
  2346. * %fs and %gs may be cleared by load_TLS().
  2347. *
  2348. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/process.c linux-3.10.0-693.11.6.el7/arch/x86/kernel/process.c
  2349. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/process.c 2017-10-27 11:14:15.000000000 +0200
  2350. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/process.c 2017-12-28 19:59:43.000000000 +0100
  2351. @@ -37,7 +37,7 @@
  2352. * section. Since TSS's are completely CPU-local, we want them
  2353. * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  2354. */
  2355. -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
  2356. +DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(struct tss_struct, init_tss) = INIT_TSS;
  2357.  
  2358. #ifdef CONFIG_X86_64
  2359. static DEFINE_PER_CPU(unsigned char, is_idle);
  2360. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/reboot.c linux-3.10.0-693.11.6.el7/arch/x86/kernel/reboot.c
  2361. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/reboot.c 2017-10-27 11:14:15.000000000 +0200
  2362. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/reboot.c 2017-12-28 19:59:43.000000000 +0100
  2363. @@ -180,6 +180,10 @@ void __noreturn machine_real_restart(uns
  2364. load_cr3(initial_page_table);
  2365. #else
  2366. write_cr3(real_mode_header->trampoline_pgd);
  2367. +
  2368. + /* Exiting long mode will fail if CR4.PCIDE is set. */
  2369. + if (static_cpu_has(X86_FEATURE_PCID))
  2370. + clear_in_cr4(X86_CR4_PCIDE);
  2371. #endif
  2372.  
  2373. /* Jump to the identity-mapped low memory code */
  2374. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/smpboot.c linux-3.10.0-693.11.6.el7/arch/x86/kernel/smpboot.c
  2375. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/smpboot.c 2017-10-27 11:14:15.000000000 +0200
  2376. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/smpboot.c 2017-12-28 19:59:43.000000000 +0100
  2377. @@ -198,10 +198,12 @@ static int enable_start_cpu0;
  2378. static void notrace start_secondary(void *unused)
  2379. {
  2380. /*
  2381. - * Don't put *anything* before cpu_init(), SMP booting is too
  2382. - * fragile that we want to limit the things done here to the
  2383. - * most necessary things.
  2384. + * Don't put *anything* except direct CPU state initialization
  2385. + * before cpu_init(), SMP booting is too fragile that we want to
  2386. + * limit the things done here to the most necessary things.
  2387. */
  2388. + if (boot_cpu_has(X86_FEATURE_PCID))
  2389. + write_cr4(read_cr4() | X86_CR4_PCIDE);
  2390. cpu_init();
  2391. x86_cpuinit.early_percpu_clock_init();
  2392. preempt_disable();
  2393. @@ -1650,9 +1652,13 @@ void native_play_dead(void)
  2394. play_dead_common();
  2395. tboot_shutdown(TB_SHUTDOWN_WFS);
  2396.  
  2397. + spec_ctrl_disable_ibrs();
  2398. +
  2399. mwait_play_dead(); /* Only returns on failure */
  2400. if (cpuidle_play_dead())
  2401. hlt_play_dead();
  2402. +
  2403. + spec_ctrl_enable_ibrs();
  2404. }
  2405.  
  2406. #else /* ... !CONFIG_HOTPLUG_CPU */
  2407. Только в linux-3.10.0-693.11.6.el7/arch/x86/kernel: spec_ctrl.c
  2408. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/vmlinux.lds.S linux-3.10.0-693.11.6.el7/arch/x86/kernel/vmlinux.lds.S
  2409. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/vmlinux.lds.S 2017-10-27 11:14:15.000000000 +0200
  2410. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/vmlinux.lds.S 2017-12-28 19:59:43.000000000 +0100
  2411. @@ -79,6 +79,12 @@ PHDRS {
  2412. note PT_NOTE FLAGS(0); /* ___ */
  2413. }
  2414.  
  2415. +#ifdef CONFIG_KAISER
  2416. +#define ALIGN_KAISER() . = ALIGN(PAGE_SIZE);
  2417. +#else
  2418. +#define ALIGN_KAISER()
  2419. +#endif
  2420. +
  2421. SECTIONS
  2422. {
  2423. #ifdef CONFIG_X86_32
  2424. @@ -100,9 +106,11 @@ SECTIONS
  2425. SCHED_TEXT
  2426. CPUIDLE_TEXT
  2427. LOCK_TEXT
  2428. + ALIGN_KAISER()
  2429. KPROBES_TEXT
  2430. ENTRY_TEXT
  2431. IRQENTRY_TEXT
  2432. + ALIGN_KAISER()
  2433. *(.fixup)
  2434. *(.gnu.warning)
  2435. /* End of text section */
  2436. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kernel/vsyscall_64.c linux-3.10.0-693.11.6.el7/arch/x86/kernel/vsyscall_64.c
  2437. --- linux-3.10.0-693.11.1.el7/arch/x86/kernel/vsyscall_64.c 2017-10-27 11:14:15.000000000 +0200
  2438. +++ linux-3.10.0-693.11.6.el7/arch/x86/kernel/vsyscall_64.c 2017-12-28 19:59:43.000000000 +0100
  2439. @@ -56,7 +56,7 @@
  2440. DEFINE_VVAR(int, vgetcpu_mode);
  2441. DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
  2442.  
  2443. -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
  2444. +enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
  2445.  
  2446. static int __init vsyscall_setup(char *str)
  2447. {
  2448. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kvm/cpuid.c linux-3.10.0-693.11.6.el7/arch/x86/kvm/cpuid.c
  2449. --- linux-3.10.0-693.11.1.el7/arch/x86/kvm/cpuid.c 2017-10-27 11:14:15.000000000 +0200
  2450. +++ linux-3.10.0-693.11.6.el7/arch/x86/kvm/cpuid.c 2017-12-28 19:59:43.000000000 +0100
  2451. @@ -68,8 +68,16 @@ u64 kvm_supported_xcr0(void)
  2452. #define F(x) bit(X86_FEATURE_##x)
  2453.  
  2454. /* These are scattered features in cpufeatures.h. */
  2455. +
  2456. +/* CPUID[eax=7,ecx=0].edx */
  2457. #define KVM_CPUID_BIT_AVX512_4VNNIW 2
  2458. #define KVM_CPUID_BIT_AVX512_4FMAPS 3
  2459. +#define KVM_CPUID_BIT_SPEC_CTRL 26
  2460. +#define KVM_CPUID_BIT_STIBP 27
  2461. +
  2462. +/* CPUID[eax=0x80000008].ebx */
  2463. +#define KVM_CPUID_BIT_IBPB_SUPPORT 12
  2464. +
  2465. #define KF(x) bit(KVM_CPUID_BIT_##x)
  2466.  
  2467. int kvm_update_cpuid(struct kvm_vcpu *vcpu)
  2468. @@ -375,7 +383,12 @@ static inline int __do_cpuid_ent(struct
  2469.  
  2470. /* cpuid 7.0.edx*/
  2471. const u32 kvm_cpuid_7_0_edx_x86_features =
  2472. - KF(AVX512_4VNNIW) | KF(AVX512_4FMAPS);
  2473. + KF(AVX512_4VNNIW) | KF(AVX512_4FMAPS) |
  2474. + KF(SPEC_CTRL) | KF(STIBP);
  2475. +
  2476. + /* cpuid 0x80000008.ebx */
  2477. + const u32 kvm_cpuid_80000008_ebx_x86_features =
  2478. + KF(IBPB_SUPPORT);
  2479.  
  2480. /* all calls to cpuid_count() should be made on the same cpu */
  2481. get_cpu();
  2482. @@ -607,7 +620,9 @@ static inline int __do_cpuid_ent(struct
  2483. if (!g_phys_as)
  2484. g_phys_as = phys_as;
  2485. entry->eax = g_phys_as | (virt_as << 8);
  2486. - entry->ebx = entry->edx = 0;
  2487. + entry->ebx &= kvm_cpuid_80000008_ebx_x86_features;
  2488. + entry->ebx &= get_scattered_cpuid_leaf(0x80000008, 0, CPUID_EBX);
  2489. + entry->edx = 0;
  2490. break;
  2491. }
  2492. case 0x80000019:
  2493. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kvm/svm.c linux-3.10.0-693.11.6.el7/arch/x86/kvm/svm.c
  2494. --- linux-3.10.0-693.11.1.el7/arch/x86/kvm/svm.c 2017-10-27 11:14:15.000000000 +0200
  2495. +++ linux-3.10.0-693.11.6.el7/arch/x86/kvm/svm.c 2017-12-28 19:59:43.000000000 +0100
  2496. @@ -163,6 +163,8 @@ struct vcpu_svm {
  2497. u64 gs_base;
  2498. } host;
  2499.  
  2500. + u64 spec_ctrl;
  2501. +
  2502. u32 *msrpm;
  2503.  
  2504. ulong nmi_iret_rip;
  2505. @@ -199,7 +201,7 @@ static DEFINE_PER_CPU(u64, current_tsc_r
  2506.  
  2507. static const struct svm_direct_access_msrs {
  2508. u32 index; /* Index of the MSR */
  2509. - bool always; /* True if intercept is always on */
  2510. + bool always; /* True if intercept is always off */
  2511. } direct_access_msrs[] = {
  2512. { .index = MSR_STAR, .always = true },
  2513. { .index = MSR_IA32_SYSENTER_CS, .always = true },
  2514. @@ -211,6 +213,8 @@ static const struct svm_direct_access_ms
  2515. { .index = MSR_CSTAR, .always = true },
  2516. { .index = MSR_SYSCALL_MASK, .always = true },
  2517. #endif
  2518. + { .index = MSR_IA32_SPEC_CTRL, .always = true },
  2519. + { .index = MSR_IA32_PRED_CMD, .always = true },
  2520. { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
  2521. { .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
  2522. { .index = MSR_IA32_LASTINTFROMIP, .always = false },
  2523. @@ -469,6 +473,8 @@ struct svm_cpu_data {
  2524. struct kvm_ldttss_desc *tss_desc;
  2525.  
  2526. struct page *save_area;
  2527. +
  2528. + struct vmcb *current_vmcb;
  2529. };
  2530.  
  2531. static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
  2532. @@ -1476,11 +1482,18 @@ static void svm_free_vcpu(struct kvm_vcp
  2533. __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
  2534. kvm_vcpu_uninit(vcpu);
  2535. kmem_cache_free(kvm_vcpu_cache, svm);
  2536. +
  2537. + /*
  2538. + * The VMCB could be recycled, causing a false negative in svm_vcpu_load;
  2539. + * block speculative execution.
  2540. + */
  2541. + spec_ctrl_ibpb();
  2542. }
  2543.  
  2544. static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
  2545. {
  2546. struct vcpu_svm *svm = to_svm(vcpu);
  2547. + struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
  2548. int i;
  2549.  
  2550. if (unlikely(cpu != vcpu->cpu)) {
  2551. @@ -1506,6 +1519,11 @@ static void svm_vcpu_load(struct kvm_vcp
  2552. }
  2553. }
  2554.  
  2555. + if (sd->current_vmcb != svm->vmcb) {
  2556. + sd->current_vmcb = svm->vmcb;
  2557. + spec_ctrl_ibpb();
  2558. + }
  2559. +
  2560. avic_vcpu_load(vcpu, cpu);
  2561. }
  2562.  
  2563. @@ -2522,6 +2540,11 @@ static int nested_svm_vmexit(struct vcpu
  2564. if (!nested_vmcb)
  2565. return 1;
  2566.  
  2567. + /*
  2568. + * No need for IBPB here, the L1 hypervisor should be running with
  2569. + * IBRS=1 and inserts one already when switching L2 VMs.
  2570. + */
  2571. +
  2572. /* Exit Guest-Mode */
  2573. leave_guest_mode(&svm->vcpu);
  2574. svm->nested.vmcb = 0;
  2575. @@ -2688,6 +2711,11 @@ static bool nested_svm_vmrun(struct vcpu
  2576. if (!nested_vmcb)
  2577. return false;
  2578.  
  2579. + /*
  2580. + * No need for IBPB here, since the nested VM is less privileged. The
  2581. + * L1 hypervisor inserts one already when switching L2 VMs.
  2582. + */
  2583. +
  2584. if (!nested_vmcb_checks(nested_vmcb)) {
  2585. nested_vmcb->control.exit_code = SVM_EXIT_ERR;
  2586. nested_vmcb->control.exit_code_hi = 0;
  2587. @@ -3317,6 +3345,9 @@ static int svm_get_msr(struct kvm_vcpu *
  2588. case MSR_VM_CR:
  2589. msr_info->data = svm->nested.vm_cr_msr;
  2590. break;
  2591. + case MSR_IA32_SPEC_CTRL:
  2592. + msr_info->data = svm->spec_ctrl;
  2593. + break;
  2594. case MSR_IA32_UCODE_REV:
  2595. msr_info->data = 0x01000065;
  2596. break;
  2597. @@ -3436,6 +3467,9 @@ static int svm_set_msr(struct kvm_vcpu *
  2598. case MSR_VM_IGNNE:
  2599. vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
  2600. break;
  2601. + case MSR_IA32_SPEC_CTRL:
  2602. + svm->spec_ctrl = data;
  2603. + break;
  2604. case MSR_IA32_APICBASE:
  2605. if (kvm_vcpu_apicv_active(vcpu))
  2606. avic_update_vapic_bar(to_svm(vcpu), data);
  2607. @@ -4396,6 +4430,8 @@ static void svm_vcpu_run(struct kvm_vcpu
  2608.  
  2609. local_irq_enable();
  2610.  
  2611. + spec_ctrl_vmenter_ibrs(svm->spec_ctrl);
  2612. +
  2613. asm volatile (
  2614. "push %%" _ASM_BP "; \n\t"
  2615. "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
  2616. @@ -4440,6 +4476,25 @@ static void svm_vcpu_run(struct kvm_vcpu
  2617. "mov %%r14, %c[r14](%[svm]) \n\t"
  2618. "mov %%r15, %c[r15](%[svm]) \n\t"
  2619. #endif
  2620. + /*
  2621. + * Clear host registers marked as clobbered to prevent
  2622. + * speculative use.
  2623. + */
  2624. + "xor %%" _ASM_BX ", %%" _ASM_BX " \n\t"
  2625. + "xor %%" _ASM_CX ", %%" _ASM_CX " \n\t"
  2626. + "xor %%" _ASM_DX ", %%" _ASM_DX " \n\t"
  2627. + "xor %%" _ASM_SI ", %%" _ASM_SI " \n\t"
  2628. + "xor %%" _ASM_DI ", %%" _ASM_DI " \n\t"
  2629. +#ifdef CONFIG_X86_64
  2630. + "xor %%r8, %%r8 \n\t"
  2631. + "xor %%r9, %%r9 \n\t"
  2632. + "xor %%r10, %%r10 \n\t"
  2633. + "xor %%r11, %%r11 \n\t"
  2634. + "xor %%r12, %%r12 \n\t"
  2635. + "xor %%r13, %%r13 \n\t"
  2636. + "xor %%r14, %%r14 \n\t"
  2637. + "xor %%r15, %%r15 \n\t"
  2638. +#endif
  2639. "pop %%" _ASM_BP
  2640. :
  2641. : [svm]"a"(svm),
  2642. @@ -4470,7 +4525,7 @@ static void svm_vcpu_run(struct kvm_vcpu
  2643. );
  2644.  
  2645. #ifdef CONFIG_X86_64
  2646. - wrmsrl(MSR_GS_BASE, svm->host.gs_base);
  2647. + native_wrmsrl(MSR_GS_BASE, svm->host.gs_base);
  2648. #else
  2649. loadsegment(fs, svm->host.fs);
  2650. #ifndef CONFIG_X86_32_LAZY_GS
  2651. @@ -4478,6 +4533,12 @@ static void svm_vcpu_run(struct kvm_vcpu
  2652. #endif
  2653. #endif
  2654.  
  2655. + if (cpu_has_spec_ctrl()) {
  2656. + rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
  2657. + __spec_ctrl_vmexit_ibrs(svm->spec_ctrl);
  2658. + }
  2659. + stuff_RSB();
  2660. +
  2661. reload_tss(vcpu);
  2662.  
  2663. local_irq_disable();
  2664. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kvm/vmx.c linux-3.10.0-693.11.6.el7/arch/x86/kvm/vmx.c
  2665. --- linux-3.10.0-693.11.1.el7/arch/x86/kvm/vmx.c 2017-10-27 11:14:15.000000000 +0200
  2666. +++ linux-3.10.0-693.11.6.el7/arch/x86/kvm/vmx.c 2017-12-28 19:59:43.000000000 +0100
  2667. @@ -544,6 +544,8 @@ struct vcpu_vmx {
  2668. u64 msr_host_kernel_gs_base;
  2669. u64 msr_guest_kernel_gs_base;
  2670. #endif
  2671. + u64 spec_ctrl;
  2672. +
  2673. u32 vm_entry_controls_shadow;
  2674. u32 vm_exit_controls_shadow;
  2675. /*
  2676. @@ -2076,6 +2078,7 @@ static void vmx_vcpu_load(struct kvm_vcp
  2677. if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
  2678. per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
  2679. vmcs_load(vmx->loaded_vmcs->vmcs);
  2680. + spec_ctrl_ibpb();
  2681. }
  2682.  
  2683. if (vmx->loaded_vmcs->cpu != cpu) {
  2684. @@ -2796,6 +2799,9 @@ static int vmx_get_msr(struct kvm_vcpu *
  2685. case MSR_IA32_TSC:
  2686. msr_info->data = guest_read_tsc(vcpu);
  2687. break;
  2688. + case MSR_IA32_SPEC_CTRL:
  2689. + msr_info->data = to_vmx(vcpu)->spec_ctrl;
  2690. + break;
  2691. case MSR_IA32_SYSENTER_CS:
  2692. msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
  2693. break;
  2694. @@ -2891,6 +2897,9 @@ static int vmx_set_msr(struct kvm_vcpu *
  2695. case MSR_IA32_TSC:
  2696. kvm_write_tsc(vcpu, msr_info);
  2697. break;
  2698. + case MSR_IA32_SPEC_CTRL:
  2699. + to_vmx(vcpu)->spec_ctrl = msr_info->data;
  2700. + break;
  2701. case MSR_IA32_CR_PAT:
  2702. if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
  2703. if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
  2704. @@ -3328,6 +3337,12 @@ static void free_loaded_vmcs(struct load
  2705. free_vmcs(loaded_vmcs->vmcs);
  2706. loaded_vmcs->vmcs = NULL;
  2707. WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
  2708. +
  2709. + /*
  2710. + * The VMCS could be recycled, causing a false negative in vmx_vcpu_load;
  2711. + * block speculative execution.
  2712. + */
  2713. + spec_ctrl_ibpb();
  2714. }
  2715.  
  2716. static void free_kvm_area(void)
  2717. @@ -6196,6 +6211,8 @@ static __init int hardware_setup(void)
  2718. vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
  2719. vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
  2720. vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
  2721. + vmx_disable_intercept_for_msr(MSR_IA32_SPEC_CTRL, false);
  2722. + vmx_disable_intercept_for_msr(MSR_IA32_PRED_CMD, false);
  2723.  
  2724. memcpy(vmx_msr_bitmap_legacy_x2apic,
  2725. vmx_msr_bitmap_legacy, PAGE_SIZE);
  2726. @@ -8548,6 +8565,8 @@ static void __noclone vmx_vcpu_run(struc
  2727. if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
  2728. vmx_set_interrupt_shadow(vcpu, 0);
  2729.  
  2730. + spec_ctrl_vmenter_ibrs(vmx->spec_ctrl);
  2731. +
  2732. atomic_switch_perf_msrs(vmx);
  2733. debugctlmsr = get_debugctlmsr();
  2734.  
  2735. @@ -8621,6 +8640,23 @@ static void __noclone vmx_vcpu_run(struc
  2736.  
  2737. "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
  2738. "setbe %c[fail](%0) \n\t"
  2739. + /*
  2740. + * Clear host registers marked as clobbered to prevent
  2741. + * speculative use.
  2742. + */
  2743. + "xor %%" _ASM_BX ", %%" _ASM_BX " \n\t"
  2744. + "xor %%" _ASM_SI ", %%" _ASM_SI " \n\t"
  2745. + "xor %%" _ASM_DI ", %%" _ASM_DI " \n\t"
  2746. +#ifdef CONFIG_X86_64
  2747. + "xor %%r8, %%r8 \n\t"
  2748. + "xor %%r9, %%r9 \n\t"
  2749. + "xor %%r10, %%r10 \n\t"
  2750. + "xor %%r11, %%r11 \n\t"
  2751. + "xor %%r12, %%r12 \n\t"
  2752. + "xor %%r13, %%r13 \n\t"
  2753. + "xor %%r14, %%r14 \n\t"
  2754. + "xor %%r15, %%r15 \n\t"
  2755. +#endif
  2756. ".pushsection .rodata \n\t"
  2757. ".global vmx_return \n\t"
  2758. "vmx_return: " _ASM_PTR " 2b \n\t"
  2759. @@ -8657,6 +8693,12 @@ static void __noclone vmx_vcpu_run(struc
  2760. #endif
  2761. );
  2762.  
  2763. + if (cpu_has_spec_ctrl()) {
  2764. + rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
  2765. + __spec_ctrl_vmexit_ibrs(vmx->spec_ctrl);
  2766. + }
  2767. + stuff_RSB();
  2768. +
  2769. /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
  2770. if (debugctlmsr)
  2771. update_debugctlmsr(debugctlmsr);
  2772. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/kvm/x86.c linux-3.10.0-693.11.6.el7/arch/x86/kvm/x86.c
  2773. --- linux-3.10.0-693.11.1.el7/arch/x86/kvm/x86.c 2017-10-27 11:14:15.000000000 +0200
  2774. +++ linux-3.10.0-693.11.6.el7/arch/x86/kvm/x86.c 2017-12-28 19:59:43.000000000 +0100
  2775. @@ -768,7 +768,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, u
  2776. return 1;
  2777.  
  2778. /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
  2779. - if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
  2780. + if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_ASID_MASK) ||
  2781. + !is_long_mode(vcpu))
  2782. return 1;
  2783. }
  2784.  
  2785. @@ -969,6 +970,7 @@ static u32 msrs_to_save[] = {
  2786. #endif
  2787. MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
  2788. MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
  2789. + MSR_IA32_SPEC_CTRL,
  2790. };
  2791.  
  2792. static unsigned num_msrs_to_save;
  2793. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/mm/init.c linux-3.10.0-693.11.6.el7/arch/x86/mm/init.c
  2794. --- linux-3.10.0-693.11.1.el7/arch/x86/mm/init.c 2017-10-27 11:14:15.000000000 +0200
  2795. +++ linux-3.10.0-693.11.6.el7/arch/x86/mm/init.c 2017-12-28 19:59:43.000000000 +0100
  2796. @@ -18,6 +18,8 @@
  2797. #include <asm/dma.h> /* for MAX_DMA_PFN */
  2798. #include <asm/microcode.h>
  2799. #include <asm/kaslr.h>
  2800. +#include <asm/cpufeature.h>
  2801. +#include <asm/mmu_context.h>
  2802.  
  2803. #include "mm_internal.h"
  2804.  
  2805. @@ -192,6 +194,47 @@ static void __init probe_page_size_mask(
  2806. }
  2807. }
  2808.  
  2809. +static void setup_pcid(void)
  2810. +{
  2811. +#ifdef CONFIG_X86_64
  2812. + if (boot_cpu_has(X86_FEATURE_PCID)) {
  2813. + if (boot_cpu_has(X86_FEATURE_PGE)) {
  2814. + /*
  2815. + * This can't be cr4_set_bits_and_update_boot() --
  2816. + * the trampoline code can't handle CR4.PCIDE and
  2817. + * it wouldn't do any good anyway. Despite the name,
  2818. + * cr4_set_bits_and_update_boot() doesn't actually
  2819. + * cause the bits in question to remain set all the
  2820. + * way through the secondary boot asm.
  2821. + *
  2822. + * Instead, we brute-force it and set CR4.PCIDE
  2823. + * manually in start_secondary().
  2824. + */
  2825. + set_in_cr4(X86_CR4_PCIDE);
  2826. + /*
  2827. + * INVPCID's single-context modes (2/3) only work
  2828. + * if we set X86_CR4_PCIDE, *and* we INVPCID
  2829. + * support. It's unusable on systems that have
  2830. + * X86_CR4_PCIDE clear, or that have no INVPCID
  2831. + * support at all.
  2832. + */
  2833. + if (boot_cpu_has(X86_FEATURE_INVPCID))
  2834. + setup_force_cpu_cap(X86_FEATURE_INVPCID_SINGLE);
  2835. + } else {
  2836. + /*
  2837. + * flush_tlb_all(), as currently implemented, won't
  2838. + * work if PCID is on but PGE is not. Since that
  2839. + * combination doesn't exist on real hardware, there's
  2840. + * no reason to try to fully support it, but it's
  2841. + * polite to avoid corrupting data if we're on
  2842. + * an improperly configured VM.
  2843. + */
  2844. + setup_clear_cpu_cap(X86_FEATURE_PCID);
  2845. + }
  2846. + }
  2847. +#endif
  2848. +}
  2849. +
  2850. #ifdef CONFIG_X86_32
  2851. #define NR_RANGE_MR 3
  2852. #else /* CONFIG_X86_64 */
  2853. @@ -570,6 +613,7 @@ void __init init_mem_mapping(void)
  2854. unsigned long end;
  2855.  
  2856. probe_page_size_mask();
  2857. + setup_pcid();
  2858.  
  2859. #ifdef CONFIG_X86_64
  2860. end = max_pfn << PAGE_SHIFT;
  2861. Только в linux-3.10.0-693.11.6.el7/arch/x86/mm: kaiser.c
  2862. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/mm/Makefile linux-3.10.0-693.11.6.el7/arch/x86/mm/Makefile
  2863. --- linux-3.10.0-693.11.1.el7/arch/x86/mm/Makefile 2017-10-27 11:14:15.000000000 +0200
  2864. +++ linux-3.10.0-693.11.6.el7/arch/x86/mm/Makefile 2017-12-28 19:59:43.000000000 +0100
  2865. @@ -33,5 +33,6 @@ obj-$(CONFIG_MEMTEST) += memtest.o
  2866.  
  2867. obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
  2868. obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o
  2869. +obj-$(CONFIG_KAISER) += kaiser.o
  2870.  
  2871. obj-$(CONFIG_TRACK_DIRTY_PAGES) += track.o
  2872. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/mm/pageattr.c linux-3.10.0-693.11.6.el7/arch/x86/mm/pageattr.c
  2873. --- linux-3.10.0-693.11.1.el7/arch/x86/mm/pageattr.c 2017-10-27 11:14:15.000000000 +0200
  2874. +++ linux-3.10.0-693.11.6.el7/arch/x86/mm/pageattr.c 2017-12-28 19:59:43.000000000 +0100
  2875. @@ -501,9 +501,9 @@ try_preserve_large_page(pte_t *kpte, uns
  2876. * for the ancient hardware that doesn't support it.
  2877. */
  2878. if (pgprot_val(req_prot) & _PAGE_PRESENT)
  2879. - pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
  2880. + pgprot_val(req_prot) |= _PAGE_PSE | __PAGE_KERNEL_GLOBAL;
  2881. else
  2882. - pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
  2883. + pgprot_val(req_prot) &= ~(_PAGE_PSE | __PAGE_KERNEL_GLOBAL);
  2884.  
  2885. req_prot = canon_pgprot(req_prot);
  2886.  
  2887. @@ -617,9 +617,9 @@ __split_large_page(struct cpa_data *cpa,
  2888. * for the ancient hardware that doesn't support it.
  2889. */
  2890. if (pgprot_val(ref_prot) & _PAGE_PRESENT)
  2891. - pgprot_val(ref_prot) |= _PAGE_GLOBAL;
  2892. + pgprot_val(ref_prot) |= __PAGE_KERNEL_GLOBAL;
  2893. else
  2894. - pgprot_val(ref_prot) &= ~_PAGE_GLOBAL;
  2895. + pgprot_val(ref_prot) &= ~__PAGE_KERNEL_GLOBAL;
  2896.  
  2897. /*
  2898. * Get the target pfn from the original entry:
  2899. @@ -1134,9 +1134,9 @@ repeat:
  2900. * support it.
  2901. */
  2902. if (pgprot_val(new_prot) & _PAGE_PRESENT)
  2903. - pgprot_val(new_prot) |= _PAGE_GLOBAL;
  2904. + pgprot_val(new_prot) |= __PAGE_KERNEL_GLOBAL;
  2905. else
  2906. - pgprot_val(new_prot) &= ~_PAGE_GLOBAL;
  2907. + pgprot_val(new_prot) &= ~__PAGE_KERNEL_GLOBAL;
  2908.  
  2909. /*
  2910. * We need to keep the pfn from the existing PTE,
  2911. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/mm/pgtable.c linux-3.10.0-693.11.6.el7/arch/x86/mm/pgtable.c
  2912. --- linux-3.10.0-693.11.1.el7/arch/x86/mm/pgtable.c 2017-10-27 11:14:15.000000000 +0200
  2913. +++ linux-3.10.0-693.11.6.el7/arch/x86/mm/pgtable.c 2017-12-28 19:59:43.000000000 +0100
  2914. @@ -274,12 +274,23 @@ static void pgd_prepopulate_pmd(struct m
  2915. }
  2916. }
  2917.  
  2918. +#ifdef CONFIG_KAISER
  2919. +/*
  2920. + * Instead of one pgd, we acquire two pgds. Being order-1, it is
  2921. + * both 8k in size and 8k-aligned. That lets us just flip bit 12
  2922. + * in a pointer to swap between the two 4k halves.
  2923. + */
  2924. +#define PGD_ALLOCATION_ORDER 1
  2925. +#else
  2926. +#define PGD_ALLOCATION_ORDER 0
  2927. +#endif
  2928. +
  2929. pgd_t *pgd_alloc(struct mm_struct *mm)
  2930. {
  2931. pgd_t *pgd;
  2932. pmd_t *pmds[PREALLOCATED_PMDS];
  2933.  
  2934. - pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
  2935. + pgd = (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
  2936.  
  2937. if (pgd == NULL)
  2938. goto out;
  2939. @@ -309,7 +320,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
  2940. out_free_pmds:
  2941. free_pmds(pmds);
  2942. out_free_pgd:
  2943. - free_page((unsigned long)pgd);
  2944. + free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
  2945. out:
  2946. return NULL;
  2947. }
  2948. @@ -319,7 +330,7 @@ void pgd_free(struct mm_struct *mm, pgd_
  2949. pgd_mop_up_pmds(mm, pgd);
  2950. pgd_dtor(pgd);
  2951. paravirt_pgd_free(mm, pgd);
  2952. - free_page((unsigned long)pgd);
  2953. + free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
  2954. }
  2955.  
  2956. /*
  2957. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/platform/efi/early_printk.c linux-3.10.0-693.11.6.el7/arch/x86/platform/efi/early_printk.c
  2958. --- linux-3.10.0-693.11.1.el7/arch/x86/platform/efi/early_printk.c 2017-10-27 11:14:15.000000000 +0200
  2959. +++ linux-3.10.0-693.11.6.el7/arch/x86/platform/efi/early_printk.c 2017-12-28 19:59:43.000000000 +0100
  2960. @@ -187,6 +187,7 @@ early_efi_write(struct console *con, con
  2961. if (efi_y + font->height > si->lfb_height) {
  2962. u32 i;
  2963.  
  2964. + gmb();
  2965. efi_y -= font->height;
  2966. early_efi_scroll_up();
  2967.  
  2968. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/power/cpu.c linux-3.10.0-693.11.6.el7/arch/x86/power/cpu.c
  2969. --- linux-3.10.0-693.11.1.el7/arch/x86/power/cpu.c 2017-10-27 11:14:15.000000000 +0200
  2970. +++ linux-3.10.0-693.11.6.el7/arch/x86/power/cpu.c 2017-12-28 19:59:43.000000000 +0100
  2971. @@ -24,6 +24,7 @@
  2972. #include <asm/debugreg.h>
  2973. #include <asm/fpu-internal.h> /* pcntxt_mask */
  2974. #include <asm/cpu.h>
  2975. +#include <asm/mmu_context.h>
  2976.  
  2977. #ifdef CONFIG_X86_32
  2978. unsigned long saved_context_ebx;
  2979. @@ -183,7 +184,6 @@ static void notrace __restore_processor_
  2980. write_cr8(ctxt->cr8);
  2981. write_cr4(ctxt->cr4);
  2982. #endif
  2983. - write_cr3(ctxt->cr3);
  2984. write_cr2(ctxt->cr2);
  2985. write_cr0(ctxt->cr0);
  2986.  
  2987. @@ -226,6 +226,12 @@ static void notrace __restore_processor_
  2988. #endif
  2989.  
  2990. /*
  2991. + * __load_cr3 requires kernel %gs to be initialized to be able
  2992. + * to access per-cpu areas.
  2993. + */
  2994. + __load_cr3(ctxt->cr3);
  2995. +
  2996. + /*
  2997. * restore XCR0 for xsave capable cpu's.
  2998. */
  2999. if (cpu_has_xsave)
  3000. @@ -237,6 +243,7 @@ static void notrace __restore_processor_
  3001. x86_platform.restore_sched_clock_state();
  3002. mtrr_bp_restore();
  3003. perf_restore_debug_store();
  3004. + spec_ctrl_cpu_init();
  3005. }
  3006.  
  3007. /* Needed by apm.c */
  3008. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/um/asm/barrier.h linux-3.10.0-693.11.6.el7/arch/x86/um/asm/barrier.h
  3009. --- linux-3.10.0-693.11.1.el7/arch/x86/um/asm/barrier.h 2017-10-27 11:14:15.000000000 +0200
  3010. +++ linux-3.10.0-693.11.6.el7/arch/x86/um/asm/barrier.h 2017-12-28 19:59:43.000000000 +0100
  3011. @@ -64,7 +64,6 @@
  3012. */
  3013. static inline void rdtsc_barrier(void)
  3014. {
  3015. - alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
  3016. alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
  3017. }
  3018.  
  3019. diff -upr linux-3.10.0-693.11.1.el7/arch/x86/xen/smp.c linux-3.10.0-693.11.6.el7/arch/x86/xen/smp.c
  3020. --- linux-3.10.0-693.11.1.el7/arch/x86/xen/smp.c 2017-10-27 11:14:15.000000000 +0200
  3021. +++ linux-3.10.0-693.11.6.el7/arch/x86/xen/smp.c 2017-12-28 19:59:43.000000000 +0100
  3022. @@ -23,6 +23,7 @@
  3023. #include <asm/desc.h>
  3024. #include <asm/pgtable.h>
  3025. #include <asm/cpu.h>
  3026. +#include <asm/mmu_context.h>
  3027.  
  3028. #include <xen/interface/xen.h>
  3029. #include <xen/interface/vcpu.h>
  3030. diff -upr linux-3.10.0-693.11.1.el7/block/cfq-iosched.c linux-3.10.0-693.11.6.el7/block/cfq-iosched.c
  3031. --- linux-3.10.0-693.11.1.el7/block/cfq-iosched.c 2017-10-27 11:14:15.000000000 +0200
  3032. +++ linux-3.10.0-693.11.6.el7/block/cfq-iosched.c 2017-12-28 19:59:43.000000000 +0100
  3033. @@ -2758,10 +2758,12 @@ static void cfq_arm_slice_timer(struct c
  3034. */
  3035. if (sample_valid(cic->ttime.ttime_samples) &&
  3036. (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
  3037. + gmb();
  3038. cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
  3039. cic->ttime.ttime_mean);
  3040. return;
  3041. }
  3042. + gmb();
  3043.  
  3044. /* There are other queues in the group, don't do group idle */
  3045. if (group_idle && cfqq->cfqg->nr_cfqq > 1)
  3046. diff -upr linux-3.10.0-693.11.1.el7/Documentation/kernel-parameters.txt linux-3.10.0-693.11.6.el7/Documentation/kernel-parameters.txt
  3047. --- linux-3.10.0-693.11.1.el7/Documentation/kernel-parameters.txt 2017-10-27 11:14:15.000000000 +0200
  3048. +++ linux-3.10.0-693.11.6.el7/Documentation/kernel-parameters.txt 2017-12-28 19:59:43.000000000 +0100
  3049. @@ -1479,8 +1479,7 @@ bytes respectively. Such letter suffixes
  3050. Valid arguments: on, off
  3051. Default: on
  3052.  
  3053. - kstack=N [X86] Print N words from the kernel stack
  3054. - in oops dumps.
  3055. + kpti [X86-64] Enable kernel page table isolation.
  3056.  
  3057. kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
  3058. Default is 0 (don't ignore, but inject #GP)
  3059. @@ -2223,6 +2222,8 @@ bytes respectively. Such letter suffixes
  3060. steal time is computed, but won't influence scheduler
  3061. behaviour
  3062.  
  3063. + nopti [X86-64] Disable kernel page table isolation.
  3064. +
  3065. nolapic [X86-32,APIC] Do not enable or use the local APIC.
  3066.  
  3067. nolapic_timer [X86-32,APIC] Do not use the local APIC timer.
  3068. Только в linux-3.10.0-693.11.6.el7/Documentation: spec_ctrl.txt
  3069. diff -upr linux-3.10.0-693.11.1.el7/Documentation/sysctl/kernel.txt linux-3.10.0-693.11.6.el7/Documentation/sysctl/kernel.txt
  3070. --- linux-3.10.0-693.11.1.el7/Documentation/sysctl/kernel.txt 2017-10-27 11:14:15.000000000 +0200
  3071. +++ linux-3.10.0-693.11.6.el7/Documentation/sysctl/kernel.txt 2017-12-28 19:59:43.000000000 +0100
  3072. @@ -40,7 +40,6 @@ show up in /proc/sys/kernel:
  3073. - hung_task_timeout_secs
  3074. - hung_task_warnings
  3075. - kptr_restrict
  3076. -- kstack_depth_to_print [ X86 only ]
  3077. - l2cr [ PPC only ]
  3078. - modprobe ==> Documentation/debugging-modules.txt
  3079. - modules_disabled
  3080. @@ -388,13 +387,6 @@ When kptr_restrict is set to (2), kernel
  3081.  
  3082. ==============================================================
  3083.  
  3084. -kstack_depth_to_print: (X86 only)
  3085. -
  3086. -Controls the number of words to print when dumping the raw
  3087. -kernel stack.
  3088. -
  3089. -==============================================================
  3090. -
  3091. l2cr: (PPC only)
  3092.  
  3093. This flag controls the L2 cache of G3 processor boards. If
  3094. diff -upr linux-3.10.0-693.11.1.el7/Documentation/x86/x86_64/boot-options.txt linux-3.10.0-693.11.6.el7/Documentation/x86/x86_64/boot-options.txt
  3095. --- linux-3.10.0-693.11.1.el7/Documentation/x86/x86_64/boot-options.txt 2017-10-27 11:14:15.000000000 +0200
  3096. +++ linux-3.10.0-693.11.6.el7/Documentation/x86/x86_64/boot-options.txt 2017-12-28 19:59:43.000000000 +0100
  3097. @@ -288,10 +288,6 @@ IOMMU (input/output memory management un
  3098. space might stop working. Use this option if you have devices that
  3099. are accessed from userspace directly on some PCI host bridge.
  3100.  
  3101. -Debugging
  3102. -
  3103. - kstack=N Print N words from the kernel stack in oops dumps.
  3104. -
  3105. pagefaulttrace Dump all page faults. Only useful for extreme debugging
  3106. and will create a lot of output.
  3107.  
  3108. diff -upr linux-3.10.0-693.11.1.el7/drivers/ata/sata_sil24.c linux-3.10.0-693.11.6.el7/drivers/ata/sata_sil24.c
  3109. --- linux-3.10.0-693.11.1.el7/drivers/ata/sata_sil24.c 2017-10-27 11:14:15.000000000 +0200
  3110. +++ linux-3.10.0-693.11.6.el7/drivers/ata/sata_sil24.c 2017-12-28 19:59:43.000000000 +0100
  3111. @@ -1041,6 +1041,7 @@ static void sil24_error_intr(struct ata_
  3112. pmp = (context >> 5) & 0xf;
  3113.  
  3114. if (pmp < ap->nr_pmp_links) {
  3115. + gmb();
  3116. link = &ap->pmp_link[pmp];
  3117. ehi = &link->eh_info;
  3118. qc = ata_qc_from_tag(ap, link->active_tag);
  3119. diff -upr linux-3.10.0-693.11.1.el7/drivers/cpufreq/amd_freq_sensitivity.c linux-3.10.0-693.11.6.el7/drivers/cpufreq/amd_freq_sensitivity.c
  3120. --- linux-3.10.0-693.11.1.el7/drivers/cpufreq/amd_freq_sensitivity.c 2017-10-27 11:14:15.000000000 +0200
  3121. +++ linux-3.10.0-693.11.6.el7/drivers/cpufreq/amd_freq_sensitivity.c 2017-12-28 19:59:43.000000000 +0100
  3122. @@ -85,13 +85,16 @@ static unsigned int amd_powersave_bias_t
  3123. if (data->freq_prev == policy->cur)
  3124. freq_next = policy->cur;
  3125.  
  3126. - if (freq_next > policy->cur)
  3127. + if (freq_next > policy->cur) {
  3128. + gmb();
  3129. freq_next = policy->cur;
  3130. - else if (freq_next < policy->cur)
  3131. + } else if (freq_next < policy->cur) {
  3132. + gmb();
  3133. freq_next = policy->min;
  3134. - else {
  3135. + } else {
  3136. unsigned int index;
  3137.  
  3138. + gmb();
  3139. cpufreq_frequency_table_target(policy,
  3140. od_info->freq_table, policy->cur - 1,
  3141. CPUFREQ_RELATION_H, &index);
  3142. diff -upr linux-3.10.0-693.11.1.el7/drivers/dax/dax.c linux-3.10.0-693.11.6.el7/drivers/dax/dax.c
  3143. --- linux-3.10.0-693.11.1.el7/drivers/dax/dax.c 2017-10-27 11:14:15.000000000 +0200
  3144. +++ linux-3.10.0-693.11.6.el7/drivers/dax/dax.c 2017-12-28 19:59:43.000000000 +0100
  3145. @@ -420,6 +420,7 @@ static phys_addr_t pgoff_to_phys(struct
  3146. }
  3147.  
  3148. if (i < dax_dev->num_resources) {
  3149. + gmb();
  3150. res = &dax_dev->res[i];
  3151. if (phys + size - 1 <= res->end)
  3152. return phys;
  3153. diff -upr linux-3.10.0-693.11.1.el7/drivers/gpu/drm/ast/ast_post.c linux-3.10.0-693.11.6.el7/drivers/gpu/drm/ast/ast_post.c
  3154. --- linux-3.10.0-693.11.1.el7/drivers/gpu/drm/ast/ast_post.c 2017-10-27 11:14:15.000000000 +0200
  3155. +++ linux-3.10.0-693.11.6.el7/drivers/gpu/drm/ast/ast_post.c 2017-12-28 19:59:43.000000000 +0100
  3156. @@ -658,6 +658,7 @@ FINETUNE_START:
  3157. passcnt = 0;
  3158. for (cnt = 0; cnt < 16; cnt++) {
  3159. if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
  3160. + gmb();
  3161. gold_sadj[0] += dllmin[cnt];
  3162. passcnt++;
  3163. }
  3164. @@ -676,6 +677,7 @@ FINETUNE_DONE:
  3165. for (cnt = 0; cnt < 8; cnt++) {
  3166. data >>= 3;
  3167. if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
  3168. + gmb();
  3169. dlli = dllmin[cnt];
  3170. if (gold_sadj[0] >= dlli) {
  3171. dlli = ((gold_sadj[0] - dlli) * 19) >> 5;
  3172. @@ -698,6 +700,7 @@ FINETUNE_DONE:
  3173. for (cnt = 8; cnt < 16; cnt++) {
  3174. data >>= 3;
  3175. if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
  3176. + gmb();
  3177. dlli = dllmin[cnt];
  3178. if (gold_sadj[1] >= dlli) {
  3179. dlli = ((gold_sadj[1] - dlli) * 19) >> 5;
  3180. diff -upr linux-3.10.0-693.11.1.el7/drivers/gpu/drm/i915/gvt/mmio.c linux-3.10.0-693.11.6.el7/drivers/gpu/drm/i915/gvt/mmio.c
  3181. --- linux-3.10.0-693.11.1.el7/drivers/gpu/drm/i915/gvt/mmio.c 2017-10-27 11:14:15.000000000 +0200
  3182. +++ linux-3.10.0-693.11.6.el7/drivers/gpu/drm/i915/gvt/mmio.c 2017-12-28 19:59:43.000000000 +0100
  3183. @@ -79,6 +79,7 @@ static void failsafe_emulate_mmio_rw(str
  3184. bytes);
  3185. } else if (reg_is_gtt(gvt, offset) &&
  3186. vgpu->gtt.ggtt_mm->virtual_page_table) {
  3187. + gmb();
  3188. offset -= gvt->device_info.gtt_start_offset;
  3189. pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;
  3190. if (read)
  3191. diff -upr linux-3.10.0-693.11.1.el7/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c linux-3.10.0-693.11.6.el7/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
  3192. --- linux-3.10.0-693.11.1.el7/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c 2017-10-27 11:14:15.000000000 +0200
  3193. +++ linux-3.10.0-693.11.6.el7/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c 2017-12-28 19:59:43.000000000 +0100
  3194. @@ -34,6 +34,7 @@ nvbios_addr(struct nvkm_bios *bios, u32
  3195. u32 p = *addr;
  3196.  
  3197. if (*addr > bios->image0_size && bios->imaged_addr) {
  3198. + gmb();
  3199. *addr -= bios->image0_size;
  3200. *addr += bios->imaged_addr;
  3201. }
  3202. diff -upr linux-3.10.0-693.11.1.el7/drivers/gpu/drm/radeon/ni_dpm.c linux-3.10.0-693.11.6.el7/drivers/gpu/drm/radeon/ni_dpm.c
  3203. --- linux-3.10.0-693.11.1.el7/drivers/gpu/drm/radeon/ni_dpm.c 2017-10-27 11:14:15.000000000 +0200
  3204. +++ linux-3.10.0-693.11.6.el7/drivers/gpu/drm/radeon/ni_dpm.c 2017-12-28 19:59:43.000000000 +0100
  3205. @@ -1344,10 +1344,13 @@ static int ni_get_std_voltage_value(stru
  3206. u16 *std_voltage)
  3207. {
  3208. if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries &&
  3209. - ((u32)voltage->index < rdev->pm.dpm.dyn_state.cac_leakage_table.count))
  3210. + ((u32)voltage->index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)) {
  3211. + gmb();
  3212. *std_voltage = rdev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
  3213. - else
  3214. + } else {
  3215. + gmb();
  3216. *std_voltage = be16_to_cpu(voltage->value);
  3217. + }
  3218.  
  3219. return 0;
  3220. }
  3221. diff -upr linux-3.10.0-693.11.1.el7/drivers/hid/hid-input.c linux-3.10.0-693.11.6.el7/drivers/hid/hid-input.c
  3222. --- linux-3.10.0-693.11.1.el7/drivers/hid/hid-input.c 2017-10-27 11:14:15.000000000 +0200
  3223. +++ linux-3.10.0-693.11.6.el7/drivers/hid/hid-input.c 2017-12-28 19:59:43.000000000 +0100
  3224. @@ -952,6 +952,8 @@ mapped:
  3225. if (usage->type == EV_ABS &&
  3226. (usage->hat_min < usage->hat_max || usage->hat_dir)) {
  3227. int i;
  3228. +
  3229. + gmb();
  3230. for (i = usage->code; i < usage->code + 2 && i <= max; i++) {
  3231. input_set_abs_params(input, i, -1, 1, 0, 0);
  3232. set_bit(i, input->absbit);
  3233. diff -upr linux-3.10.0-693.11.1.el7/drivers/infiniband/hw/mthca/mthca_cq.c linux-3.10.0-693.11.6.el7/drivers/infiniband/hw/mthca/mthca_cq.c
  3234. --- linux-3.10.0-693.11.1.el7/drivers/infiniband/hw/mthca/mthca_cq.c 2017-10-27 11:14:15.000000000 +0200
  3235. +++ linux-3.10.0-693.11.6.el7/drivers/infiniband/hw/mthca/mthca_cq.c 2017-12-28 19:59:43.000000000 +0100
  3236. @@ -339,6 +339,7 @@ void mthca_cq_resize_copy_cqes(struct mt
  3237. */
  3238. if (!mthca_is_memfree(to_mdev(cq->ibcq.device)) &&
  3239. cq->ibcq.cqe < cq->resize_buf->cqe) {
  3240. + gmb();
  3241. cq->cons_index &= cq->ibcq.cqe;
  3242. if (cqe_sw(get_cqe(cq, cq->ibcq.cqe)))
  3243. cq->cons_index -= cq->ibcq.cqe + 1;
  3244. diff -upr linux-3.10.0-693.11.1.el7/drivers/infiniband/hw/mthca/mthca_profile.c linux-3.10.0-693.11.6.el7/drivers/infiniband/hw/mthca/mthca_profile.c
  3245. --- linux-3.10.0-693.11.1.el7/drivers/infiniband/hw/mthca/mthca_profile.c 2017-10-27 11:14:15.000000000 +0200
  3246. +++ linux-3.10.0-693.11.6.el7/drivers/infiniband/hw/mthca/mthca_profile.c 2017-12-28 19:59:43.000000000 +0100
  3247. @@ -135,8 +135,10 @@ s64 mthca_make_profile(struct mthca_dev
  3248. */
  3249. for (i = MTHCA_RES_NUM; i > 0; --i)
  3250. for (j = 1; j < i; ++j) {
  3251. - if (profile[j].size > profile[j - 1].size)
  3252. + if (profile[j].size > profile[j - 1].size) {
  3253. + gmb();
  3254. swap(profile[j], profile[j - 1]);
  3255. + }
  3256. }
  3257.  
  3258. for (i = 0; i < MTHCA_RES_NUM; ++i) {
  3259. diff -upr linux-3.10.0-693.11.1.el7/drivers/infiniband/hw/nes/nes_mgt.c linux-3.10.0-693.11.6.el7/drivers/infiniband/hw/nes/nes_mgt.c
  3260. --- linux-3.10.0-693.11.1.el7/drivers/infiniband/hw/nes/nes_mgt.c 2017-10-27 11:14:15.000000000 +0200
  3261. +++ linux-3.10.0-693.11.6.el7/drivers/infiniband/hw/nes/nes_mgt.c 2017-12-28 19:59:43.000000000 +0100
  3262. @@ -796,6 +796,7 @@ static void nes_mgt_ce_handler(struct ne
  3263. qp_id = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_ACCQP_ID_IDX]);
  3264. qp_id &= 0x001fffff;
  3265. if (qp_id < nesadapter->max_qp) {
  3266. + gmb();
  3267. context = (unsigned long)nesadapter->qp_table[qp_id - NES_FIRST_QPN];
  3268. nesqp = (struct nes_qp *)context;
  3269. }
  3270. diff -upr linux-3.10.0-693.11.1.el7/drivers/infiniband/hw/qib/qib_user_sdma.c linux-3.10.0-693.11.6.el7/drivers/infiniband/hw/qib/qib_user_sdma.c
  3271. --- linux-3.10.0-693.11.1.el7/drivers/infiniband/hw/qib/qib_user_sdma.c 2017-10-27 11:14:15.000000000 +0200
  3272. +++ linux-3.10.0-693.11.6.el7/drivers/infiniband/hw/qib/qib_user_sdma.c 2017-12-28 19:59:43.000000000 +0100
  3273. @@ -1281,6 +1281,7 @@ retry:
  3274. * buffer packet.
  3275. */
  3276. if (ofs > dd->piosize2kmax_dwords) {
  3277. + gmb();
  3278. for (j = pkt->index; j <= i; j++) {
  3279. ppd->sdma_descq[dtail].qw[0] |=
  3280. cpu_to_le64(1ULL << 14);
  3281. diff -upr linux-3.10.0-693.11.1.el7/drivers/md/dm-cache-target.c linux-3.10.0-693.11.6.el7/drivers/md/dm-cache-target.c
  3282. --- linux-3.10.0-693.11.1.el7/drivers/md/dm-cache-target.c 2017-10-27 11:14:15.000000000 +0200
  3283. +++ linux-3.10.0-693.11.6.el7/drivers/md/dm-cache-target.c 2017-12-28 19:59:43.000000000 +0100
  3284. @@ -3522,6 +3522,7 @@ static void cache_io_hints(struct dm_tar
  3285. */
  3286. if (io_opt_sectors < cache->sectors_per_block ||
  3287. do_div(io_opt_sectors, cache->sectors_per_block)) {
  3288. + gmb();
  3289. blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
  3290. blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
  3291. }
  3292. diff -upr linux-3.10.0-693.11.1.el7/drivers/md/dm-era-target.c linux-3.10.0-693.11.6.el7/drivers/md/dm-era-target.c
  3293. --- linux-3.10.0-693.11.1.el7/drivers/md/dm-era-target.c 2017-10-27 11:14:15.000000000 +0200
  3294. +++ linux-3.10.0-693.11.6.el7/drivers/md/dm-era-target.c 2017-12-28 19:59:43.000000000 +0100
  3295. @@ -1703,6 +1703,7 @@ static void era_io_hints(struct dm_targe
  3296. */
  3297. if (io_opt_sectors < era->sectors_per_block ||
  3298. do_div(io_opt_sectors, era->sectors_per_block)) {
  3299. + gmb();
  3300. blk_limits_io_min(limits, 0);
  3301. blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT);
  3302. }
  3303. diff -upr linux-3.10.0-693.11.1.el7/drivers/md/raid10.c linux-3.10.0-693.11.6.el7/drivers/md/raid10.c
  3304. --- linux-3.10.0-693.11.1.el7/drivers/md/raid10.c 2017-10-27 11:14:15.000000000 +0200
  3305. +++ linux-3.10.0-693.11.6.el7/drivers/md/raid10.c 2017-12-28 19:59:43.000000000 +0100
  3306. @@ -1266,6 +1266,7 @@ static bool raid10_make_request(struct m
  3307. bio->bi_sector + sectors > conf->reshape_progress)
  3308. : (bio->bi_sector + sectors > conf->reshape_safe &&
  3309. bio->bi_sector < conf->reshape_progress))) {
  3310. + gmb();
  3311. /* Need to update reshape_position in metadata */
  3312. mddev->reshape_position = conf->reshape_progress;
  3313. set_mask_bits(&mddev->sb_flags, 0,
  3314. diff -upr linux-3.10.0-693.11.1.el7/drivers/md/raid5.c linux-3.10.0-693.11.6.el7/drivers/md/raid5.c
  3315. --- linux-3.10.0-693.11.1.el7/drivers/md/raid5.c 2017-10-27 11:14:15.000000000 +0200
  3316. +++ linux-3.10.0-693.11.6.el7/drivers/md/raid5.c 2017-12-28 19:59:43.000000000 +0100
  3317. @@ -7592,6 +7592,7 @@ static int check_stripe_cache(struct mdd
  3318. > conf->min_nr_stripes ||
  3319. ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
  3320. > conf->min_nr_stripes) {
  3321. + gmb();
  3322. pr_warn("md/raid:%s: reshape: not enough stripes. Needed %lu\n",
  3323. mdname(mddev),
  3324. ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
  3325. diff -upr linux-3.10.0-693.11.1.el7/drivers/media/common/btcx-risc.c linux-3.10.0-693.11.6.el7/drivers/media/common/btcx-risc.c
  3326. --- linux-3.10.0-693.11.1.el7/drivers/media/common/btcx-risc.c 2017-10-27 11:14:15.000000000 +0200
  3327. +++ linux-3.10.0-693.11.6.el7/drivers/media/common/btcx-risc.c 2017-12-28 19:59:43.000000000 +0100
  3328. @@ -172,6 +172,7 @@ btcx_sort_clips(struct v4l2_clip *clips,
  3329. for (i = nclips-2; i >= 0; i--) {
  3330. for (n = 0, j = 0; j <= i; j++) {
  3331. if (clips[j].c.left > clips[j+1].c.left) {
  3332. + gmb();
  3333. swap = clips[j];
  3334. clips[j] = clips[j+1];
  3335. clips[j+1] = swap;
  3336. diff -upr linux-3.10.0-693.11.1.el7/drivers/media/dvb-frontends/au8522_common.c linux-3.10.0-693.11.6.el7/drivers/media/dvb-frontends/au8522_common.c
  3337. --- linux-3.10.0-693.11.1.el7/drivers/media/dvb-frontends/au8522_common.c 2017-10-27 11:14:15.000000000 +0200
  3338. +++ linux-3.10.0-693.11.6.el7/drivers/media/dvb-frontends/au8522_common.c 2017-12-28 19:59:43.000000000 +0100
  3339. @@ -202,11 +202,13 @@ int au8522_led_ctrl(struct au8522_state
  3340. val &= ~led_config->led_states[i];
  3341.  
  3342. /* set selected LED state */
  3343. - if (led < led_config->num_led_states)
  3344. + if (led < led_config->num_led_states) {
  3345. + gmb();
  3346. val |= led_config->led_states[led];
  3347. - else if (led_config->num_led_states)
  3348. + } else if (led_config->num_led_states) {
  3349. val |=
  3350. led_config->led_states[led_config->num_led_states - 1];
  3351. + }
  3352.  
  3353. ret = au8522_writereg(state, 0x8000 |
  3354. (led_config->gpio_leds & ~0xc000), val);
  3355. diff -upr linux-3.10.0-693.11.1.el7/drivers/media/dvb-frontends/dib7000m.c linux-3.10.0-693.11.6.el7/drivers/media/dvb-frontends/dib7000m.c
  3356. --- linux-3.10.0-693.11.1.el7/drivers/media/dvb-frontends/dib7000m.c 2017-10-27 11:14:15.000000000 +0200
  3357. +++ linux-3.10.0-693.11.6.el7/drivers/media/dvb-frontends/dib7000m.c 2017-12-28 19:59:43.000000000 +0100
  3358. @@ -641,14 +641,18 @@ static int dib7000m_agc_soft_split(struc
  3359. // n_agc_global
  3360. agc = dib7000m_read_word(state, 390);
  3361.  
  3362. - if (agc > state->current_agc->split.min_thres)
  3363. + if (agc > state->current_agc->split.min_thres) {
  3364. + gmb();
  3365. split_offset = state->current_agc->split.min;
  3366. - else if (agc < state->current_agc->split.max_thres)
  3367. + } else if (agc < state->current_agc->split.max_thres) {
  3368. + gmb();
  3369. split_offset = state->current_agc->split.max;
  3370. - else
  3371. + } else {
  3372. + gmb();
  3373. split_offset = state->current_agc->split.max *
  3374. (agc - state->current_agc->split.min_thres) /
  3375. (state->current_agc->split.max_thres - state->current_agc->split.min_thres);
  3376. + }
  3377.  
  3378. dprintk( "AGC split_offset: %d",split_offset);
  3379.  
  3380. diff -upr linux-3.10.0-693.11.1.el7/drivers/media/dvb-frontends/dib8000.c linux-3.10.0-693.11.6.el7/drivers/media/dvb-frontends/dib8000.c
  3381. --- linux-3.10.0-693.11.1.el7/drivers/media/dvb-frontends/dib8000.c 2017-10-27 11:14:15.000000000 +0200
  3382. +++ linux-3.10.0-693.11.6.el7/drivers/media/dvb-frontends/dib8000.c 2017-12-28 19:59:43.000000000 +0100
  3383. @@ -1181,14 +1181,18 @@ static int dib8000_agc_soft_split(struct
  3384. // n_agc_global
  3385. agc = dib8000_read_word(state, 390);
  3386.  
  3387. - if (agc > state->current_agc->split.min_thres)
  3388. + if (agc > state->current_agc->split.min_thres) {
  3389. + gmb();
  3390. split_offset = state->current_agc->split.min;
  3391. - else if (agc < state->current_agc->split.max_thres)
  3392. + } else if (agc < state->current_agc->split.max_thres) {
  3393. + gmb();
  3394. split_offset = state->current_agc->split.max;
  3395. - else
  3396. + } else {
  3397. + gmb();
  3398. split_offset = state->current_agc->split.max *
  3399. (agc - state->current_agc->split.min_thres) /
  3400. (state->current_agc->split.max_thres - state->current_agc->split.min_thres);
  3401. + }
  3402.  
  3403. dprintk("AGC split_offset: %d", split_offset);
  3404.  
  3405. diff -upr linux-3.10.0-693.11.1.el7/drivers/media/dvb-frontends/stb0899_drv.c linux-3.10.0-693.11.6.el7/drivers/media/dvb-frontends/stb0899_drv.c
  3406. --- linux-3.10.0-693.11.1.el7/drivers/media/dvb-frontends/stb0899_drv.c 2017-10-27 11:14:15.000000000 +0200
  3407. +++ linux-3.10.0-693.11.6.el7/drivers/media/dvb-frontends/stb0899_drv.c 2017-12-28 19:59:43.000000000 +0100
  3408. @@ -940,11 +940,14 @@ static int stb0899_table_lookup(const st
  3409. int res = 0;
  3410. int min = 0, med;
  3411.  
  3412. - if (val < tab[min].read)
  3413. + if (val < tab[min].read) {
  3414. + gmb();
  3415. res = tab[min].real;
  3416. - else if (val >= tab[max].read)
  3417. + } else if (val >= tab[max].read) {
  3418. + gmb();
  3419. res = tab[max].real;
  3420. - else {
  3421. + } else {
  3422. + gmb();
  3423. while ((max - min) > 1) {
  3424. med = (max + min) / 2;
  3425. if (val >= tab[min].read && val < tab[med].read)
  3426. diff -upr linux-3.10.0-693.11.1.el7/drivers/media/dvb-frontends/stv090x.c linux-3.10.0-693.11.6.el7/drivers/media/dvb-frontends/stv090x.c
  3427. --- linux-3.10.0-693.11.1.el7/drivers/media/dvb-frontends/stv090x.c 2017-10-27 11:14:15.000000000 +0200
  3428. +++ linux-3.10.0-693.11.6.el7/drivers/media/dvb-frontends/stv090x.c 2017-12-28 19:59:43.000000000 +0100
  3429. @@ -3621,11 +3621,16 @@ static int stv090x_table_lookup(const st
  3430. tab[min].real;
  3431. } else {
  3432. if (tab[min].read < tab[max].read) {
  3433. - if (val < tab[min].read)
  3434. + gmb();
  3435. + if (val < tab[min].read) {
  3436. + gmb();
  3437. res = tab[min].real;
  3438. - else if (val >= tab[max].read)
  3439. + } else if (val >= tab[max].read) {
  3440. + gmb();
  3441. res = tab[max].real;
  3442. + }
  3443. } else {
  3444. + gmb();
  3445. if (val >= tab[min].read)
  3446. res = tab[min].real;
  3447. else if (val < tab[max].read)
  3448. diff -upr linux-3.10.0-693.11.1.el7/drivers/media/mmc/siano/smssdio.c linux-3.10.0-693.11.6.el7/drivers/media/mmc/siano/smssdio.c
  3449. --- linux-3.10.0-693.11.1.el7/drivers/media/mmc/siano/smssdio.c 2017-10-27 11:14:15.000000000 +0200
  3450. +++ linux-3.10.0-693.11.6.el7/drivers/media/mmc/siano/smssdio.c 2017-12-28 19:59:43.000000000 +0100
  3451. @@ -222,10 +222,12 @@ static void smssdio_interrupt(struct sdi
  3452. }
  3453.  
  3454. buffer += smsdev->func->cur_blksize;
  3455. - if (size > smsdev->func->cur_blksize)
  3456. + if (size > smsdev->func->cur_blksize) {
  3457. + gmb();
  3458. size -= smsdev->func->cur_blksize;
  3459. - else
  3460. + } else {
  3461. size = 0;
  3462. + }
  3463. }
  3464. }
  3465. }
  3466. diff -upr linux-3.10.0-693.11.1.el7/drivers/media/tuners/r820t.c linux-3.10.0-693.11.6.el7/drivers/media/tuners/r820t.c
  3467. --- linux-3.10.0-693.11.1.el7/drivers/media/tuners/r820t.c 2017-10-27 11:14:15.000000000 +0200
  3468. +++ linux-3.10.0-693.11.6.el7/drivers/media/tuners/r820t.c 2017-12-28 19:59:43.000000000 +0100
  3469. @@ -1564,8 +1564,10 @@ static void r820t_compre_cor(struct r820
  3470. int i;
  3471.  
  3472. for (i = 3; i > 0; i--) {
  3473. - if (iq[0].value > iq[i - 1].value)
  3474. + if (iq[0].value > iq[i - 1].value) {
  3475. + gmb();
  3476. swap(iq[0], iq[i - 1]);
  3477. + }
  3478. }
  3479. }
  3480.  
  3481. diff -upr linux-3.10.0-693.11.1.el7/drivers/media/usb/gspca/autogain_functions.c linux-3.10.0-693.11.6.el7/drivers/media/usb/gspca/autogain_functions.c
  3482. --- linux-3.10.0-693.11.1.el7/drivers/media/usb/gspca/autogain_functions.c 2017-10-27 11:14:15.000000000 +0200
  3483. +++ linux-3.10.0-693.11.6.el7/drivers/media/usb/gspca/autogain_functions.c 2017-12-28 19:59:43.000000000 +0100
  3484. @@ -145,10 +145,13 @@ int gspca_coarse_grained_expo_autogain(
  3485. gspca_dev->exp_too_low_cnt = 0;
  3486. } else {
  3487. gain += steps;
  3488. - if (gain > gspca_dev->gain->maximum)
  3489. + if (gain > gspca_dev->gain->maximum) {
  3490. + gmb();
  3491. gain = gspca_dev->gain->maximum;
  3492. - else if (gain < gspca_dev->gain->minimum)
  3493. + } else if (gain < gspca_dev->gain->minimum) {
  3494. + gmb();
  3495. gain = gspca_dev->gain->minimum;
  3496. + }
  3497. gspca_dev->exp_too_high_cnt = 0;
  3498. gspca_dev->exp_too_low_cnt = 0;
  3499. }
  3500. diff -upr linux-3.10.0-693.11.1.el7/drivers/media/usb/gspca/xirlink_cit.c linux-3.10.0-693.11.6.el7/drivers/media/usb/gspca/xirlink_cit.c
  3501. --- linux-3.10.0-693.11.1.el7/drivers/media/usb/gspca/xirlink_cit.c 2017-10-27 11:14:15.000000000 +0200
  3502. +++ linux-3.10.0-693.11.6.el7/drivers/media/usb/gspca/xirlink_cit.c 2017-12-28 19:59:43.000000000 +0100
  3503. @@ -2900,10 +2900,12 @@ static void sd_pkt_scan(struct gspca_dev
  3504.  
  3505. /* finish decoding current frame */
  3506. n = sof - data;
  3507. - if (n > sd->sof_len)
  3508. + if (n > sd->sof_len) {
  3509. + gmb();
  3510. n -= sd->sof_len;
  3511. - else
  3512. + } else {
  3513. n = 0;
  3514. + }
  3515. gspca_frame_add(gspca_dev, LAST_PACKET,
  3516. data, n);
  3517. gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0);
  3518. diff -upr linux-3.10.0-693.11.1.el7/drivers/media/usb/uvc/uvc_v4l2.c linux-3.10.0-693.11.6.el7/drivers/media/usb/uvc/uvc_v4l2.c
  3519. --- linux-3.10.0-693.11.1.el7/drivers/media/usb/uvc/uvc_v4l2.c 2017-10-27 11:14:15.000000000 +0200
  3520. +++ linux-3.10.0-693.11.6.el7/drivers/media/usb/uvc/uvc_v4l2.c 2017-12-28 19:59:43.000000000 +0100
  3521. @@ -720,6 +720,7 @@ static long uvc_v4l2_do_ioctl(struct fil
  3522. }
  3523. pin = iterm->id;
  3524. } else if (index < selector->bNrInPins) {
  3525. + gmb();
  3526. pin = selector->baSourceID[index];
  3527. list_for_each_entry(iterm, &chain->entities, chain) {
  3528. if (!UVC_ENTITY_IS_ITERM(iterm))
  3529. diff -upr linux-3.10.0-693.11.1.el7/drivers/memstick/host/r592.c linux-3.10.0-693.11.6.el7/drivers/memstick/host/r592.c
  3530. --- linux-3.10.0-693.11.1.el7/drivers/memstick/host/r592.c 2017-10-27 11:14:15.000000000 +0200
  3531. +++ linux-3.10.0-693.11.6.el7/drivers/memstick/host/r592.c 2017-12-28 19:59:43.000000000 +0100
  3532. @@ -336,6 +336,7 @@ static void r592_write_fifo_pio(struct r
  3533.  
  3534. if (!kfifo_is_full(&dev->pio_fifo))
  3535. return;
  3536. + gmb();
  3537. len -= copy_len;
  3538. buffer += copy_len;
  3539.  
  3540. diff -upr linux-3.10.0-693.11.1.el7/drivers/mmc/core/core.c linux-3.10.0-693.11.6.el7/drivers/mmc/core/core.c
  3541. --- linux-3.10.0-693.11.1.el7/drivers/mmc/core/core.c 2017-10-27 11:14:15.000000000 +0200
  3542. +++ linux-3.10.0-693.11.6.el7/drivers/mmc/core/core.c 2017-12-28 19:59:43.000000000 +0100
  3543. @@ -2009,9 +2009,11 @@ void mmc_init_erase(struct mmc_card *car
  3544. card->pref_erase = 2 * 1024 * 1024 / 512;
  3545. else
  3546. card->pref_erase = 4 * 1024 * 1024 / 512;
  3547. - if (card->pref_erase < card->erase_size)
  3548. + if (card->pref_erase < card->erase_size) {
  3549. + gmb();
  3550. card->pref_erase = card->erase_size;
  3551. - else {
  3552. + } else {
  3553. + gmb();
  3554. sz = card->pref_erase % card->erase_size;
  3555. if (sz)
  3556. card->pref_erase += card->erase_size - sz;
  3557. diff -upr linux-3.10.0-693.11.1.el7/drivers/mmc/core/mmc.c linux-3.10.0-693.11.6.el7/drivers/mmc/core/mmc.c
  3558. --- linux-3.10.0-693.11.1.el7/drivers/mmc/core/mmc.c 2017-10-27 11:14:15.000000000 +0200
  3559. +++ linux-3.10.0-693.11.6.el7/drivers/mmc/core/mmc.c 2017-12-28 19:59:43.000000000 +0100
  3560. @@ -940,12 +940,16 @@ static void mmc_set_bus_speed(struct mmc
  3561. unsigned int max_dtr = (unsigned int)-1;
  3562.  
  3563. if ((mmc_card_hs200(card) || mmc_card_hs400(card)) &&
  3564. - max_dtr > card->ext_csd.hs200_max_dtr)
  3565. + max_dtr > card->ext_csd.hs200_max_dtr) {
  3566. + gmb();
  3567. max_dtr = card->ext_csd.hs200_max_dtr;
  3568. - else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr)
  3569. + } else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr) {
  3570. + gmb();
  3571. max_dtr = card->ext_csd.hs_max_dtr;
  3572. - else if (max_dtr > card->csd.max_dtr)
  3573. + } else if (max_dtr > card->csd.max_dtr) {
  3574. + gmb();
  3575. max_dtr = card->csd.max_dtr;
  3576. + }
  3577.  
  3578. mmc_set_clock(card->host, max_dtr);
  3579. }
  3580. diff -upr linux-3.10.0-693.11.1.el7/drivers/net/can/dev.c linux-3.10.0-693.11.6.el7/drivers/net/can/dev.c
  3581. --- linux-3.10.0-693.11.1.el7/drivers/net/can/dev.c 2017-10-27 11:14:15.000000000 +0200
  3582. +++ linux-3.10.0-693.11.6.el7/drivers/net/can/dev.c 2017-12-28 19:59:43.000000000 +0100
  3583. @@ -94,6 +94,7 @@ static int can_update_spt(const struct c
  3584. *tseg2 = btc->tseg2_max;
  3585. *tseg1 = tseg - *tseg2;
  3586. if (*tseg1 > btc->tseg1_max) {
  3587. + gmb();
  3588. *tseg1 = btc->tseg1_max;
  3589. *tseg2 = tseg - *tseg1;
  3590. }
  3591. diff -upr linux-3.10.0-693.11.1.el7/drivers/net/ethernet/amazon/ena/ena_netdev.c linux-3.10.0-693.11.6.el7/drivers/net/ethernet/amazon/ena/ena_netdev.c
  3592. --- linux-3.10.0-693.11.1.el7/drivers/net/ethernet/amazon/ena/ena_netdev.c 2017-10-27 11:14:15.000000000 +0200
  3593. +++ linux-3.10.0-693.11.6.el7/drivers/net/ethernet/amazon/ena/ena_netdev.c 2017-12-28 19:59:43.000000000 +0100
  3594. @@ -647,6 +647,7 @@ static int validate_tx_req_id(struct ena
  3595. struct ena_tx_buffer *tx_info = NULL;
  3596.  
  3597. if (likely(req_id < tx_ring->ring_size)) {
  3598. + gmb();
  3599. tx_info = &tx_ring->tx_buffer_info[req_id];
  3600. if (likely(tx_info->skb))
  3601. return 0;
  3602. diff -upr linux-3.10.0-693.11.1.el7/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c linux-3.10.0-693.11.6.el7/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
  3603. --- linux-3.10.0-693.11.1.el7/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c 2017-10-27 11:14:15.000000000 +0200
  3604. +++ linux-3.10.0-693.11.6.el7/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c 2017-12-28 19:59:43.000000000 +0100
  3605. @@ -274,9 +274,11 @@ static int cfg_queues_uld(struct adapter
  3606. return -ENOMEM;
  3607.  
  3608. if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
  3609. + gmb();
  3610. i = s->nqs_per_uld;
  3611. rxq_info->nrxq = roundup(i, adap->params.nports);
  3612. } else {
  3613. + gmb();
  3614. i = min_t(int, uld_info->nrxq,
  3615. num_online_cpus());
  3616. rxq_info->nrxq = roundup(i, adap->params.nports);
  3617. diff -upr linux-3.10.0-693.11.1.el7/drivers/net/ethernet/mellanox/mlx4/en_rx.c linux-3.10.0-693.11.6.el7/drivers/net/ethernet/mellanox/mlx4/en_rx.c
  3618. --- linux-3.10.0-693.11.1.el7/drivers/net/ethernet/mellanox/mlx4/en_rx.c 2017-10-27 11:14:15.000000000 +0200
  3619. +++ linux-3.10.0-693.11.6.el7/drivers/net/ethernet/mellanox/mlx4/en_rx.c 2017-12-28 19:59:43.000000000 +0100
  3620. @@ -1295,10 +1295,13 @@ int mlx4_en_config_rss_steer(struct mlx4
  3621. mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
  3622. priv->rx_ring[0]->cqn, -1, &context);
  3623.  
  3624. - if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
  3625. + if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num) {
  3626. + gmb();
  3627. rss_rings = priv->rx_ring_num;
  3628. - else
  3629. + } else {
  3630. + gmb();
  3631. rss_rings = priv->prof->rss_rings;
  3632. + }
  3633.  
  3634. ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
  3635. + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
  3636. diff -upr linux-3.10.0-693.11.1.el7/drivers/net/ethernet/mellanox/mlx4/profile.c linux-3.10.0-693.11.6.el7/drivers/net/ethernet/mellanox/mlx4/profile.c
  3637. --- linux-3.10.0-693.11.1.el7/drivers/net/ethernet/mellanox/mlx4/profile.c 2017-10-27 11:14:15.000000000 +0200
  3638. +++ linux-3.10.0-693.11.6.el7/drivers/net/ethernet/mellanox/mlx4/profile.c 2017-12-28 19:59:43.000000000 +0100
  3639. @@ -148,8 +148,10 @@ u64 mlx4_make_profile(struct mlx4_dev *d
  3640. */
  3641. for (i = MLX4_RES_NUM; i > 0; --i)
  3642. for (j = 1; j < i; ++j) {
  3643. - if (profile[j].size > profile[j - 1].size)
  3644. + if (profile[j].size > profile[j - 1].size) {
  3645. + gmb();
  3646. swap(profile[j], profile[j - 1]);
  3647. + }
  3648. }
  3649.  
  3650. for (i = 0; i < MLX4_RES_NUM; ++i) {
  3651. diff -upr linux-3.10.0-693.11.1.el7/drivers/net/ethernet/qlogic/qlge/qlge_main.c linux-3.10.0-693.11.6.el7/drivers/net/ethernet/qlogic/qlge/qlge_main.c
  3652. --- linux-3.10.0-693.11.1.el7/drivers/net/ethernet/qlogic/qlge/qlge_main.c 2017-10-27 11:14:15.000000000 +0200
  3653. +++ linux-3.10.0-693.11.6.el7/drivers/net/ethernet/qlogic/qlge/qlge_main.c 2017-12-28 19:59:43.000000000 +0100
  3654. @@ -4149,6 +4149,7 @@ static int ql_configure_rings(struct ql_
  3655. rx_ring->cq_id = i;
  3656. rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
  3657. if (i < qdev->rss_ring_count) {
  3658. + gmb();
  3659. /*
  3660. * Inbound (RSS) queues.
  3661. */
  3662. @@ -4165,6 +4166,7 @@ static int ql_configure_rings(struct ql_
  3663. rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
  3664. rx_ring->type = RX_Q;
  3665. } else {
  3666. + gmb();
  3667. /*
  3668. * Outbound queue handles outbound completions only.
  3669. */
  3670. diff -upr linux-3.10.0-693.11.1.el7/drivers/net/ethernet/sfc/falcon/tx.c linux-3.10.0-693.11.6.el7/drivers/net/ethernet/sfc/falcon/tx.c
  3671. --- linux-3.10.0-693.11.1.el7/drivers/net/ethernet/sfc/falcon/tx.c 2017-10-27 11:14:15.000000000 +0200
  3672. +++ linux-3.10.0-693.11.6.el7/drivers/net/ethernet/sfc/falcon/tx.c 2017-12-28 19:59:43.000000000 +0100
  3673. @@ -451,6 +451,7 @@ int ef4_setup_tc(struct net_device *net_
  3674. }
  3675.  
  3676. if (num_tc > net_dev->num_tc) {
  3677. + gmb();
  3678. /* Initialise high-priority queues as necessary */
  3679. ef4_for_each_channel(channel, efx) {
  3680. ef4_for_each_possible_channel_tx_queue(tx_queue,
  3681. @@ -468,6 +469,7 @@ int ef4_setup_tc(struct net_device *net_
  3682. }
  3683. }
  3684. } else {
  3685. + gmb();
  3686. /* Reduce number of classes before number of queues */
  3687. net_dev->num_tc = num_tc;
  3688. }
  3689. diff -upr linux-3.10.0-693.11.1.el7/drivers/net/ethernet/sfc/tx.c linux-3.10.0-693.11.6.el7/drivers/net/ethernet/sfc/tx.c
  3690. --- linux-3.10.0-693.11.1.el7/drivers/net/ethernet/sfc/tx.c 2017-10-27 11:14:15.000000000 +0200
  3691. +++ linux-3.10.0-693.11.6.el7/drivers/net/ethernet/sfc/tx.c 2017-12-28 19:59:43.000000000 +0100
  3692. @@ -679,6 +679,7 @@ int efx_setup_tc(struct net_device *net_
  3693. }
  3694.  
  3695. if (num_tc > net_dev->num_tc) {
  3696. + gmb();
  3697. /* Initialise high-priority queues as necessary */
  3698. efx_for_each_channel(channel, efx) {
  3699. efx_for_each_possible_channel_tx_queue(tx_queue,
  3700. @@ -696,6 +697,7 @@ int efx_setup_tc(struct net_device *net_
  3701. }
  3702. }
  3703. } else {
  3704. + gmb();
  3705. /* Reduce number of classes before number of queues */
  3706. net_dev->num_tc = num_tc;
  3707. }
  3708. diff -upr linux-3.10.0-693.11.1.el7/drivers/net/hyperv/rndis_filter.c linux-3.10.0-693.11.6.el7/drivers/net/hyperv/rndis_filter.c
  3709. --- linux-3.10.0-693.11.1.el7/drivers/net/hyperv/rndis_filter.c 2017-10-27 11:14:15.000000000 +0200
  3710. +++ linux-3.10.0-693.11.6.el7/drivers/net/hyperv/rndis_filter.c 2017-12-28 19:59:43.000000000 +0100
  3711. @@ -1088,10 +1088,13 @@ int rndis_filter_device_add(struct hv_de
  3712. num_possible_rss_qs = cpumask_weight(node_cpu_mask);
  3713.  
  3714. /* We will use the given number of channels if available. */
  3715. - if (device_info->num_chn && device_info->num_chn < net_device->max_chn)
  3716. + if (device_info->num_chn && device_info->num_chn < net_device->max_chn) {
  3717. + gmb();
  3718. net_device->num_chn = device_info->num_chn;
  3719. - else
  3720. + } else {
  3721. + gmb();
  3722. net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
  3723. + }
  3724.  
  3725. num_rss_qs = net_device->num_chn - 1;
  3726. net_device->num_sc_offered = num_rss_qs;
  3727. diff -upr linux-3.10.0-693.11.1.el7/drivers/net/wireless/ath/ath10k/htt_rx.c linux-3.10.0-693.11.6.el7/drivers/net/wireless/ath/ath10k/htt_rx.c
  3728. --- linux-3.10.0-693.11.1.el7/drivers/net/wireless/ath/ath10k/htt_rx.c 2017-10-27 11:14:15.000000000 +0200
  3729. +++ linux-3.10.0-693.11.6.el7/drivers/net/wireless/ath/ath10k/htt_rx.c 2017-12-28 19:59:43.000000000 +0100
  3730. @@ -1635,6 +1635,7 @@ static void ath10k_htt_rx_tx_compl_ind(s
  3731. * writer, you don't need extra locking to use these macro.
  3732. */
  3733. if (!kfifo_put(&htt->txdone_fifo, &tx_done)) {
  3734. + gmb();
  3735. ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
  3736. tx_done.msdu_id, tx_done.status);
  3737. ath10k_txrx_tx_unref(htt, &tx_done);
  3738. diff -upr linux-3.10.0-693.11.1.el7/drivers/net/wireless/ath/ath9k/calib.c linux-3.10.0-693.11.6.el7/drivers/net/wireless/ath/ath9k/calib.c
  3739. --- linux-3.10.0-693.11.1.el7/drivers/net/wireless/ath/ath9k/calib.c 2017-10-27 11:14:15.000000000 +0200
  3740. +++ linux-3.10.0-693.11.6.el7/drivers/net/wireless/ath/ath9k/calib.c 2017-12-28 19:59:43.000000000 +0100
  3741. @@ -370,11 +370,13 @@ static void ath9k_hw_nf_sanitize(struct
  3742. (i >= 3 ? "ext" : "ctl"), i % 3, nf[i]);
  3743.  
  3744. if (nf[i] > limit->max) {
  3745. + gmb();
  3746. ath_dbg(common, CALIBRATE,
  3747. "NF[%d] (%d) > MAX (%d), correcting to MAX\n",
  3748. i, nf[i], limit->max);
  3749. nf[i] = limit->max;
  3750. } else if (nf[i] < limit->min) {
  3751. + gmb();
  3752. ath_dbg(common, CALIBRATE,
  3753. "NF[%d] (%d) < MIN (%d), correcting to NOM\n",
  3754. i, nf[i], limit->min);
  3755. diff -upr linux-3.10.0-693.11.1.el7/drivers/net/wireless/ath/carl9170/main.c linux-3.10.0-693.11.6.el7/drivers/net/wireless/ath/carl9170/main.c
  3756. --- linux-3.10.0-693.11.1.el7/drivers/net/wireless/ath/carl9170/main.c 2017-10-27 11:14:15.000000000 +0200
  3757. +++ linux-3.10.0-693.11.6.el7/drivers/net/wireless/ath/carl9170/main.c 2017-12-28 19:59:43.000000000 +0100
  3758. @@ -1388,6 +1388,7 @@ static int carl9170_op_conf_tx(struct ie
  3759.  
  3760. mutex_lock(&ar->mutex);
  3761. if (queue < ar->hw->queues) {
  3762. + gmb();
  3763. memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
  3764. ret = carl9170_set_qos(ar);
  3765. } else {
  3766. diff -upr linux-3.10.0-693.11.1.el7/drivers/net/wireless/marvell/mwifiex/11n.h linux-3.10.0-693.11.6.el7/drivers/net/wireless/marvell/mwifiex/11n.h
  3767. --- linux-3.10.0-693.11.1.el7/drivers/net/wireless/marvell/mwifiex/11n.h 2017-10-27 11:14:15.000000000 +0200
  3768. +++ linux-3.10.0-693.11.6.el7/drivers/net/wireless/marvell/mwifiex/11n.h 2017-12-28 19:59:43.000000000 +0100
  3769. @@ -154,6 +154,7 @@ mwifiex_find_stream_to_delete(struct mwi
  3770. spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
  3771. list_for_each_entry(tx_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
  3772. if (tid > priv->aggr_prio_tbl[tx_tbl->tid].ampdu_user) {
  3773. + gmb();
  3774. tid = priv->aggr_prio_tbl[tx_tbl->tid].ampdu_user;
  3775. *ptid = tx_tbl->tid;
  3776. memcpy(ra, tx_tbl->ra, ETH_ALEN);
  3777. diff -upr linux-3.10.0-693.11.1.el7/drivers/net/wireless/marvell/mwifiex/wmm.c linux-3.10.0-693.11.6.el7/drivers/net/wireless/marvell/mwifiex/wmm.c
  3778. --- linux-3.10.0-693.11.1.el7/drivers/net/wireless/marvell/mwifiex/wmm.c 2017-10-27 11:14:15.000000000 +0200
  3779. +++ linux-3.10.0-693.11.6.el7/drivers/net/wireless/marvell/mwifiex/wmm.c 2017-12-28 19:59:43.000000000 +0100
  3780. @@ -265,14 +265,18 @@ mwifiex_wmm_setup_queue_priorities(struc
  3781. for (i = 0; i < num_ac; i++) {
  3782. for (j = 1; j < num_ac - i; j++) {
  3783. if (tmp[j - 1] > tmp[j]) {
  3784. + gmb();
  3785. swap(tmp[j - 1], tmp[j]);
  3786. swap(priv->wmm.queue_priority[j - 1],
  3787. priv->wmm.queue_priority[j]);
  3788. } else if (tmp[j - 1] == tmp[j]) {
  3789. + gmb();
  3790. if (priv->wmm.queue_priority[j - 1]
  3791. - < priv->wmm.queue_priority[j])
  3792. + < priv->wmm.queue_priority[j]) {
  3793. + gmb();
  3794. swap(priv->wmm.queue_priority[j - 1],
  3795. priv->wmm.queue_priority[j]);
  3796. + }
  3797. }
  3798. }
  3799. }
  3800. diff -upr linux-3.10.0-693.11.1.el7/drivers/net/wireless/p54/main.c linux-3.10.0-693.11.6.el7/drivers/net/wireless/p54/main.c
  3801. --- linux-3.10.0-693.11.1.el7/drivers/net/wireless/p54/main.c 2017-10-27 11:14:15.000000000 +0200
  3802. +++ linux-3.10.0-693.11.6.el7/drivers/net/wireless/p54/main.c 2017-12-28 19:59:43.000000000 +0100
  3803. @@ -418,6 +418,7 @@ static int p54_conf_tx(struct ieee80211_
  3804.  
  3805. mutex_lock(&priv->conf_mutex);
  3806. if (queue < dev->queues) {
  3807. + gmb();
  3808. P54_SET_QUEUE(priv->qos_params[queue], params->aifs,
  3809. params->cw_min, params->cw_max, params->txop);
  3810. ret = p54_set_edcf(priv);
  3811. diff -upr linux-3.10.0-693.11.1.el7/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c linux-3.10.0-693.11.6.el7/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
  3812. --- linux-3.10.0-693.11.1.el7/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 2017-10-27 11:14:15.000000000 +0200
  3813. +++ linux-3.10.0-693.11.6.el7/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 2017-12-28 19:59:43.000000000 +0100
  3814. @@ -445,6 +445,7 @@ static void rt2800mmio_txstatus_interrup
  3815. break;
  3816.  
  3817. if (!kfifo_put(&rt2x00dev->txstatus_fifo, &status)) {
  3818. + gmb();
  3819. rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n");
  3820. break;
  3821. }
  3822. diff -upr linux-3.10.0-693.11.1.el7/drivers/net/wireless/ralink/rt2x00/rt2800usb.c linux-3.10.0-693.11.6.el7/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
  3823. --- linux-3.10.0-693.11.1.el7/drivers/net/wireless/ralink/rt2x00/rt2800usb.c 2017-10-27 11:14:15.000000000 +0200
  3824. +++ linux-3.10.0-693.11.6.el7/drivers/net/wireless/ralink/rt2x00/rt2800usb.c 2017-12-28 19:59:43.000000000 +0100
  3825. @@ -161,8 +161,10 @@ static bool rt2800usb_tx_sta_fifo_read_c
  3826.  
  3827. valid = rt2x00_get_field32(tx_status, TX_STA_FIFO_VALID);
  3828. if (valid) {
  3829. - if (!kfifo_put(&rt2x00dev->txstatus_fifo, &tx_status))
  3830. + if (!kfifo_put(&rt2x00dev->txstatus_fifo, &tx_status)) {
  3831. + gmb();
  3832. rt2x00_warn(rt2x00dev, "TX status FIFO overrun\n");
  3833. + }
  3834.  
  3835. queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
  3836.  
  3837. diff -upr linux-3.10.0-693.11.1.el7/drivers/pnp/quirks.c linux-3.10.0-693.11.6.el7/drivers/pnp/quirks.c
  3838. --- linux-3.10.0-693.11.1.el7/drivers/pnp/quirks.c 2017-10-27 11:14:15.000000000 +0200
  3839. +++ linux-3.10.0-693.11.6.el7/drivers/pnp/quirks.c 2017-12-28 19:59:43.000000000 +0100
  3840. @@ -323,6 +323,7 @@ static void quirk_amd_mmconfig_area(stru
  3841. "%pR covers only part of AMD MMCONFIG area %pR; adding more reservations\n",
  3842. res, mmconfig);
  3843. if (mmconfig->start < res->start) {
  3844. + gmb();
  3845. start = mmconfig->start;
  3846. end = res->start - 1;
  3847. pnp_add_mem_resource(dev, start, end, 0);
  3848. diff -upr linux-3.10.0-693.11.1.el7/drivers/scsi/be2iscsi/be_iscsi.c linux-3.10.0-693.11.6.el7/drivers/scsi/be2iscsi/be_iscsi.c
  3849. --- linux-3.10.0-693.11.1.el7/drivers/scsi/be2iscsi/be_iscsi.c 2017-10-27 11:14:15.000000000 +0200
  3850. +++ linux-3.10.0-693.11.6.el7/drivers/scsi/be2iscsi/be_iscsi.c 2017-12-28 19:59:43.000000000 +0100
  3851. @@ -67,6 +67,7 @@ struct iscsi_cls_session *beiscsi_sessio
  3852. beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
  3853. "BS_%d : In beiscsi_session_create\n");
  3854. if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) {
  3855. + gmb();
  3856. beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
  3857. "BS_%d : Cannot handle %d cmds."
  3858. "Max cmds per session supported is %d. Using %d."
  3859. diff -upr linux-3.10.0-693.11.1.el7/drivers/scsi/bnx2i/bnx2i_hwi.c linux-3.10.0-693.11.6.el7/drivers/scsi/bnx2i/bnx2i_hwi.c
  3860. --- linux-3.10.0-693.11.1.el7/drivers/scsi/bnx2i/bnx2i_hwi.c 2017-10-27 11:14:15.000000000 +0200
  3861. +++ linux-3.10.0-693.11.6.el7/drivers/scsi/bnx2i/bnx2i_hwi.c 2017-12-28 19:59:43.000000000 +0100
  3862. @@ -239,11 +239,14 @@ void bnx2i_put_rq_buf(struct bnx2i_conn
  3863. ep->qp.rq_prod_idx += count;
  3864.  
  3865. if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) {
  3866. + gmb();
  3867. ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes;
  3868. if (!hi_bit)
  3869. ep->qp.rq_prod_idx |= 0x8000;
  3870. - } else
  3871. + } else {
  3872. + gmb();
  3873. ep->qp.rq_prod_idx |= hi_bit;
  3874. + }
  3875.  
  3876. if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
  3877. rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt;
  3878. diff -upr linux-3.10.0-693.11.1.el7/drivers/scsi/hpsa.c linux-3.10.0-693.11.6.el7/drivers/scsi/hpsa.c
  3879. --- linux-3.10.0-693.11.1.el7/drivers/scsi/hpsa.c 2017-10-27 11:14:15.000000000 +0200
  3880. +++ linux-3.10.0-693.11.6.el7/drivers/scsi/hpsa.c 2017-12-28 19:59:43.000000000 +0100
  3881. @@ -4839,6 +4839,7 @@ static int hpsa_scsi_ioaccel2_queue_comm
  3882. if (use_sg) {
  3883. curr_sg = cp->sg;
  3884. if (use_sg > h->ioaccel_maxsg) {
  3885. + gmb();
  3886. addr64 = le64_to_cpu(
  3887. h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
  3888. curr_sg->address = cpu_to_le64(addr64);
  3889. diff -upr linux-3.10.0-693.11.1.el7/drivers/scsi/isci/host.c linux-3.10.0-693.11.6.el7/drivers/scsi/isci/host.c
  3890. --- linux-3.10.0-693.11.1.el7/drivers/scsi/isci/host.c 2017-10-27 11:14:15.000000000 +0200
  3891. +++ linux-3.10.0-693.11.6.el7/drivers/scsi/isci/host.c 2017-12-28 19:59:43.000000000 +0100
  3892. @@ -2465,8 +2465,10 @@ struct isci_request *sci_request_by_tag(
  3893. task_index = ISCI_TAG_TCI(io_tag);
  3894.  
  3895. if (task_index < ihost->task_context_entries) {
  3896. - struct isci_request *ireq = ihost->reqs[task_index];
  3897. + struct isci_request *ireq;
  3898.  
  3899. + gmb();
  3900. + ireq = ihost->reqs[task_index];
  3901. if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
  3902. task_sequence = ISCI_TAG_SEQ(io_tag);
  3903.  
  3904. diff -upr linux-3.10.0-693.11.1.el7/drivers/scsi/lpfc/lpfc_bsg.c linux-3.10.0-693.11.6.el7/drivers/scsi/lpfc/lpfc_bsg.c
  3905. --- linux-3.10.0-693.11.1.el7/drivers/scsi/lpfc/lpfc_bsg.c 2017-10-27 11:14:15.000000000 +0200
  3906. +++ linux-3.10.0-693.11.6.el7/drivers/scsi/lpfc/lpfc_bsg.c 2017-12-28 19:59:43.000000000 +0100
  3907. @@ -1321,6 +1321,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job
  3908. }
  3909.  
  3910. if (evt_dat->len > job->request_payload.payload_len) {
  3911. + gmb();
  3912. evt_dat->len = job->request_payload.payload_len;
  3913. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  3914. "2618 Truncated event data at %d "
  3915. diff -upr linux-3.10.0-693.11.1.el7/drivers/scsi/mpt3sas/mpt3sas_base.c linux-3.10.0-693.11.6.el7/drivers/scsi/mpt3sas/mpt3sas_base.c
  3916. --- linux-3.10.0-693.11.1.el7/drivers/scsi/mpt3sas/mpt3sas_base.c 2017-10-27 11:14:15.000000000 +0200
  3917. +++ linux-3.10.0-693.11.6.el7/drivers/scsi/mpt3sas/mpt3sas_base.c 2017-12-28 19:59:43.000000000 +0100
  3918. @@ -3407,6 +3407,7 @@ _base_allocate_memory_pools(struct MPT3S
  3919. ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
  3920. + 1;
  3921. if (chains_needed_per_io > facts->MaxChainDepth) {
  3922. + gmb();
  3923. chains_needed_per_io = facts->MaxChainDepth;
  3924. ioc->shost->sg_tablesize = min_t(u16,
  3925. ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
  3926. diff -upr linux-3.10.0-693.11.1.el7/drivers/scsi/osst.c linux-3.10.0-693.11.6.el7/drivers/scsi/osst.c
  3927. --- linux-3.10.0-693.11.1.el7/drivers/scsi/osst.c 2017-10-27 11:14:15.000000000 +0200
  3928. +++ linux-3.10.0-693.11.6.el7/drivers/scsi/osst.c 2017-12-28 19:59:43.000000000 +0100
  3929. @@ -3493,6 +3493,7 @@ static ssize_t osst_write(struct file *
  3930. }
  3931. }
  3932. if ((STps->drv_file + STps->drv_block) > 0 && STps->drv_file < STp->filemark_cnt) {
  3933. + gmb();
  3934. STp->filemark_cnt = STps->drv_file;
  3935. STp->last_mark_ppos =
  3936. ntohl(STp->header_cache->dat_fm_tab.fm_tab_ent[STp->filemark_cnt-1]);
  3937. diff -upr linux-3.10.0-693.11.1.el7/drivers/scsi/qla2xxx/qla_isr.c linux-3.10.0-693.11.6.el7/drivers/scsi/qla2xxx/qla_isr.c
  3938. --- linux-3.10.0-693.11.1.el7/drivers/scsi/qla2xxx/qla_isr.c 2017-10-27 11:14:15.000000000 +0200
  3939. +++ linux-3.10.0-693.11.6.el7/drivers/scsi/qla2xxx/qla_isr.c 2017-12-28 19:59:43.000000000 +0100
  3940. @@ -2093,6 +2093,7 @@ qla2x00_status_entry(scsi_qla_host_t *vh
  3941.  
  3942. /* Validate handle. */
  3943. if (handle < req->num_outstanding_cmds) {
  3944. + gmb();
  3945. sp = req->outstanding_cmds[handle];
  3946. if (!sp) {
  3947. ql_dbg(ql_dbg_io, vha, 0x3075,
  3948. @@ -2101,6 +2102,7 @@ qla2x00_status_entry(scsi_qla_host_t *vh
  3949. return;
  3950. }
  3951. } else {
  3952. + gmb();
  3953. ql_dbg(ql_dbg_io, vha, 0x3017,
  3954. "Invalid status handle, out of range (0x%x).\n",
  3955. sts->handle);
  3956. diff -upr linux-3.10.0-693.11.1.el7/drivers/scsi/qla2xxx/qla_mr.c linux-3.10.0-693.11.6.el7/drivers/scsi/qla2xxx/qla_mr.c
  3957. --- linux-3.10.0-693.11.1.el7/drivers/scsi/qla2xxx/qla_mr.c 2017-10-27 11:14:15.000000000 +0200
  3958. +++ linux-3.10.0-693.11.6.el7/drivers/scsi/qla2xxx/qla_mr.c 2017-12-28 19:59:43.000000000 +0100
  3959. @@ -2303,10 +2303,12 @@ qlafx00_status_entry(scsi_qla_host_t *vh
  3960. req = ha->req_q_map[que];
  3961.  
  3962. /* Validate handle. */
  3963. - if (handle < req->num_outstanding_cmds)
  3964. + if (handle < req->num_outstanding_cmds) {
  3965. + gmb();
  3966. sp = req->outstanding_cmds[handle];
  3967. - else
  3968. + } else {
  3969. sp = NULL;
  3970. + }
  3971.  
  3972. if (sp == NULL) {
  3973. ql_dbg(ql_dbg_io, vha, 0x3034,
  3974. @@ -2654,10 +2656,12 @@ qlafx00_multistatus_entry(struct scsi_ql
  3975. req = ha->req_q_map[que];
  3976.  
  3977. /* Validate handle. */
  3978. - if (handle < req->num_outstanding_cmds)
  3979. + if (handle < req->num_outstanding_cmds) {
  3980. + gmb();
  3981. sp = req->outstanding_cmds[handle];
  3982. - else
  3983. + } else {
  3984. sp = NULL;
  3985. + }
  3986.  
  3987. if (sp == NULL) {
  3988. ql_dbg(ql_dbg_io, vha, 0x3044,
  3989. diff -upr linux-3.10.0-693.11.1.el7/drivers/target/iscsi/iscsi_target_datain_values.c linux-3.10.0-693.11.6.el7/drivers/target/iscsi/iscsi_target_datain_values.c
  3990. --- linux-3.10.0-693.11.1.el7/drivers/target/iscsi/iscsi_target_datain_values.c 2017-10-27 11:14:15.000000000 +0200
  3991. +++ linux-3.10.0-693.11.6.el7/drivers/target/iscsi/iscsi_target_datain_values.c 2017-12-28 19:59:43.000000000 +0100
  3992. @@ -127,10 +127,12 @@ static struct iscsi_datain_req *iscsit_s
  3993. if ((next_burst_len +
  3994. conn->conn_ops->MaxRecvDataSegmentLength) <
  3995. conn->sess->sess_ops->MaxBurstLength) {
  3996. + gmb();
  3997. datain->length =
  3998. conn->conn_ops->MaxRecvDataSegmentLength;
  3999. next_burst_len += datain->length;
  4000. } else {
  4001. + gmb();
  4002. datain->length = (conn->sess->sess_ops->MaxBurstLength -
  4003. next_burst_len);
  4004. next_burst_len = 0;
  4005. diff -upr linux-3.10.0-693.11.1.el7/drivers/target/iscsi/iscsi_target_erl1.c linux-3.10.0-693.11.6.el7/drivers/target/iscsi/iscsi_target_erl1.c
  4006. --- linux-3.10.0-693.11.1.el7/drivers/target/iscsi/iscsi_target_erl1.c 2017-10-27 11:14:15.000000000 +0200
  4007. +++ linux-3.10.0-693.11.6.el7/drivers/target/iscsi/iscsi_target_erl1.c 2017-12-28 19:59:43.000000000 +0100
  4008. @@ -212,11 +212,13 @@ int iscsit_create_recovery_datain_values
  4009. if ((dr->next_burst_len +
  4010. conn->conn_ops->MaxRecvDataSegmentLength) <
  4011. conn->sess->sess_ops->MaxBurstLength) {
  4012. + gmb();
  4013. dr->read_data_done +=
  4014. conn->conn_ops->MaxRecvDataSegmentLength;
  4015. dr->next_burst_len +=
  4016. conn->conn_ops->MaxRecvDataSegmentLength;
  4017. } else {
  4018. + gmb();
  4019. dr->read_data_done +=
  4020. (conn->sess->sess_ops->MaxBurstLength -
  4021. dr->next_burst_len);
  4022. diff -upr linux-3.10.0-693.11.1.el7/drivers/target/iscsi/iscsi_target_erl2.c linux-3.10.0-693.11.6.el7/drivers/target/iscsi/iscsi_target_erl2.c
  4023. --- linux-3.10.0-693.11.1.el7/drivers/target/iscsi/iscsi_target_erl2.c 2017-10-27 11:14:15.000000000 +0200
  4024. +++ linux-3.10.0-693.11.6.el7/drivers/target/iscsi/iscsi_target_erl2.c 2017-12-28 19:59:43.000000000 +0100
  4025. @@ -46,11 +46,13 @@ void iscsit_create_conn_recovery_datain_
  4026. if ((cmd->next_burst_len +
  4027. conn->conn_ops->MaxRecvDataSegmentLength) <
  4028. conn->sess->sess_ops->MaxBurstLength) {
  4029. + gmb();
  4030. cmd->read_data_done +=
  4031. conn->conn_ops->MaxRecvDataSegmentLength;
  4032. cmd->next_burst_len +=
  4033. conn->conn_ops->MaxRecvDataSegmentLength;
  4034. } else {
  4035. + gmb();
  4036. cmd->read_data_done +=
  4037. (conn->sess->sess_ops->MaxBurstLength -
  4038. cmd->next_burst_len);
  4039. diff -upr linux-3.10.0-693.11.1.el7/drivers/target/target_core_rd.c linux-3.10.0-693.11.6.el7/drivers/target/target_core_rd.c
  4040. --- linux-3.10.0-693.11.1.el7/drivers/target/target_core_rd.c 2017-10-27 11:14:15.000000000 +0200
  4041. +++ linux-3.10.0-693.11.6.el7/drivers/target/target_core_rd.c 2017-12-28 19:59:43.000000000 +0100
  4042. @@ -350,6 +350,7 @@ static struct rd_dev_sg_table *rd_get_sg
  4043.  
  4044. i = page / sg_per_table;
  4045. if (i < rd_dev->sg_table_count) {
  4046. + gmb();
  4047. sg_table = &rd_dev->sg_table_array[i];
  4048. if ((sg_table->page_start_offset <= page) &&
  4049. (sg_table->page_end_offset >= page))
  4050. @@ -370,6 +371,7 @@ static struct rd_dev_sg_table *rd_get_pr
  4051.  
  4052. i = page / sg_per_table;
  4053. if (i < rd_dev->sg_prot_count) {
  4054. + gmb();
  4055. sg_table = &rd_dev->sg_prot_array[i];
  4056. if ((sg_table->page_start_offset <= page) &&
  4057. (sg_table->page_end_offset >= page))
  4058. diff -upr linux-3.10.0-693.11.1.el7/fs/btrfs/free-space-cache.c linux-3.10.0-693.11.6.el7/fs/btrfs/free-space-cache.c
  4059. --- linux-3.10.0-693.11.1.el7/fs/btrfs/free-space-cache.c 2017-10-27 11:14:15.000000000 +0200
  4060. +++ linux-3.10.0-693.11.6.el7/fs/btrfs/free-space-cache.c 2017-12-28 19:59:43.000000000 +0100
  4061. @@ -1439,10 +1439,13 @@ static int tree_insert_offset(struct rb_
  4062. info = rb_entry(parent, struct btrfs_free_space, offset_index);
  4063.  
  4064. if (offset < info->offset) {
  4065. + gmb();
  4066. p = &(*p)->rb_left;
  4067. } else if (offset > info->offset) {
  4068. + gmb();
  4069. p = &(*p)->rb_right;
  4070. } else {
  4071. + gmb();
  4072. /*
  4073. * we could have a bitmap entry and an extent entry
  4074. * share the same offset. If this is the case, we want
  4075. @@ -1796,6 +1799,7 @@ find_free_space(struct btrfs_free_space_
  4076. for (node = &entry->offset_index; node; node = rb_next(node)) {
  4077. entry = rb_entry(node, struct btrfs_free_space, offset_index);
  4078. if (entry->bytes < *bytes) {
  4079. + gmb();
  4080. if (entry->bytes > *max_extent_size)
  4081. *max_extent_size = entry->bytes;
  4082. continue;
  4083. diff -upr linux-3.10.0-693.11.1.el7/fs/btrfs/volumes.c linux-3.10.0-693.11.6.el7/fs/btrfs/volumes.c
  4084. --- linux-3.10.0-693.11.1.el7/fs/btrfs/volumes.c 2017-10-27 11:14:15.000000000 +0200
  4085. +++ linux-3.10.0-693.11.6.el7/fs/btrfs/volumes.c 2017-12-28 19:59:43.000000000 +0100
  4086. @@ -1803,8 +1803,10 @@ static int btrfs_check_raid_min_devices(
  4087. continue;
  4088.  
  4089. if (num_devices < btrfs_raid_array[i].devs_min) {
  4090. - int ret = btrfs_raid_mindev_error[i];
  4091. + int ret;
  4092.  
  4093. + gmb();
  4094. + ret = btrfs_raid_mindev_error[i];
  4095. if (ret)
  4096. return ret;
  4097. }
  4098. diff -upr linux-3.10.0-693.11.1.el7/fs/ceph/snap.c linux-3.10.0-693.11.6.el7/fs/ceph/snap.c
  4099. --- linux-3.10.0-693.11.1.el7/fs/ceph/snap.c 2017-10-27 11:14:15.000000000 +0200
  4100. +++ linux-3.10.0-693.11.6.el7/fs/ceph/snap.c 2017-12-28 19:59:43.000000000 +0100
  4101. @@ -140,11 +140,14 @@ static struct ceph_snap_realm *__lookup_
  4102.  
  4103. while (n) {
  4104. r = rb_entry(n, struct ceph_snap_realm, node);
  4105. - if (ino < r->ino)
  4106. + if (ino < r->ino) {
  4107. + gmb();
  4108. n = n->rb_left;
  4109. - else if (ino > r->ino)
  4110. + } else if (ino > r->ino) {
  4111. + gmb();
  4112. n = n->rb_right;
  4113. - else {
  4114. + } else {
  4115. + gmb();
  4116. dout("lookup_snap_realm %llx %p\n", r->ino, r);
  4117. return r;
  4118. }
  4119. diff -upr linux-3.10.0-693.11.1.el7/fs/ext4/extents.c linux-3.10.0-693.11.6.el7/fs/ext4/extents.c
  4120. --- linux-3.10.0-693.11.1.el7/fs/ext4/extents.c 2017-10-27 11:14:15.000000000 +0200
  4121. +++ linux-3.10.0-693.11.6.el7/fs/ext4/extents.c 2017-12-28 19:59:43.000000000 +0100
  4122. @@ -3577,6 +3577,7 @@ static int ext4_ext_convert_to_initializ
  4123. split_map.m_len = map->m_len;
  4124.  
  4125. if (max_zeroout && (allocated > map->m_len)) {
  4126. + gmb();
  4127. if (allocated <= max_zeroout) {
  4128. /* case 3 */
  4129. zero_ex.ee_block =
  4130. diff -upr linux-3.10.0-693.11.1.el7/fs/locks.c linux-3.10.0-693.11.6.el7/fs/locks.c
  4131. --- linux-3.10.0-693.11.1.el7/fs/locks.c 2017-10-27 11:14:15.000000000 +0200
  4132. +++ linux-3.10.0-693.11.6.el7/fs/locks.c 2017-12-28 19:59:43.000000000 +0100
  4133. @@ -1046,14 +1046,20 @@ static int __posix_lock_file(struct inod
  4134. * lock yielding from the lower start address of both
  4135. * locks to the higher end address.
  4136. */
  4137. - if (fl->fl_start > request->fl_start)
  4138. + if (fl->fl_start > request->fl_start) {
  4139. + gmb();
  4140. fl->fl_start = request->fl_start;
  4141. - else
  4142. + } else {
  4143. + gmb();
  4144. request->fl_start = fl->fl_start;
  4145. - if (fl->fl_end < request->fl_end)
  4146. + }
  4147. + if (fl->fl_end < request->fl_end) {
  4148. + gmb();
  4149. fl->fl_end = request->fl_end;
  4150. - else
  4151. + } else {
  4152. + gmb();
  4153. request->fl_end = fl->fl_end;
  4154. + }
  4155. if (added) {
  4156. locks_delete_lock(before, &dispose);
  4157. continue;
  4158. diff -upr linux-3.10.0-693.11.1.el7/fs/nfs/direct.c linux-3.10.0-693.11.6.el7/fs/nfs/direct.c
  4159. --- linux-3.10.0-693.11.1.el7/fs/nfs/direct.c 2017-10-27 11:14:15.000000000 +0200
  4160. +++ linux-3.10.0-693.11.6.el7/fs/nfs/direct.c 2017-12-28 19:59:43.000000000 +0100
  4161. @@ -165,9 +165,10 @@ nfs_direct_select_verf(struct nfs_direct
  4162. * for layout segment where nbuckets is zero.
  4163. */
  4164. if (ds_clp && dreq->ds_cinfo.nbuckets > 0) {
  4165. - if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets)
  4166. + if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets) {
  4167. + gmb();
  4168. verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf;
  4169. - else
  4170. + } else
  4171. WARN_ON_ONCE(1);
  4172. }
  4173. #endif
  4174. diff -upr linux-3.10.0-693.11.1.el7/fs/nfs/flexfilelayout/flexfilelayout.c linux-3.10.0-693.11.6.el7/fs/nfs/flexfilelayout/flexfilelayout.c
  4175. --- linux-3.10.0-693.11.1.el7/fs/nfs/flexfilelayout/flexfilelayout.c 2017-10-27 11:14:15.000000000 +0200
  4176. +++ linux-3.10.0-693.11.6.el7/fs/nfs/flexfilelayout/flexfilelayout.c 2017-12-28 19:59:43.000000000 +0100
  4177. @@ -343,9 +343,11 @@ static void ff_layout_sort_mirrors(struc
  4178. for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
  4179. for (j = i + 1; j < fls->mirror_array_cnt; j++)
  4180. if (fls->mirror_array[i]->efficiency <
  4181. - fls->mirror_array[j]->efficiency)
  4182. + fls->mirror_array[j]->efficiency) {
  4183. + gmb();
  4184. swap(fls->mirror_array[i],
  4185. fls->mirror_array[j]);
  4186. + }
  4187. }
  4188. }
  4189.  
  4190. diff -upr linux-3.10.0-693.11.1.el7/fs/udf/balloc.c linux-3.10.0-693.11.6.el7/fs/udf/balloc.c
  4191. --- linux-3.10.0-693.11.1.el7/fs/udf/balloc.c 2017-10-27 11:14:15.000000000 +0200
  4192. +++ linux-3.10.0-693.11.6.el7/fs/udf/balloc.c 2017-12-28 19:59:43.000000000 +0100
  4193. @@ -491,6 +491,7 @@ static void udf_table_free_blocks(struct
  4194. aed->previousAllocExtLocation =
  4195. cpu_to_le32(oepos.block.logicalBlockNum);
  4196. if (epos.offset + adsize > sb->s_blocksize) {
  4197. + gmb();
  4198. loffset = epos.offset;
  4199. aed->lengthAllocDescs = cpu_to_le32(adsize);
  4200. sptr = iinfo->i_ext.i_data + epos.offset
  4201. @@ -501,6 +502,7 @@ static void udf_table_free_blocks(struct
  4202. epos.offset = sizeof(struct allocExtDesc) +
  4203. adsize;
  4204. } else {
  4205. + gmb();
  4206. loffset = epos.offset + adsize;
  4207. aed->lengthAllocDescs = cpu_to_le32(0);
  4208. if (oepos.bh) {
  4209. diff -upr linux-3.10.0-693.11.1.el7/fs/udf/inode.c linux-3.10.0-693.11.6.el7/fs/udf/inode.c
  4210. --- linux-3.10.0-693.11.1.el7/fs/udf/inode.c 2017-10-27 11:14:15.000000000 +0200
  4211. +++ linux-3.10.0-693.11.6.el7/fs/udf/inode.c 2017-12-28 19:59:43.000000000 +0100
  4212. @@ -1915,6 +1915,7 @@ int udf_add_aext(struct inode *inode, st
  4213. aed->previousAllocExtLocation =
  4214. cpu_to_le32(obloc.logicalBlockNum);
  4215. if (epos->offset + adsize > inode->i_sb->s_blocksize) {
  4216. + gmb();
  4217. loffset = epos->offset;
  4218. aed->lengthAllocDescs = cpu_to_le32(adsize);
  4219. sptr = ptr - adsize;
  4220. @@ -1922,6 +1923,7 @@ int udf_add_aext(struct inode *inode, st
  4221. memcpy(dptr, sptr, adsize);
  4222. epos->offset = sizeof(struct allocExtDesc) + adsize;
  4223. } else {
  4224. + gmb();
  4225. loffset = epos->offset + adsize;
  4226. aed->lengthAllocDescs = cpu_to_le32(0);
  4227. sptr = ptr;
  4228. diff -upr linux-3.10.0-693.11.1.el7/fs/udf/misc.c linux-3.10.0-693.11.6.el7/fs/udf/misc.c
  4229. --- linux-3.10.0-693.11.1.el7/fs/udf/misc.c 2017-10-27 11:14:15.000000000 +0200
  4230. +++ linux-3.10.0-693.11.6.el7/fs/udf/misc.c 2017-12-28 19:59:43.000000000 +0100
  4231. @@ -103,8 +103,10 @@ struct genericFormat *udf_add_extendedat
  4232. if (type < 2048) {
  4233. if (le32_to_cpu(eahd->appAttrLocation) <
  4234. iinfo->i_lenEAttr) {
  4235. - uint32_t aal =
  4236. - le32_to_cpu(eahd->appAttrLocation);
  4237. + uint32_t aal;
  4238. +
  4239. + gmb();
  4240. + aal = le32_to_cpu(eahd->appAttrLocation);
  4241. memmove(&ea[offset - aal + size],
  4242. &ea[aal], offset - aal);
  4243. offset -= aal;
  4244. @@ -113,8 +115,10 @@ struct genericFormat *udf_add_extendedat
  4245. }
  4246. if (le32_to_cpu(eahd->impAttrLocation) <
  4247. iinfo->i_lenEAttr) {
  4248. - uint32_t ial =
  4249. - le32_to_cpu(eahd->impAttrLocation);
  4250. + uint32_t ial;
  4251. +
  4252. + gmb();
  4253. + ial = le32_to_cpu(eahd->impAttrLocation);
  4254. memmove(&ea[offset - ial + size],
  4255. &ea[ial], offset - ial);
  4256. offset -= ial;
  4257. @@ -124,8 +128,10 @@ struct genericFormat *udf_add_extendedat
  4258. } else if (type < 65536) {
  4259. if (le32_to_cpu(eahd->appAttrLocation) <
  4260. iinfo->i_lenEAttr) {
  4261. - uint32_t aal =
  4262. - le32_to_cpu(eahd->appAttrLocation);
  4263. + uint32_t aal;
  4264. +
  4265. + gmb();
  4266. + aal = le32_to_cpu(eahd->appAttrLocation);
  4267. memmove(&ea[offset - aal + size],
  4268. &ea[aal], offset - aal);
  4269. offset -= aal;
  4270. diff -upr linux-3.10.0-693.11.1.el7/fs/udf/super.c linux-3.10.0-693.11.6.el7/fs/udf/super.c
  4271. --- linux-3.10.0-693.11.1.el7/fs/udf/super.c 2017-10-27 11:14:15.000000000 +0200
  4272. +++ linux-3.10.0-693.11.6.el7/fs/udf/super.c 2017-12-28 19:59:43.000000000 +0100
  4273. @@ -2374,6 +2374,7 @@ static unsigned int udf_count_free(struc
  4274. (struct logicalVolIntegrityDesc *)
  4275. sbi->s_lvid_bh->b_data;
  4276. if (le32_to_cpu(lvid->numOfPartitions) > sbi->s_partition) {
  4277. + gmb();
  4278. accum = le32_to_cpu(
  4279. lvid->freeSpaceTable[sbi->s_partition]);
  4280. if (accum == 0xFFFFFFFF)
  4281. diff -upr linux-3.10.0-693.11.1.el7/fs/userfaultfd.c linux-3.10.0-693.11.6.el7/fs/userfaultfd.c
  4282. --- linux-3.10.0-693.11.1.el7/fs/userfaultfd.c 2017-10-27 11:14:15.000000000 +0200
  4283. +++ linux-3.10.0-693.11.6.el7/fs/userfaultfd.c 2017-12-28 19:59:43.000000000 +0100
  4284. @@ -557,6 +557,12 @@ static void userfaultfd_event_wait_compl
  4285. break;
  4286. if (ACCESS_ONCE(ctx->released) ||
  4287. fatal_signal_pending(current)) {
  4288. + /*
  4289. + * &ewq->wq may be queued in fork_event, but
  4290. + * __remove_wait_queue ignores the head
  4291. + * parameter. It would be a problem if it
  4292. + * didn't.
  4293. + */
  4294. __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
  4295. if (ewq->msg.event == UFFD_EVENT_FORK) {
  4296. struct userfaultfd_ctx *new;
  4297. @@ -1027,6 +1033,12 @@ static ssize_t userfaultfd_ctx_read(stru
  4298. (unsigned long)
  4299. uwq->msg.arg.reserved.reserved1;
  4300. list_move(&uwq->wq.task_list, &fork_event);
  4301. + /*
  4302. + * fork_nctx can be freed as soon as
  4303. + * we drop the lock, unless we take a
  4304. + * reference on it.
  4305. + */
  4306. + userfaultfd_ctx_get(fork_nctx);
  4307. spin_unlock(&ctx->event_wqh.lock);
  4308. ret = 0;
  4309. break;
  4310. @@ -1057,19 +1069,53 @@ static ssize_t userfaultfd_ctx_read(stru
  4311.  
  4312. if (!ret && msg->event == UFFD_EVENT_FORK) {
  4313. ret = resolve_userfault_fork(ctx, fork_nctx, msg);
  4314. + spin_lock(&ctx->event_wqh.lock);
  4315. + if (!list_empty(&fork_event)) {
  4316. + /*
  4317. + * The fork thread didn't abort, so we can
  4318. + * drop the temporary refcount.
  4319. + */
  4320. + userfaultfd_ctx_put(fork_nctx);
  4321. +
  4322. + uwq = list_first_entry(&fork_event,
  4323. + typeof(*uwq),
  4324. + wq.task_list);
  4325. + /*
  4326. + * If fork_event list wasn't empty and in turn
  4327. + * the event wasn't already released by fork
  4328. + * (the event is allocated on fork kernel
  4329. + * stack), put the event back to its place in
  4330. + * the event_wq. fork_event head will be freed
  4331. + * as soon as we return so the event cannot
  4332. + * stay queued there no matter the current
  4333. + * "ret" value.
  4334. + */
  4335. + list_del(&uwq->wq.task_list);
  4336. + __add_wait_queue(&ctx->event_wqh, &uwq->wq);
  4337.  
  4338. - if (!ret) {
  4339. - spin_lock(&ctx->event_wqh.lock);
  4340. - if (!list_empty(&fork_event)) {
  4341. - uwq = list_first_entry(&fork_event,
  4342. - typeof(*uwq),
  4343. - wq.task_list);
  4344. - list_del(&uwq->wq.task_list);
  4345. - __add_wait_queue(&ctx->event_wqh, &uwq->wq);
  4346. + /*
  4347. + * Leave the event in the waitqueue and report
  4348. + * error to userland if we failed to resolve
  4349. + * the userfault fork.
  4350. + */
  4351. + if (likely(!ret))
  4352. userfaultfd_event_complete(ctx, uwq);
  4353. - }
  4354. - spin_unlock(&ctx->event_wqh.lock);
  4355. + } else {
  4356. + /*
  4357. + * Here the fork thread aborted and the
  4358. + * refcount from the fork thread on fork_nctx
  4359. + * has already been released. We still hold
  4360. + * the reference we took before releasing the
  4361. + * lock above. If resolve_userfault_fork
  4362. + * failed we've to drop it because the
  4363. + * fork_nctx has to be freed in such case. If
  4364. + * it succeeded we'll hold it because the new
  4365. + * uffd references it.
  4366. + */
  4367. + if (ret)
  4368. + userfaultfd_ctx_put(fork_nctx);
  4369. }
  4370. + spin_unlock(&ctx->event_wqh.lock);
  4371. }
  4372.  
  4373. return ret;
  4374. diff -upr linux-3.10.0-693.11.1.el7/fs/xfs/libxfs/xfs_attr_leaf.c linux-3.10.0-693.11.6.el7/fs/xfs/libxfs/xfs_attr_leaf.c
  4375. --- linux-3.10.0-693.11.1.el7/fs/xfs/libxfs/xfs_attr_leaf.c 2017-10-27 11:14:15.000000000 +0200
  4376. +++ linux-3.10.0-693.11.6.el7/fs/xfs/libxfs/xfs_attr_leaf.c 2017-12-28 19:59:43.000000000 +0100
  4377. @@ -1172,9 +1172,11 @@ xfs_attr3_leaf_add(
  4378. + xfs_attr3_leaf_hdr_size(leaf);
  4379. for (sum = 0, i = XFS_ATTR_LEAF_MAPSIZE - 1; i >= 0; i--) {
  4380. if (tablesize > ichdr.firstused) {
  4381. + gmb();
  4382. sum += ichdr.freemap[i].size;
  4383. continue;
  4384. }
  4385. + gmb();
  4386. if (!ichdr.freemap[i].size)
  4387. continue; /* no space in this map */
  4388. tmp = entsize;
  4389. @@ -1526,6 +1528,7 @@ xfs_attr3_leaf_rebalance(
  4390. * Move any entries required from leaf to leaf:
  4391. */
  4392. if (count < ichdr1.count) {
  4393. + gmb();
  4394. /*
  4395. * Figure the total bytes to be added to the destination leaf.
  4396. */
  4397. @@ -1549,6 +1552,7 @@ xfs_attr3_leaf_rebalance(
  4398. ichdr1.count - count, leaf2, &ichdr2, 0, count);
  4399.  
  4400. } else if (count > ichdr1.count) {
  4401. + gmb();
  4402. /*
  4403. * I assert that since all callers pass in an empty
  4404. * second buffer, this code should never execute.
  4405. diff -upr linux-3.10.0-693.11.1.el7/fs/xfs/libxfs/xfs_bmap.c linux-3.10.0-693.11.6.el7/fs/xfs/libxfs/xfs_bmap.c
  4406. --- linux-3.10.0-693.11.1.el7/fs/xfs/libxfs/xfs_bmap.c 2017-10-27 11:14:15.000000000 +0200
  4407. +++ linux-3.10.0-693.11.6.el7/fs/xfs/libxfs/xfs_bmap.c 2017-12-28 19:59:43.000000000 +0100
  4408. @@ -3467,13 +3467,17 @@ xfs_bmap_adjacent(
  4409. * offset by our length.
  4410. */
  4411. if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
  4412. - ISVALID(gotbno - gotdiff, gotbno))
  4413. + ISVALID(gotbno - gotdiff, gotbno)) {
  4414. + gmb();
  4415. gotbno -= adjust;
  4416. - else if (ISVALID(gotbno - ap->length, gotbno)) {
  4417. + } else if (ISVALID(gotbno - ap->length, gotbno)) {
  4418. + gmb();
  4419. gotbno -= ap->length;
  4420. gotdiff += adjust - ap->length;
  4421. - } else
  4422. + } else {
  4423. + gmb();
  4424. gotdiff += adjust;
  4425. + }
  4426. /*
  4427. * If the firstblock forbids it, can't use it,
  4428. * must use default.
  4429. diff -upr linux-3.10.0-693.11.1.el7/include/asm-generic/barrier.h linux-3.10.0-693.11.6.el7/include/asm-generic/barrier.h
  4430. --- linux-3.10.0-693.11.1.el7/include/asm-generic/barrier.h 2017-10-27 11:14:15.000000000 +0200
  4431. +++ linux-3.10.0-693.11.6.el7/include/asm-generic/barrier.h 2017-12-28 19:59:43.000000000 +0100
  4432. @@ -42,6 +42,10 @@
  4433. #define wmb() mb()
  4434. #endif
  4435.  
  4436. +#ifndef gmb
  4437. +#define gmb() do { } while (0)
  4438. +#endif
  4439. +
  4440. #ifndef dma_rmb
  4441. #define dma_rmb() rmb()
  4442. #endif
  4443. diff -upr linux-3.10.0-693.11.1.el7/include/asm-generic/sections.h linux-3.10.0-693.11.6.el7/include/asm-generic/sections.h
  4444. --- linux-3.10.0-693.11.1.el7/include/asm-generic/sections.h 2017-10-27 11:14:15.000000000 +0200
  4445. +++ linux-3.10.0-693.11.6.el7/include/asm-generic/sections.h 2017-12-28 19:59:43.000000000 +0100
  4446. @@ -12,6 +12,7 @@ extern char _end[];
  4447. extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
  4448. extern char __kprobes_text_start[], __kprobes_text_end[];
  4449. extern char __entry_text_start[], __entry_text_end[];
  4450. +extern char __irqentry_text_start[], __irqentry_text_end[];
  4451. extern char __initdata_begin[], __initdata_end[];
  4452. extern char __start_rodata[], __end_rodata[];
  4453.  
  4454. diff -upr linux-3.10.0-693.11.1.el7/include/asm-generic/vmlinux.lds.h linux-3.10.0-693.11.6.el7/include/asm-generic/vmlinux.lds.h
  4455. --- linux-3.10.0-693.11.1.el7/include/asm-generic/vmlinux.lds.h 2017-10-27 11:14:15.000000000 +0200
  4456. +++ linux-3.10.0-693.11.6.el7/include/asm-generic/vmlinux.lds.h 2017-12-28 19:59:43.000000000 +0100
  4457. @@ -718,6 +718,13 @@
  4458. VMLINUX_SYMBOL(__per_cpu_start) = .; \
  4459. *(.data..percpu..first) \
  4460. . = ALIGN(PAGE_SIZE); \
  4461. + VMLINUX_SYMBOL(__per_cpu_user_mapped_start) = .; \
  4462. + *(.data..percpu..user_mapped..page_aligned) \
  4463. + . = ALIGN(cacheline); \
  4464. + *(.data..percpu..user_mapped) \
  4465. + *(.data..percpu..user_mapped..shared_aligned) \
  4466. + VMLINUX_SYMBOL(__per_cpu_user_mapped_end) = .; \
  4467. + . = ALIGN(PAGE_SIZE); \
  4468. *(.data..percpu..page_aligned) \
  4469. . = ALIGN(cacheline); \
  4470. *(.data..percpu..readmostly) \
  4471. diff -upr linux-3.10.0-693.11.1.el7/include/linux/ceph/libceph.h linux-3.10.0-693.11.6.el7/include/linux/ceph/libceph.h
  4472. --- linux-3.10.0-693.11.1.el7/include/linux/ceph/libceph.h 2017-10-27 11:14:15.000000000 +0200
  4473. +++ linux-3.10.0-693.11.6.el7/include/linux/ceph/libceph.h 2017-12-28 19:59:43.000000000 +0100
  4474. @@ -195,11 +195,13 @@ static void insert_##name(struct rb_root
  4475. type *cur = rb_entry(*n, type, nodefld); \
  4476. \
  4477. parent = *n; \
  4478. - if (t->keyfld < cur->keyfld) \
  4479. + if (t->keyfld < cur->keyfld) { \
  4480. + gmb(); \
  4481. n = &(*n)->rb_left; \
  4482. - else if (t->keyfld > cur->keyfld) \
  4483. + } else if (t->keyfld > cur->keyfld) { \
  4484. + gmb(); \
  4485. n = &(*n)->rb_right; \
  4486. - else \
  4487. + } else \
  4488. BUG(); \
  4489. } \
  4490. \
  4491. diff -upr linux-3.10.0-693.11.1.el7/include/linux/fdtable.h linux-3.10.0-693.11.6.el7/include/linux/fdtable.h
  4492. --- linux-3.10.0-693.11.1.el7/include/linux/fdtable.h 2017-10-27 11:14:15.000000000 +0200
  4493. +++ linux-3.10.0-693.11.6.el7/include/linux/fdtable.h 2017-12-28 19:59:43.000000000 +0100
  4494. @@ -77,8 +77,10 @@ static inline struct file * fcheck_files
  4495. struct file * file = NULL;
  4496. struct fdtable *fdt = files_fdtable(files);
  4497.  
  4498. - if (fd < fdt->max_fds)
  4499. + if (fd < fdt->max_fds) {
  4500. + gmb();
  4501. file = rcu_dereference_check_fdtable(files, fdt->fd[fd]);
  4502. + }
  4503. return file;
  4504. }
  4505.  
  4506. diff -upr linux-3.10.0-693.11.1.el7/include/linux/interval_tree_generic.h linux-3.10.0-693.11.6.el7/include/linux/interval_tree_generic.h
  4507. --- linux-3.10.0-693.11.1.el7/include/linux/interval_tree_generic.h 2017-10-27 11:14:15.000000000 +0200
  4508. +++ linux-3.10.0-693.11.6.el7/include/linux/interval_tree_generic.h 2017-12-28 19:59:43.000000000 +0100
  4509. @@ -76,10 +76,13 @@ ITSTATIC void ITPREFIX ## _insert(ITSTRU
  4510. parent = rb_entry(rb_parent, ITSTRUCT, ITRB); \
  4511. if (parent->ITSUBTREE < last) \
  4512. parent->ITSUBTREE = last; \
  4513. - if (start < ITSTART(parent)) \
  4514. + if (start < ITSTART(parent)) { \
  4515. + gmb(); \
  4516. link = &parent->ITRB.rb_left; \
  4517. - else \
  4518. + } else { \
  4519. + gmb(); \
  4520. link = &parent->ITRB.rb_right; \
  4521. + } \
  4522. } \
  4523. \
  4524. node->ITSUBTREE = last; \
  4525. Only in linux-3.10.0-693.11.6.el7/include/linux: kaiser.h
  4526. diff -upr linux-3.10.0-693.11.1.el7/include/linux/percpu-defs.h linux-3.10.0-693.11.6.el7/include/linux/percpu-defs.h
  4527. --- linux-3.10.0-693.11.1.el7/include/linux/percpu-defs.h 2017-10-27 11:14:15.000000000 +0200
  4528. +++ linux-3.10.0-693.11.6.el7/include/linux/percpu-defs.h 2017-12-28 19:59:43.000000000 +0100
  4529. @@ -1,6 +1,12 @@
  4530. #ifndef _LINUX_PERCPU_DEFS_H
  4531. #define _LINUX_PERCPU_DEFS_H
  4532.  
  4533. +#ifdef CONFIG_KAISER
  4534. +#define USER_MAPPED_SECTION "..user_mapped"
  4535. +#else
  4536. +#define USER_MAPPED_SECTION ""
  4537. +#endif
  4538. +
  4539. /*
  4540. * Base implementations of per-CPU variable declarations and definitions, where
  4541. * the section in which the variable is to be placed is provided by the
  4542. @@ -93,6 +99,12 @@
  4543. #define DEFINE_PER_CPU(type, name) \
  4544. DEFINE_PER_CPU_SECTION(type, name, "")
  4545.  
  4546. +#define DECLARE_PER_CPU_USER_MAPPED(type, name) \
  4547. + DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION)
  4548. +
  4549. +#define DEFINE_PER_CPU_USER_MAPPED(type, name) \
  4550. + DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION)
  4551. +
  4552. /*
  4553. * Declaration/definition used for per-CPU variables that must come first in
  4554. * the set of variables.
  4555. @@ -122,6 +134,14 @@
  4556. DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
  4557. ____cacheline_aligned_in_smp
  4558.  
  4559. +#define DECLARE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(type, name) \
  4560. + DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION PER_CPU_SHARED_ALIGNED_SECTION) \
  4561. + ____cacheline_aligned_in_smp
  4562. +
  4563. +#define DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(type, name) \
  4564. + DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION PER_CPU_SHARED_ALIGNED_SECTION) \
  4565. + ____cacheline_aligned_in_smp
  4566. +
  4567. #define DECLARE_PER_CPU_ALIGNED(type, name) \
  4568. DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION) \
  4569. ____cacheline_aligned
  4570. @@ -140,6 +160,16 @@
  4571. #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
  4572. DEFINE_PER_CPU_SECTION(type, name, "..page_aligned") \
  4573. __aligned(PAGE_SIZE)
  4574. +/*
  4575. + * Declaration/definition used for per-CPU variables that must be page aligned and need to be mapped in user mode.
  4576. + */
  4577. +#define DECLARE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(type, name) \
  4578. + DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION"..page_aligned") \
  4579. + __aligned(PAGE_SIZE)
  4580. +
  4581. +#define DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(type, name) \
  4582. + DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION"..page_aligned") \
  4583. + __aligned(PAGE_SIZE)
  4584.  
  4585. /*
  4586. * Declaration/definition used for per-CPU variables that must be read mostly.
  4587. diff -upr linux-3.10.0-693.11.1.el7/include/linux/ptrace.h linux-3.10.0-693.11.6.el7/include/linux/ptrace.h
  4588. --- linux-3.10.0-693.11.1.el7/include/linux/ptrace.h 2017-10-27 11:14:15.000000000 +0200
  4589. +++ linux-3.10.0-693.11.6.el7/include/linux/ptrace.h 2017-12-28 19:59:43.000000000 +0100
  4590. @@ -58,8 +58,15 @@ extern void exit_ptrace(struct task_stru
  4591. #define PTRACE_MODE_READ 0x01
  4592. #define PTRACE_MODE_ATTACH 0x02
  4593. #define PTRACE_MODE_NOAUDIT 0x04
  4594. +#define PTRACE_MODE_NOACCESS_CHK 0x20
  4595. +#define PTRACE_MODE_IBPB (PTRACE_MODE_ATTACH | PTRACE_MODE_NOAUDIT \
  4596. + | PTRACE_MODE_NOACCESS_CHK)
  4597. /* Returns true on success, false on denial. */
  4598. extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
  4599. +extern int ___ptrace_may_access(struct task_struct *tracer,
  4600. + const struct cred *cred, /* tracer cred */
  4601. + struct task_struct *task,
  4602. + unsigned int mode);
  4603.  
  4604. static inline int ptrace_reparented(struct task_struct *child)
  4605. {
  4606. diff -upr linux-3.10.0-693.11.1.el7/init/main.c linux-3.10.0-693.11.6.el7/init/main.c
  4607. --- linux-3.10.0-693.11.1.el7/init/main.c 2017-10-27 11:14:15.000000000 +0200
  4608. +++ linux-3.10.0-693.11.6.el7/init/main.c 2017-12-28 19:59:43.000000000 +0100
  4609. @@ -72,6 +72,7 @@
  4610. #include <linux/perf_event.h>
  4611. #include <linux/file.h>
  4612. #include <linux/ptrace.h>
  4613. +#include <linux/kaiser.h>
  4614. #include <linux/blkdev.h>
  4615. #include <linux/elevator.h>
  4616. #include <linux/random.h>
  4617. @@ -478,6 +479,8 @@ static void __init mm_init(void)
  4618. pgtable_init();
  4619. vmalloc_init();
  4620. ioremap_huge_init();
  4621. + /* This just needs to be done before we first run userspace: */
  4622. + kaiser_init();
  4623. }
  4624.  
  4625. asmlinkage void __init start_kernel(void)
  4626. diff -upr linux-3.10.0-693.11.1.el7/kernel/fork.c linux-3.10.0-693.11.6.el7/kernel/fork.c
  4627. --- linux-3.10.0-693.11.1.el7/kernel/fork.c 2017-10-27 11:14:15.000000000 +0200
  4628. +++ linux-3.10.0-693.11.6.el7/kernel/fork.c 2017-12-28 19:59:43.000000000 +0100
  4629. @@ -57,6 +57,7 @@
  4630. #include <linux/tsacct_kern.h>
  4631. #include <linux/cn_proc.h>
  4632. #include <linux/freezer.h>
  4633. +#include <linux/kaiser.h>
  4634. #include <linux/delayacct.h>
  4635. #include <linux/taskstats_kern.h>
  4636. #include <linux/random.h>
  4637. diff -upr linux-3.10.0-693.11.1.el7/kernel/ptrace.c linux-3.10.0-693.11.6.el7/kernel/ptrace.c
  4638. --- linux-3.10.0-693.11.1.el7/kernel/ptrace.c 2017-10-27 11:14:15.000000000 +0200
  4639. +++ linux-3.10.0-693.11.6.el7/kernel/ptrace.c 2017-12-28 19:59:43.000000000 +0100
  4640. @@ -222,9 +222,12 @@ static int ptrace_has_cap(struct user_na
  4641. }
  4642.  
  4643. /* Returns 0 on success, -errno on denial. */
  4644. -static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
  4645. +int ___ptrace_may_access(struct task_struct *tracer,
  4646. + const struct cred *cred, /* tracer cred */
  4647. + struct task_struct *task,
  4648. + unsigned int mode)
  4649. {
  4650. - const struct cred *cred = current_cred(), *tcred;
  4651. + const struct cred *tcred;
  4652.  
  4653. /* May we inspect the given task?
  4654. * This check is used both for attaching with ptrace
  4655. @@ -236,9 +239,17 @@ static int __ptrace_may_access(struct ta
  4656. */
  4657. int dumpable = 0;
  4658. /* Don't let security modules deny introspection */
  4659. - if (task == current)
  4660. + if (task == tracer)
  4661. return 0;
  4662. rcu_read_lock();
  4663. + if (!cred) {
  4664. + WARN_ON_ONCE(tracer == current);
  4665. + WARN_ON_ONCE(task != current);
  4666. + cred = __task_cred(tracer);
  4667. + } else {
  4668. + WARN_ON_ONCE(tracer != current);
  4669. + WARN_ON_ONCE(task == current);
  4670. + }
  4671. tcred = __task_cred(task);
  4672. if (uid_eq(cred->uid, tcred->euid) &&
  4673. uid_eq(cred->uid, tcred->suid) &&
  4674. @@ -264,7 +275,15 @@ ok:
  4675. }
  4676. rcu_read_unlock();
  4677.  
  4678. - return security_ptrace_access_check(task, mode);
  4679. + if (!(mode & PTRACE_MODE_NOACCESS_CHK))
  4680. + return security_ptrace_access_check(task, mode);
  4681. +
  4682. + return 0;
  4683. +}
  4684. +
  4685. +static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
  4686. +{
  4687. + return ___ptrace_may_access(current, current_cred(), task, mode);
  4688. }
  4689.  
  4690. bool ptrace_may_access(struct task_struct *task, unsigned int mode)
  4691. diff -upr linux-3.10.0-693.11.1.el7/kernel/sched/cputime.c linux-3.10.0-693.11.6.el7/kernel/sched/cputime.c
  4692. --- linux-3.10.0-693.11.1.el7/kernel/sched/cputime.c 2017-10-27 11:14:15.000000000 +0200
  4693. +++ linux-3.10.0-693.11.6.el7/kernel/sched/cputime.c 2017-12-28 19:59:43.000000000 +0100
  4694. @@ -635,6 +635,7 @@ update:
  4695. * monotonicity for stime, analogous argument to above.
  4696. */
  4697. if (utime < prev->utime) {
  4698. + gmb();
  4699. utime = prev->utime;
  4700. stime = rtime - utime;
  4701. }
  4702. diff -upr linux-3.10.0-693.11.1.el7/kernel/sysctl.c linux-3.10.0-693.11.6.el7/kernel/sysctl.c
  4703. --- linux-3.10.0-693.11.1.el7/kernel/sysctl.c 2017-10-27 11:14:15.000000000 +0200
  4704. +++ linux-3.10.0-693.11.6.el7/kernel/sysctl.c 2017-12-28 19:59:43.000000000 +0100
  4705. @@ -978,13 +978,6 @@ static struct ctl_table kern_table[] = {
  4706. .proc_handler = proc_dointvec,
  4707. },
  4708. {
  4709. - .procname = "kstack_depth_to_print",
  4710. - .data = &kstack_depth_to_print,
  4711. - .maxlen = sizeof(int),
  4712. - .mode = 0644,
  4713. - .proc_handler = proc_dointvec,
  4714. - },
  4715. - {
  4716. .procname = "io_delay_type",
  4717. .data = &io_delay_type,
  4718. .maxlen = sizeof(int),
  4719. diff -upr linux-3.10.0-693.11.1.el7/kernel/user_namespace.c linux-3.10.0-693.11.6.el7/kernel/user_namespace.c
  4720. --- linux-3.10.0-693.11.1.el7/kernel/user_namespace.c 2017-10-27 11:14:15.000000000 +0200
  4721. +++ linux-3.10.0-693.11.6.el7/kernel/user_namespace.c 2017-12-28 19:59:43.000000000 +0100
  4722. @@ -554,8 +554,10 @@ static void *m_start(struct seq_file *se
  4723. struct uid_gid_extent *extent = NULL;
  4724. loff_t pos = *ppos;
  4725.  
  4726. - if (pos < map->nr_extents)
  4727. + if (pos < map->nr_extents) {
  4728. + gmb();
  4729. extent = &map->extent[pos];
  4730. + }
  4731.  
  4732. return extent;
  4733. }
  4734. diff -upr linux-3.10.0-693.11.1.el7/Makefile linux-3.10.0-693.11.6.el7/Makefile
  4735. --- linux-3.10.0-693.11.1.el7/Makefile 2017-10-27 11:14:15.000000000 +0200
  4736. +++ linux-3.10.0-693.11.6.el7/Makefile 2017-12-28 19:59:43.000000000 +0100
  4737. @@ -5,7 +5,7 @@ EXTRAVERSION =
  4738. NAME = Unicycling Gorilla
  4739. RHEL_MAJOR = 7
  4740. RHEL_MINOR = 4
  4741. -RHEL_RELEASE = 693.11.1
  4742. +RHEL_RELEASE = 693.11.6
  4743.  
  4744. #
  4745. # DRM backport version
  4746. diff -upr linux-3.10.0-693.11.1.el7/mm/fremap.c linux-3.10.0-693.11.6.el7/mm/fremap.c
  4747. --- linux-3.10.0-693.11.1.el7/mm/fremap.c 2017-10-27 11:14:15.000000000 +0200
  4748. +++ linux-3.10.0-693.11.6.el7/mm/fremap.c 2017-12-28 19:59:43.000000000 +0100
  4749. @@ -28,24 +28,35 @@ static void zap_pte(struct mm_struct *mm
  4750. unsigned long addr, pte_t *ptep)
  4751. {
  4752. pte_t pte = *ptep;
  4753. + struct page *page;
  4754. + swp_entry_t entry;
  4755.  
  4756. if (pte_present(pte)) {
  4757. - struct page *page;
  4758. -
  4759. flush_cache_page(vma, addr, pte_pfn(pte));
  4760. pte = ptep_clear_flush_notify(vma, addr, ptep);
  4761. page = vm_normal_page(vma, addr, pte);
  4762. if (page) {
  4763. if (pte_dirty(pte))
  4764. set_page_dirty(page);
  4765. + update_hiwater_rss(mm);
  4766. + dec_mm_counter(mm, mm_counter(page));
  4767. page_remove_rmap(page);
  4768. page_cache_release(page);
  4769. + }
  4770. + } else { /* zap_pte() is not called when pte_none() */
  4771. + if (!pte_file(pte)) {
  4772. update_hiwater_rss(mm);
  4773. - dec_mm_counter(mm, mm_counter_file(page));
  4774. + entry = pte_to_swp_entry(pte);
  4775. + if (non_swap_entry(entry)) {
  4776. + if (is_migration_entry(entry)) {
  4777. + page = migration_entry_to_page(entry);
  4778. + dec_mm_counter(mm, mm_counter(page));
  4779. + }
  4780. + } else {
  4781. + free_swap_and_cache(entry);
  4782. + dec_mm_counter(mm, MM_SWAPENTS);
  4783. + }
  4784. }
  4785. - } else {
  4786. - if (!pte_file(pte))
  4787. - free_swap_and_cache(pte_to_swp_entry(pte));
  4788. pte_clear_not_present_full(mm, addr, ptep, 0);
  4789. }
  4790. }
  4791. diff -upr linux-3.10.0-693.11.1.el7/mm/hugetlb.c linux-3.10.0-693.11.6.el7/mm/hugetlb.c
  4792. --- linux-3.10.0-693.11.1.el7/mm/hugetlb.c 2017-10-27 11:14:15.000000000 +0200
  4793. +++ linux-3.10.0-693.11.6.el7/mm/hugetlb.c 2017-12-28 19:59:43.000000000 +0100
  4794. @@ -4047,6 +4047,9 @@ int hugetlb_mcopy_atomic_pte(struct mm_s
  4795. unsigned long src_addr,
  4796. struct page **pagep)
  4797. {
  4798. + struct address_space *mapping;
  4799. + pgoff_t idx;
  4800. + unsigned long size;
  4801. int vm_shared = dst_vma->vm_flags & VM_SHARED;
  4802. struct hstate *h = hstate_vma(dst_vma);
  4803. pte_t _dst_pte;
  4804. @@ -4084,13 +4087,24 @@ int hugetlb_mcopy_atomic_pte(struct mm_s
  4805. __SetPageUptodate(page);
  4806. set_page_huge_active(page);
  4807.  
  4808. + mapping = dst_vma->vm_file->f_mapping;
  4809. + idx = vma_hugecache_offset(h, dst_vma, dst_addr);
  4810. +
  4811. /*
  4812. * If shared, add to page cache
  4813. */
  4814. if (vm_shared) {
  4815. - struct address_space *mapping = dst_vma->vm_file->f_mapping;
  4816. - pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
  4817. + size = i_size_read(mapping->host) >> huge_page_shift(h);
  4818. + ret = -EFAULT;
  4819. + if (idx >= size)
  4820. + goto out_release_nounlock;
  4821.  
  4822. + /*
  4823. + * Serialization between remove_inode_hugepages() and
  4824. + * huge_add_to_page_cache() below happens through the
  4825. + * hugetlb_fault_mutex_table that here must be hold by
  4826. + * the caller.
  4827. + */
  4828. ret = huge_add_to_page_cache(page, mapping, idx);
  4829. if (ret)
  4830. goto out_release_nounlock;
  4831. @@ -4099,6 +4113,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_s
  4832. ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
  4833. spin_lock(ptl);
  4834.  
  4835. + /*
  4836. + * Recheck the i_size after holding PT lock to make sure not
  4837. + * to leave any page mapped (as page_mapped()) beyond the end
  4838. + * of the i_size (remove_inode_hugepages() is strict about
  4839. + * enforcing that). If we bail out here, we'll also leave a
  4840. + * page in the radix tree in the vm_shared case beyond the end
  4841. + * of the i_size, but remove_inode_hugepages() will take care
  4842. + * of it as soon as we drop the hugetlb_fault_mutex_table.
  4843. + */
  4844. + size = i_size_read(mapping->host) >> huge_page_shift(h);
  4845. + ret = -EFAULT;
  4846. + if (idx >= size)
  4847. + goto out_release_unlock;
  4848. +
  4849. ret = -EEXIST;
  4850. if (!huge_pte_none(huge_ptep_get(dst_pte)))
  4851. goto out_release_unlock;
  4852. @@ -4131,9 +4159,9 @@ out:
  4853. return ret;
  4854. out_release_unlock:
  4855. spin_unlock(ptl);
  4856. -out_release_nounlock:
  4857. if (vm_shared)
  4858. unlock_page(page);
  4859. +out_release_nounlock:
  4860. put_page(page);
  4861. goto out;
  4862. }
  4863. diff -upr linux-3.10.0-693.11.1.el7/mm/memory-failure.c linux-3.10.0-693.11.6.el7/mm/memory-failure.c
  4864. --- linux-3.10.0-693.11.1.el7/mm/memory-failure.c 2017-10-27 11:14:15.000000000 +0200
  4865. +++ linux-3.10.0-693.11.6.el7/mm/memory-failure.c 2017-12-28 19:59:43.000000000 +0100
  4866. @@ -1339,11 +1339,14 @@ void memory_failure_queue(unsigned long
  4867.  
  4868. mf_cpu = &get_cpu_var(memory_failure_cpu);
  4869. spin_lock_irqsave(&mf_cpu->lock, proc_flags);
  4870. - if (kfifo_put(&mf_cpu->fifo, &entry))
  4871. + if (kfifo_put(&mf_cpu->fifo, &entry)) {
  4872. + gmb();
  4873. schedule_work_on(smp_processor_id(), &mf_cpu->work);
  4874. - else
  4875. + } else {
  4876. + gmb();
  4877. pr_err("Memory failure: buffer overflow when queuing memory failure at 0x%#lx\n",
  4878. pfn);
  4879. + }
  4880. spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
  4881. put_cpu_var(memory_failure_cpu);
  4882. }
  4883. diff -upr linux-3.10.0-693.11.1.el7/net/6lowpan/nhc.c linux-3.10.0-693.11.6.el7/net/6lowpan/nhc.c
  4884. --- linux-3.10.0-693.11.1.el7/net/6lowpan/nhc.c 2017-10-27 11:14:15.000000000 +0200
  4885. +++ linux-3.10.0-693.11.6.el7/net/6lowpan/nhc.c 2017-12-28 19:59:43.000000000 +0100
  4886. @@ -33,10 +33,13 @@ static int lowpan_nhc_insert(struct lowp
  4887.  
  4888. len_dif = nhc->idlen - this->idlen;
  4889.  
  4890. - if (nhc->idlen < this->idlen)
  4891. + if (nhc->idlen < this->idlen) {
  4892. + gmb();
  4893. len = nhc->idlen;
  4894. - else
  4895. + } else {
  4896. + gmb();
  4897. len = this->idlen;
  4898. + }
  4899.  
  4900. result = memcmp(nhc->id, this->id, len);
  4901. if (!result)
  4902. diff -upr linux-3.10.0-693.11.1.el7/net/bluetooth/l2cap_core.c linux-3.10.0-693.11.6.el7/net/bluetooth/l2cap_core.c
  4903. --- linux-3.10.0-693.11.1.el7/net/bluetooth/l2cap_core.c 2017-10-27 11:14:15.000000000 +0200
  4904. +++ linux-3.10.0-693.11.6.el7/net/bluetooth/l2cap_core.c 2017-12-28 19:59:43.000000000 +0100
  4905. @@ -1509,6 +1509,7 @@ static void l2cap_le_conn_ready(struct l
  4906. hcon->le_conn_interval > hcon->le_conn_max_interval)) {
  4907. struct l2cap_conn_param_update_req req;
  4908.  
  4909. + gmb();
  4910. req.min = cpu_to_le16(hcon->le_conn_min_interval);
  4911. req.max = cpu_to_le16(hcon->le_conn_max_interval);
  4912. req.latency = cpu_to_le16(hcon->le_conn_latency);
  4913. diff -upr linux-3.10.0-693.11.1.el7/net/ceph/messenger.c linux-3.10.0-693.11.6.el7/net/ceph/messenger.c
  4914. --- linux-3.10.0-693.11.1.el7/net/ceph/messenger.c 2017-10-27 11:14:15.000000000 +0200
  4915. +++ linux-3.10.0-693.11.6.el7/net/ceph/messenger.c 2017-12-28 19:59:43.000000000 +0100
  4916. @@ -1248,6 +1248,7 @@ static void prepare_write_message(struct
  4917. /* Sneak an ack in there first? If we can get it into the same
  4918. * TCP packet that's a good thing. */
  4919. if (con->in_seq > con->in_seq_acked) {
  4920. + gmb();
  4921. con->in_seq_acked = con->in_seq;
  4922. con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
  4923. con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
  4924. diff -upr linux-3.10.0-693.11.1.el7/net/core/tso.c linux-3.10.0-693.11.6.el7/net/core/tso.c
  4925. --- linux-3.10.0-693.11.1.el7/net/core/tso.c 2017-10-27 11:14:15.000000000 +0200
  4926. +++ linux-3.10.0-693.11.6.el7/net/core/tso.c 2017-12-28 19:59:43.000000000 +0100
  4927. @@ -51,7 +51,10 @@ void tso_build_data(struct sk_buff *skb,
  4928.  
  4929. if ((tso->size == 0) &&
  4930. (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
  4931. - skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
  4932. + skb_frag_t *frag;
  4933. +
  4934. + gmb();
  4935. + frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
  4936.  
  4937. /* Move to next segment */
  4938. tso->size = frag->size;
  4939. @@ -75,8 +78,10 @@ void tso_start(struct sk_buff *skb, stru
  4940. tso->data = skb->data + hdr_len;
  4941. if ((tso->size == 0) &&
  4942. (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
  4943. - skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
  4944. + skb_frag_t *frag;
  4945.  
  4946. + gmb();
  4947. + frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
  4948. /* Move to next segment */
  4949. tso->size = frag->size;
  4950. tso->data = page_address(frag->page.p) + frag->page_offset;
  4951. diff -upr linux-3.10.0-693.11.1.el7/net/dccp/ccids/ccid2.c linux-3.10.0-693.11.6.el7/net/dccp/ccids/ccid2.c
  4952. --- linux-3.10.0-693.11.1.el7/net/dccp/ccids/ccid2.c 2017-10-27 11:14:15.000000000 +0200
  4953. +++ linux-3.10.0-693.11.6.el7/net/dccp/ccids/ccid2.c 2017-12-28 19:59:43.000000000 +0100
  4954. @@ -389,6 +389,7 @@ static void ccid2_rtt_estimator(struct s
  4955. hc->tx_mdev += m;
  4956.  
  4957. if (hc->tx_mdev > hc->tx_mdev_max) {
  4958. + gmb();
  4959. hc->tx_mdev_max = hc->tx_mdev;
  4960. if (hc->tx_mdev_max > hc->tx_rttvar)
  4961. hc->tx_rttvar = hc->tx_mdev_max;
  4962. diff -upr linux-3.10.0-693.11.1.el7/net/mac80211/util.c linux-3.10.0-693.11.6.el7/net/mac80211/util.c
  4963. --- linux-3.10.0-693.11.1.el7/net/mac80211/util.c 2017-10-27 11:14:15.000000000 +0200
  4964. +++ linux-3.10.0-693.11.6.el7/net/mac80211/util.c 2017-12-28 19:59:43.000000000 +0100
  4965. @@ -1605,11 +1605,14 @@ u32 ieee80211_sta_get_rates(struct ieee8
  4966. u8 rate = 0;
  4967. int own_rate;
  4968. bool is_basic;
  4969. - if (i < elems->supp_rates_len)
  4970. + if (i < elems->supp_rates_len) {
  4971. + gmb();
  4972. rate = elems->supp_rates[i];
  4973. - else if (elems->ext_supp_rates)
  4974. + } else if (elems->ext_supp_rates) {
  4975. + gmb();
  4976. rate = elems->ext_supp_rates
  4977. [i - elems->supp_rates_len];
  4978. + }
  4979. own_rate = 5 * (rate & 0x7f);
  4980. is_basic = !!(rate & 0x80);
  4981.  
  4982. diff -upr linux-3.10.0-693.11.1.el7/net/netfilter/ipvs/ip_vs_wrr.c linux-3.10.0-693.11.6.el7/net/netfilter/ipvs/ip_vs_wrr.c
  4983. --- linux-3.10.0-693.11.1.el7/net/netfilter/ipvs/ip_vs_wrr.c 2017-10-27 11:14:15.000000000 +0200
  4984. +++ linux-3.10.0-693.11.6.el7/net/netfilter/ipvs/ip_vs_wrr.c 2017-12-28 19:59:43.000000000 +0100
  4985. @@ -149,10 +149,13 @@ static int ip_vs_wrr_dest_changed(struct
  4986. mark->cl = list_entry(&svc->destinations, struct ip_vs_dest, n_list);
  4987. mark->di = ip_vs_wrr_gcd_weight(svc);
  4988. mark->mw = ip_vs_wrr_max_weight(svc) - (mark->di - 1);
  4989. - if (mark->cw > mark->mw || !mark->cw)
  4990. + if (mark->cw > mark->mw || !mark->cw) {
  4991. + gmb();
  4992. mark->cw = mark->mw;
  4993. - else if (mark->di > 1)
  4994. + } else if (mark->di > 1) {
  4995. + gmb();
  4996. mark->cw = (mark->cw / mark->di) * mark->di + 1;
  4997. + }
  4998. spin_unlock_bh(&svc->sched_lock);
  4999. return 0;
  5000. }
  5001. diff -upr linux-3.10.0-693.11.1.el7/net/netfilter/nf_conntrack_core.c linux-3.10.0-693.11.6.el7/net/netfilter/nf_conntrack_core.c
  5002. --- linux-3.10.0-693.11.1.el7/net/netfilter/nf_conntrack_core.c 2017-10-27 11:14:15.000000000 +0200
  5003. +++ linux-3.10.0-693.11.6.el7/net/netfilter/nf_conntrack_core.c 2017-12-28 19:59:43.000000000 +0100
  5004. @@ -1418,6 +1418,7 @@ get_next_corpse(struct net *net, int (*i
  5005. local_bh_disable();
  5006. spin_lock(lockp);
  5007. if (*bucket < net->ct.htable_size) {
  5008. + gmb();
  5009. hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
  5010. if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
  5011. continue;
  5012. diff -upr linux-3.10.0-693.11.1.el7/net/netfilter/nf_conntrack_helper.c linux-3.10.0-693.11.6.el7/net/netfilter/nf_conntrack_helper.c
  5013. --- linux-3.10.0-693.11.1.el7/net/netfilter/nf_conntrack_helper.c 2017-10-27 11:14:15.000000000 +0200
  5014. +++ linux-3.10.0-693.11.6.el7/net/netfilter/nf_conntrack_helper.c 2017-12-28 19:59:43.000000000 +0100
  5015. @@ -419,6 +419,7 @@ static void __nf_conntrack_helper_unregi
  5016. for (i = 0; i < net->ct.htable_size; i++) {
  5017. spin_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
  5018. if (i < net->ct.htable_size) {
  5019. + gmb();
  5020. hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
  5021. unhelp(h, me);
  5022. }
  5023. diff -upr linux-3.10.0-693.11.1.el7/net/netfilter/nf_conntrack_proto_tcp.c linux-3.10.0-693.11.6.el7/net/netfilter/nf_conntrack_proto_tcp.c
  5024. --- linux-3.10.0-693.11.1.el7/net/netfilter/nf_conntrack_proto_tcp.c 2017-10-27 11:14:15.000000000 +0200
  5025. +++ linux-3.10.0-693.11.6.el7/net/netfilter/nf_conntrack_proto_tcp.c 2017-12-28 19:59:43.000000000 +0100
  5026. @@ -1056,14 +1056,18 @@ static int tcp_packet(struct nf_conn *ct
  5027. ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
  5028.  
  5029. if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
  5030. - timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
  5031. + timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS]) {
  5032. + gmb();
  5033. timeout = timeouts[TCP_CONNTRACK_RETRANS];
  5034. - else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
  5035. + } else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
  5036. IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
  5037. - timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
  5038. + timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK]) {
  5039. + gmb();
  5040. timeout = timeouts[TCP_CONNTRACK_UNACK];
  5041. - else
  5042. + } else {
  5043. + gmb();
  5044. timeout = timeouts[new_state];
  5045. + }
  5046. spin_unlock_bh(&ct->lock);
  5047.  
  5048. if (new_state != old_state)
  5049. diff -upr linux-3.10.0-693.11.1.el7/net/netfilter/nfnetlink_cttimeout.c linux-3.10.0-693.11.6.el7/net/netfilter/nfnetlink_cttimeout.c
  5050. --- linux-3.10.0-693.11.1.el7/net/netfilter/nfnetlink_cttimeout.c 2017-10-27 11:14:15.000000000 +0200
  5051. +++ linux-3.10.0-693.11.6.el7/net/netfilter/nfnetlink_cttimeout.c 2017-12-28 19:59:43.000000000 +0100
  5052. @@ -311,6 +311,7 @@ static void ctnl_untimeout(struct net *n
  5053. for (i = 0; i < net->ct.htable_size; i++) {
  5054. spin_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
  5055. if (i < net->ct.htable_size) {
  5056. + gmb();
  5057. hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
  5058. untimeout(h, timeout);
  5059. }
  5060. diff -upr linux-3.10.0-693.11.1.el7/net/netlink/af_netlink.c linux-3.10.0-693.11.6.el7/net/netlink/af_netlink.c
  5061. --- linux-3.10.0-693.11.1.el7/net/netlink/af_netlink.c 2017-10-27 11:14:15.000000000 +0200
  5062. +++ linux-3.10.0-693.11.6.el7/net/netlink/af_netlink.c 2017-12-28 19:59:43.000000000 +0100
  5063. @@ -628,8 +628,10 @@ static bool netlink_dump_space(struct ne
  5064. return false;
  5065.  
  5066. n = ring->head + ring->frame_max / 2;
  5067. - if (n > ring->frame_max)
  5068. + if (n > ring->frame_max) {
  5069. + gmb();
  5070. n -= ring->frame_max;
  5071. + }
  5072.  
  5073. hdr = __netlink_lookup_frame(ring, n);
  5074.  
  5075. diff -upr linux-3.10.0-693.11.1.el7/net/sched/sch_fq_codel.c linux-3.10.0-693.11.6.el7/net/sched/sch_fq_codel.c
  5076. --- linux-3.10.0-693.11.1.el7/net/sched/sch_fq_codel.c 2017-10-27 11:14:15.000000000 +0200
  5077. +++ linux-3.10.0-693.11.6.el7/net/sched/sch_fq_codel.c 2017-12-28 19:59:43.000000000 +0100
  5078. @@ -640,9 +640,12 @@ static int fq_codel_dump_class_stats(str
  5079. struct tc_fq_codel_xstats xstats;
  5080.  
  5081. if (idx < q->flows_cnt) {
  5082. - const struct fq_codel_flow *flow = &q->flows[idx];
  5083. - const struct sk_buff *skb = flow->head;
  5084. + const struct fq_codel_flow *flow;
  5085. + const struct sk_buff *skb;
  5086.  
  5087. + gmb();
  5088. + flow = &q->flows[idx];
  5089. + skb = flow->head;
  5090. memset(&xstats, 0, sizeof(xstats));
  5091. xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
  5092. xstats.class_stats.deficit = flow->deficit;
  5093. diff -upr linux-3.10.0-693.11.1.el7/net/sunrpc/cache.c linux-3.10.0-693.11.6.el7/net/sunrpc/cache.c
  5094. --- linux-3.10.0-693.11.1.el7/net/sunrpc/cache.c 2017-10-27 11:14:15.000000000 +0200
  5095. +++ linux-3.10.0-693.11.6.el7/net/sunrpc/cache.c 2017-12-28 19:59:43.000000000 +0100
  5096. @@ -433,6 +433,7 @@ static int cache_clean(void)
  5097. struct cache_head *ch, **cp;
  5098. struct cache_detail *d;
  5099.  
  5100. + gmb();
  5101. write_lock(&current_detail->hash_lock);
  5102.  
  5103. /* Ok, now to clean this strand */
  5104. diff -upr linux-3.10.0-693.11.1.el7/net/sunrpc/xdr.c linux-3.10.0-693.11.6.el7/net/sunrpc/xdr.c
  5105. --- linux-3.10.0-693.11.1.el7/net/sunrpc/xdr.c 2017-10-27 11:14:15.000000000 +0200
  5106. +++ linux-3.10.0-693.11.6.el7/net/sunrpc/xdr.c 2017-12-28 19:59:43.000000000 +0100
  5107. @@ -916,6 +916,7 @@ static unsigned int xdr_align_pages(stru
  5108. }
  5109.  
  5110. if (nwords > xdr->nwords) {
  5111. + gmb();
  5112. nwords = xdr->nwords;
  5113. len = nwords << 2;
  5114. }
  5115. diff -upr linux-3.10.0-693.11.1.el7/net/vmw_vsock/vmci_transport_notify.c linux-3.10.0-693.11.6.el7/net/vmw_vsock/vmci_transport_notify.c
  5116. --- linux-3.10.0-693.11.1.el7/net/vmw_vsock/vmci_transport_notify.c 2017-10-27 11:14:15.000000000 +0200
  5117. +++ linux-3.10.0-693.11.6.el7/net/vmw_vsock/vmci_transport_notify.c 2017-12-28 19:59:43.000000000 +0100
  5118. @@ -413,6 +413,7 @@ vmci_transport_notify_pkt_recv_init(
  5119. PKT_FIELD(vsk, write_notify_min_window) = target + 1;
  5120. if (PKT_FIELD(vsk, write_notify_window) <
  5121. PKT_FIELD(vsk, write_notify_min_window)) {
  5122. + gmb();
  5123. /* If the current window is smaller than the new
  5124. * minimal window size, we need to reevaluate whether
  5125. * we need to notify the sender. If the number of ready
  5126. diff -upr linux-3.10.0-693.11.1.el7/net/vmw_vsock/vmci_transport_notify_qstate.c linux-3.10.0-693.11.6.el7/net/vmw_vsock/vmci_transport_notify_qstate.c
  5127. --- linux-3.10.0-693.11.1.el7/net/vmw_vsock/vmci_transport_notify_qstate.c 2017-10-27 11:14:15.000000000 +0200
  5128. +++ linux-3.10.0-693.11.6.el7/net/vmw_vsock/vmci_transport_notify_qstate.c 2017-12-28 19:59:43.000000000 +0100
  5129. @@ -221,6 +221,7 @@ vmci_transport_notify_pkt_recv_init(
  5130. PKT_FIELD(vsk, write_notify_min_window) = target + 1;
  5131. if (PKT_FIELD(vsk, write_notify_window) <
  5132. PKT_FIELD(vsk, write_notify_min_window)) {
  5133. + gmb();
  5134. /* If the current window is smaller than the new
  5135. * minimal window size, we need to reevaluate whether
  5136. * we need to notify the sender. If the number of ready
  5137. diff -upr linux-3.10.0-693.11.1.el7/security/Kconfig linux-3.10.0-693.11.6.el7/security/Kconfig
  5138. --- linux-3.10.0-693.11.1.el7/security/Kconfig 2017-10-27 11:14:15.000000000 +0200
  5139. +++ linux-3.10.0-693.11.6.el7/security/Kconfig 2017-12-28 19:59:43.000000000 +0100
  5140. @@ -48,6 +48,16 @@ config SECURITY_NETWORK
  5141. implement socket and networking access controls.
  5142. If you are unsure how to answer this question, answer N.
  5143.  
  5144. +config KAISER
  5145. + bool "Remove the kernel mapping in user mode"
  5146. + depends on X86_64 && SMP && STOP_MACHINE
  5147. + help
  5148. + This feature reduces the number of hardware side channels by
  5149. + ensuring that the majority of kernel addresses are not mapped
  5150. + into userspace.
  5151. +
  5152. + See Documentation/x86/kaiser.txt for more details.
  5153. +
  5154. config SECURITY_NETWORK_XFRM
  5155. bool "XFRM (IPSec) Networking Security Hooks"
  5156. depends on XFRM && SECURITY_NETWORK
  5157. diff -upr linux-3.10.0-693.11.1.el7/sound/core/pcm_lib.c linux-3.10.0-693.11.6.el7/sound/core/pcm_lib.c
  5158. --- linux-3.10.0-693.11.1.el7/sound/core/pcm_lib.c 2017-10-27 11:14:15.000000000 +0200
  5159. +++ linux-3.10.0-693.11.6.el7/sound/core/pcm_lib.c 2017-12-28 19:59:43.000000000 +0100
  5160. @@ -606,18 +606,22 @@ int snd_interval_refine(struct snd_inter
  5161. if (snd_BUG_ON(snd_interval_empty(i)))
  5162. return -EINVAL;
  5163. if (i->min < v->min) {
  5164. + gmb();
  5165. i->min = v->min;
  5166. i->openmin = v->openmin;
  5167. changed = 1;
  5168. } else if (i->min == v->min && !i->openmin && v->openmin) {
  5169. + gmb();
  5170. i->openmin = 1;
  5171. changed = 1;
  5172. }
  5173. if (i->max > v->max) {
  5174. + gmb();
  5175. i->max = v->max;
  5176. i->openmax = v->openmax;
  5177. changed = 1;
  5178. } else if (i->max == v->max && !i->openmax && v->openmax) {
  5179. + gmb();
  5180. i->openmax = 1;
  5181. changed = 1;
  5182. }
  5183. @@ -828,8 +832,10 @@ int snd_interval_ratnum(struct snd_inter
  5184. else {
  5185. unsigned int r;
  5186. r = (den - rats[k].den_min) % rats[k].den_step;
  5187. - if (r != 0)
  5188. + if (r != 0) {
  5189. + gmb();
  5190. den -= r;
  5191. + }
  5192. }
  5193. diff = num - q * den;
  5194. if (diff < 0)
  5195. @@ -869,8 +875,10 @@ int snd_interval_ratnum(struct snd_inter
  5196. else {
  5197. unsigned int r;
  5198. r = (den - rats[k].den_min) % rats[k].den_step;
  5199. - if (r != 0)
  5200. + if (r != 0) {
  5201. + gmb();
  5202. den += rats[k].den_step - r;
  5203. + }
  5204. }
  5205. diff = q * den - num;
  5206. if (diff < 0)
  5207. @@ -943,8 +951,10 @@ static int snd_interval_ratden(struct sn
  5208. else {
  5209. unsigned int r;
  5210. r = (num - rats[k].num_min) % rats[k].num_step;
  5211. - if (r != 0)
  5212. + if (r != 0) {
  5213. + gmb();
  5214. num += rats[k].num_step - r;
  5215. + }
  5216. }
  5217. diff = num - q * den;
  5218. if (best_num == 0 ||
  5219. @@ -975,8 +985,10 @@ static int snd_interval_ratden(struct sn
  5220. else {
  5221. unsigned int r;
  5222. r = (num - rats[k].num_min) % rats[k].num_step;
  5223. - if (r != 0)
  5224. + if (r != 0) {
  5225. + gmb();
  5226. num -= r;
  5227. + }
  5228. }
  5229. diff = q * den - num;
  5230. if (best_num == 0 ||
  5231. @@ -1084,12 +1096,14 @@ int snd_interval_ranges(struct snd_inter
  5232. continue;
  5233.  
  5234. if (range.min < range_union.min) {
  5235. + gmb();
  5236. range_union.min = range.min;
  5237. range_union.openmin = 1;
  5238. }
  5239. if (range.min == range_union.min && !range.openmin)
  5240. range_union.openmin = 0;
  5241. if (range.max > range_union.max) {
  5242. + gmb();
  5243. range_union.max = range.max;
  5244. range_union.openmax = 1;
  5245. }
  5246. diff -upr linux-3.10.0-693.11.1.el7/sound/pci/emu10k1/emufx.c linux-3.10.0-693.11.6.el7/sound/pci/emu10k1/emufx.c
  5247. --- linux-3.10.0-693.11.1.el7/sound/pci/emu10k1/emufx.c 2017-10-27 11:14:15.000000000 +0200
  5248. +++ linux-3.10.0-693.11.6.el7/sound/pci/emu10k1/emufx.c 2017-12-28 19:59:43.000000000 +0100
  5249. @@ -911,6 +911,7 @@ static int snd_emu10k1_list_controls(str
  5250. total++;
  5251. if (icode->gpr_list_controls &&
  5252. i < icode->gpr_list_control_count) {
  5253. + gmb();
  5254. memset(gctl, 0, sizeof(*gctl));
  5255. id = &ctl->kcontrol->id;
  5256. gctl->id.iface = id->iface;
  5257. diff -upr linux-3.10.0-693.11.1.el7/sound/pci/es1968.c linux-3.10.0-693.11.6.el7/sound/pci/es1968.c
  5258. --- linux-3.10.0-693.11.1.el7/sound/pci/es1968.c 2017-10-27 11:14:15.000000000 +0200
  5259. +++ linux-3.10.0-693.11.6.el7/sound/pci/es1968.c 2017-12-28 19:59:43.000000000 +0100
  5260. @@ -1879,6 +1879,7 @@ static void snd_es1968_update_pcm(struct
  5261. es->count += diff;
  5262.  
  5263. if (es->count > es->frag_size) {
  5264. + gmb();
  5265. spin_unlock(&chip->substream_lock);
  5266. snd_pcm_period_elapsed(subs);
  5267. spin_lock(&chip->substream_lock);
  5268. diff -upr linux-3.10.0-693.11.1.el7/tools/objtool/builtin-check.c linux-3.10.0-693.11.6.el7/tools/objtool/builtin-check.c
  5269. --- linux-3.10.0-693.11.1.el7/tools/objtool/builtin-check.c 2017-10-27 11:14:15.000000000 +0200
  5270. +++ linux-3.10.0-693.11.6.el7/tools/objtool/builtin-check.c 2017-12-28 19:59:43.000000000 +0100
  5271. @@ -404,7 +404,7 @@ static int add_call_destinations(struct
  5272. dest_off = insn->offset + insn->len + insn->immediate;
  5273. insn->call_dest = find_symbol_by_offset(insn->sec,
  5274. dest_off);
  5275. - if (!insn->call_dest) {
  5276. + if (!insn->call_dest && !insn->visited) {
  5277. WARN_FUNC("can't find call dest symbol at offset 0x%lx",
  5278. insn->sec, insn->offset, dest_off);
  5279. return -1;
Add Comment
Please, Sign In to add comment