Magellan Linux

Contents of /trunk/kernel-alx/patches-4.14/0115-4.14.16-all-fixes.patch

Parent Directory | Revision Log


Revision 3238 - (show annotations) (download)
Fri Nov 9 12:14:58 2018 UTC (5 years, 6 months ago) by niro
File size: 101879 byte(s)
-added up to patches-4.14.79
1 diff --git a/Makefile b/Makefile
2 index bf1a277a67a4..90a4bffa8446 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 14
9 -SUBLEVEL = 15
10 +SUBLEVEL = 16
11 EXTRAVERSION =
12 NAME = Petit Gorille
13
14 diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
15 index c199990e12b6..323a4df59a6c 100644
16 --- a/arch/arm/net/bpf_jit_32.c
17 +++ b/arch/arm/net/bpf_jit_32.c
18 @@ -27,14 +27,58 @@
19
20 int bpf_jit_enable __read_mostly;
21
22 +/*
23 + * eBPF prog stack layout:
24 + *
25 + * high
26 + * original ARM_SP => +-----+
27 + * | | callee saved registers
28 + * +-----+ <= (BPF_FP + SCRATCH_SIZE)
29 + * | ... | eBPF JIT scratch space
30 + * eBPF fp register => +-----+
31 + * (BPF_FP) | ... | eBPF prog stack
32 + * +-----+
33 + * |RSVD | JIT scratchpad
34 + * current ARM_SP => +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
35 + * | |
36 + * | ... | Function call stack
37 + * | |
38 + * +-----+
39 + * low
40 + *
41 + * The callee saved registers depends on whether frame pointers are enabled.
42 + * With frame pointers (to be compliant with the ABI):
43 + *
44 + * high
45 + * original ARM_SP => +------------------+ \
46 + * | pc | |
47 + * current ARM_FP => +------------------+ } callee saved registers
48 + * |r4-r8,r10,fp,ip,lr| |
49 + * +------------------+ /
50 + * low
51 + *
52 + * Without frame pointers:
53 + *
54 + * high
55 + * original ARM_SP => +------------------+
56 + * | r4-r8,r10,fp,lr | callee saved registers
57 + * current ARM_FP => +------------------+
58 + * low
59 + *
60 + * When popping registers off the stack at the end of a BPF function, we
61 + * reference them via the current ARM_FP register.
62 + */
63 +#define CALLEE_MASK (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
64 + 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R10 | \
65 + 1 << ARM_FP)
66 +#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
67 +#define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC)
68 +
69 #define STACK_OFFSET(k) (k)
70 #define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */
71 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */
72 #define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */
73
74 -/* Flags used for JIT optimization */
75 -#define SEEN_CALL (1 << 0)
76 -
77 #define FLAG_IMM_OVERFLOW (1 << 0)
78
79 /*
80 @@ -95,7 +139,6 @@ static const u8 bpf2a32[][2] = {
81 * idx : index of current last JITed instruction.
82 * prologue_bytes : bytes used in prologue.
83 * epilogue_offset : offset of epilogue starting.
84 - * seen : bit mask used for JIT optimization.
85 * offsets : array of eBPF instruction offsets in
86 * JITed code.
87 * target : final JITed code.
88 @@ -110,7 +153,6 @@ struct jit_ctx {
89 unsigned int idx;
90 unsigned int prologue_bytes;
91 unsigned int epilogue_offset;
92 - u32 seen;
93 u32 flags;
94 u32 *offsets;
95 u32 *target;
96 @@ -179,8 +221,13 @@ static void jit_fill_hole(void *area, unsigned int size)
97 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
98 }
99
100 -/* Stack must be multiples of 16 Bytes */
101 -#define STACK_ALIGN(sz) (((sz) + 3) & ~3)
102 +#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
103 +/* EABI requires the stack to be aligned to 64-bit boundaries */
104 +#define STACK_ALIGNMENT 8
105 +#else
106 +/* Stack must be aligned to 32-bit boundaries */
107 +#define STACK_ALIGNMENT 4
108 +#endif
109
110 /* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
111 * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
112 @@ -194,7 +241,7 @@ static void jit_fill_hole(void *area, unsigned int size)
113 + SCRATCH_SIZE + \
114 + 4 /* extra for skb_copy_bits buffer */)
115
116 -#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
117 +#define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT)
118
119 /* Get the offset of eBPF REGISTERs stored on scratch space. */
120 #define STACK_VAR(off) (STACK_SIZE-off-4)
121 @@ -285,16 +332,19 @@ static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx)
122 emit_mov_i_no8m(rd, val, ctx);
123 }
124
125 -static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
126 +static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx)
127 {
128 - ctx->seen |= SEEN_CALL;
129 -#if __LINUX_ARM_ARCH__ < 5
130 - emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
131 -
132 if (elf_hwcap & HWCAP_THUMB)
133 emit(ARM_BX(tgt_reg), ctx);
134 else
135 emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
136 +}
137 +
138 +static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
139 +{
140 +#if __LINUX_ARM_ARCH__ < 5
141 + emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
142 + emit_bx_r(tgt_reg, ctx);
143 #else
144 emit(ARM_BLX_R(tgt_reg), ctx);
145 #endif
146 @@ -354,7 +404,6 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
147 }
148
149 /* Call appropriate function */
150 - ctx->seen |= SEEN_CALL;
151 emit_mov_i(ARM_IP, op == BPF_DIV ?
152 (u32)jit_udiv32 : (u32)jit_mod32, ctx);
153 emit_blx_r(ARM_IP, ctx);
154 @@ -620,8 +669,6 @@ static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk,
155 /* Do LSH operation */
156 emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
157 emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
158 - /* As we are using ARM_LR */
159 - ctx->seen |= SEEN_CALL;
160 emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx);
161 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx);
162 emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx);
163 @@ -656,8 +703,6 @@ static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk,
164 /* Do the ARSH operation */
165 emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
166 emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
167 - /* As we are using ARM_LR */
168 - ctx->seen |= SEEN_CALL;
169 emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
170 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
171 _emit(ARM_COND_MI, ARM_B(0), ctx);
172 @@ -692,8 +737,6 @@ static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk,
173 /* Do LSH operation */
174 emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
175 emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
176 - /* As we are using ARM_LR */
177 - ctx->seen |= SEEN_CALL;
178 emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
179 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
180 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx);
181 @@ -828,8 +871,6 @@ static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk,
182 /* Do Multiplication */
183 emit(ARM_MUL(ARM_IP, rd, rn), ctx);
184 emit(ARM_MUL(ARM_LR, rm, rt), ctx);
185 - /* As we are using ARM_LR */
186 - ctx->seen |= SEEN_CALL;
187 emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);
188
189 emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx);
190 @@ -872,33 +913,53 @@ static inline void emit_str_r(const u8 dst, const u8 src, bool dstk,
191 }
192
193 /* dst = *(size*)(src + off) */
194 -static inline void emit_ldx_r(const u8 dst, const u8 src, bool dstk,
195 - const s32 off, struct jit_ctx *ctx, const u8 sz){
196 +static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
197 + s32 off, struct jit_ctx *ctx, const u8 sz){
198 const u8 *tmp = bpf2a32[TMP_REG_1];
199 - u8 rd = dstk ? tmp[1] : dst;
200 + const u8 *rd = dstk ? tmp : dst;
201 u8 rm = src;
202 + s32 off_max;
203
204 - if (off) {
205 + if (sz == BPF_H)
206 + off_max = 0xff;
207 + else
208 + off_max = 0xfff;
209 +
210 + if (off < 0 || off > off_max) {
211 emit_a32_mov_i(tmp[0], off, false, ctx);
212 emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
213 rm = tmp[0];
214 + off = 0;
215 + } else if (rd[1] == rm) {
216 + emit(ARM_MOV_R(tmp[0], rm), ctx);
217 + rm = tmp[0];
218 }
219 switch (sz) {
220 - case BPF_W:
221 - /* Load a Word */
222 - emit(ARM_LDR_I(rd, rm, 0), ctx);
223 + case BPF_B:
224 + /* Load a Byte */
225 + emit(ARM_LDRB_I(rd[1], rm, off), ctx);
226 + emit_a32_mov_i(dst[0], 0, dstk, ctx);
227 break;
228 case BPF_H:
229 /* Load a HalfWord */
230 - emit(ARM_LDRH_I(rd, rm, 0), ctx);
231 + emit(ARM_LDRH_I(rd[1], rm, off), ctx);
232 + emit_a32_mov_i(dst[0], 0, dstk, ctx);
233 break;
234 - case BPF_B:
235 - /* Load a Byte */
236 - emit(ARM_LDRB_I(rd, rm, 0), ctx);
237 + case BPF_W:
238 + /* Load a Word */
239 + emit(ARM_LDR_I(rd[1], rm, off), ctx);
240 + emit_a32_mov_i(dst[0], 0, dstk, ctx);
241 + break;
242 + case BPF_DW:
243 + /* Load a Double Word */
244 + emit(ARM_LDR_I(rd[1], rm, off), ctx);
245 + emit(ARM_LDR_I(rd[0], rm, off + 4), ctx);
246 break;
247 }
248 if (dstk)
249 - emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
250 + emit(ARM_STR_I(rd[1], ARM_SP, STACK_VAR(dst[1])), ctx);
251 + if (dstk && sz == BPF_DW)
252 + emit(ARM_STR_I(rd[0], ARM_SP, STACK_VAR(dst[0])), ctx);
253 }
254
255 /* Arithmatic Operation */
256 @@ -906,7 +967,6 @@ static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
257 const u8 rn, struct jit_ctx *ctx, u8 op) {
258 switch (op) {
259 case BPF_JSET:
260 - ctx->seen |= SEEN_CALL;
261 emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
262 emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
263 emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
264 @@ -945,7 +1005,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
265 const u8 *tcc = bpf2a32[TCALL_CNT];
266 const int idx0 = ctx->idx;
267 #define cur_offset (ctx->idx - idx0)
268 -#define jmp_offset (out_offset - (cur_offset))
269 +#define jmp_offset (out_offset - (cur_offset) - 2)
270 u32 off, lo, hi;
271
272 /* if (index >= array->map.max_entries)
273 @@ -956,7 +1016,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
274 emit_a32_mov_i(tmp[1], off, false, ctx);
275 emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx);
276 emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx);
277 - /* index (64 bit) */
278 + /* index is 32-bit for arrays */
279 emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx);
280 /* index >= array->map.max_entries */
281 emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx);
282 @@ -997,7 +1057,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
283 emit_a32_mov_i(tmp2[1], off, false, ctx);
284 emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx);
285 emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
286 - emit(ARM_BX(tmp[1]), ctx);
287 + emit_bx_r(tmp[1], ctx);
288
289 /* out: */
290 if (out_offset == -1)
291 @@ -1070,54 +1130,22 @@ static void build_prologue(struct jit_ctx *ctx)
292 const u8 r2 = bpf2a32[BPF_REG_1][1];
293 const u8 r3 = bpf2a32[BPF_REG_1][0];
294 const u8 r4 = bpf2a32[BPF_REG_6][1];
295 - const u8 r5 = bpf2a32[BPF_REG_6][0];
296 - const u8 r6 = bpf2a32[TMP_REG_1][1];
297 - const u8 r7 = bpf2a32[TMP_REG_1][0];
298 - const u8 r8 = bpf2a32[TMP_REG_2][1];
299 - const u8 r10 = bpf2a32[TMP_REG_2][0];
300 const u8 fplo = bpf2a32[BPF_REG_FP][1];
301 const u8 fphi = bpf2a32[BPF_REG_FP][0];
302 - const u8 sp = ARM_SP;
303 const u8 *tcc = bpf2a32[TCALL_CNT];
304
305 - u16 reg_set = 0;
306 -
307 - /*
308 - * eBPF prog stack layout
309 - *
310 - * high
311 - * original ARM_SP => +-----+ eBPF prologue
312 - * |FP/LR|
313 - * current ARM_FP => +-----+
314 - * | ... | callee saved registers
315 - * eBPF fp register => +-----+ <= (BPF_FP)
316 - * | ... | eBPF JIT scratch space
317 - * | | eBPF prog stack
318 - * +-----+
319 - * |RSVD | JIT scratchpad
320 - * current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE)
321 - * | |
322 - * | ... | Function call stack
323 - * | |
324 - * +-----+
325 - * low
326 - */
327 -
328 /* Save callee saved registers. */
329 - reg_set |= (1<<r4) | (1<<r5) | (1<<r6) | (1<<r7) | (1<<r8) | (1<<r10);
330 #ifdef CONFIG_FRAME_POINTER
331 - reg_set |= (1<<ARM_FP) | (1<<ARM_IP) | (1<<ARM_LR) | (1<<ARM_PC);
332 - emit(ARM_MOV_R(ARM_IP, sp), ctx);
333 + u16 reg_set = CALLEE_PUSH_MASK | 1 << ARM_IP | 1 << ARM_PC;
334 + emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
335 emit(ARM_PUSH(reg_set), ctx);
336 emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
337 #else
338 - /* Check if call instruction exists in BPF body */
339 - if (ctx->seen & SEEN_CALL)
340 - reg_set |= (1<<ARM_LR);
341 - emit(ARM_PUSH(reg_set), ctx);
342 + emit(ARM_PUSH(CALLEE_PUSH_MASK), ctx);
343 + emit(ARM_MOV_R(ARM_FP, ARM_SP), ctx);
344 #endif
345 /* Save frame pointer for later */
346 - emit(ARM_SUB_I(ARM_IP, sp, SCRATCH_SIZE), ctx);
347 + emit(ARM_SUB_I(ARM_IP, ARM_SP, SCRATCH_SIZE), ctx);
348
349 ctx->stack_size = imm8m(STACK_SIZE);
350
351 @@ -1140,33 +1168,19 @@ static void build_prologue(struct jit_ctx *ctx)
352 /* end of prologue */
353 }
354
355 +/* restore callee saved registers. */
356 static void build_epilogue(struct jit_ctx *ctx)
357 {
358 - const u8 r4 = bpf2a32[BPF_REG_6][1];
359 - const u8 r5 = bpf2a32[BPF_REG_6][0];
360 - const u8 r6 = bpf2a32[TMP_REG_1][1];
361 - const u8 r7 = bpf2a32[TMP_REG_1][0];
362 - const u8 r8 = bpf2a32[TMP_REG_2][1];
363 - const u8 r10 = bpf2a32[TMP_REG_2][0];
364 - u16 reg_set = 0;
365 -
366 - /* unwind function call stack */
367 - emit(ARM_ADD_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);
368 -
369 - /* restore callee saved registers. */
370 - reg_set |= (1<<r4) | (1<<r5) | (1<<r6) | (1<<r7) | (1<<r8) | (1<<r10);
371 #ifdef CONFIG_FRAME_POINTER
372 - /* the first instruction of the prologue was: mov ip, sp */
373 - reg_set |= (1<<ARM_FP) | (1<<ARM_SP) | (1<<ARM_PC);
374 + /* When using frame pointers, some additional registers need to
375 + * be loaded. */
376 + u16 reg_set = CALLEE_POP_MASK | 1 << ARM_SP;
377 + emit(ARM_SUB_I(ARM_SP, ARM_FP, hweight16(reg_set) * 4), ctx);
378 emit(ARM_LDM(ARM_SP, reg_set), ctx);
379 #else
380 - if (ctx->seen & SEEN_CALL)
381 - reg_set |= (1<<ARM_PC);
382 /* Restore callee saved registers. */
383 - emit(ARM_POP(reg_set), ctx);
384 - /* Return back to the callee function */
385 - if (!(ctx->seen & SEEN_CALL))
386 - emit(ARM_BX(ARM_LR), ctx);
387 + emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx);
388 + emit(ARM_POP(CALLEE_POP_MASK), ctx);
389 #endif
390 }
391
392 @@ -1394,8 +1408,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
393 emit_rev32(rt, rt, ctx);
394 goto emit_bswap_uxt;
395 case 64:
396 - /* Because of the usage of ARM_LR */
397 - ctx->seen |= SEEN_CALL;
398 emit_rev32(ARM_LR, rt, ctx);
399 emit_rev32(rt, rd, ctx);
400 emit(ARM_MOV_R(rd, ARM_LR), ctx);
401 @@ -1448,22 +1460,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
402 rn = sstk ? tmp2[1] : src_lo;
403 if (sstk)
404 emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
405 - switch (BPF_SIZE(code)) {
406 - case BPF_W:
407 - /* Load a Word */
408 - case BPF_H:
409 - /* Load a Half-Word */
410 - case BPF_B:
411 - /* Load a Byte */
412 - emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_SIZE(code));
413 - emit_a32_mov_i(dst_hi, 0, dstk, ctx);
414 - break;
415 - case BPF_DW:
416 - /* Load a double word */
417 - emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_W);
418 - emit_ldx_r(dst_hi, rn, dstk, off+4, ctx, BPF_W);
419 - break;
420 - }
421 + emit_ldx_r(dst, rn, dstk, off, ctx, BPF_SIZE(code));
422 break;
423 /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
424 case BPF_LD | BPF_ABS | BPF_W:
425 diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
426 index ba38d403abb2..bb32f7f6dd0f 100644
427 --- a/arch/arm64/net/bpf_jit_comp.c
428 +++ b/arch/arm64/net/bpf_jit_comp.c
429 @@ -148,7 +148,8 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
430 /* Stack must be multiples of 16B */
431 #define STACK_ALIGN(sz) (((sz) + 15) & ~15)
432
433 -#define PROLOGUE_OFFSET 8
434 +/* Tail call offset to jump into */
435 +#define PROLOGUE_OFFSET 7
436
437 static int build_prologue(struct jit_ctx *ctx)
438 {
439 @@ -200,19 +201,19 @@ static int build_prologue(struct jit_ctx *ctx)
440 /* Initialize tail_call_cnt */
441 emit(A64_MOVZ(1, tcc, 0, 0), ctx);
442
443 - /* 4 byte extra for skb_copy_bits buffer */
444 - ctx->stack_size = prog->aux->stack_depth + 4;
445 - ctx->stack_size = STACK_ALIGN(ctx->stack_size);
446 -
447 - /* Set up function call stack */
448 - emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
449 -
450 cur_offset = ctx->idx - idx0;
451 if (cur_offset != PROLOGUE_OFFSET) {
452 pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
453 cur_offset, PROLOGUE_OFFSET);
454 return -1;
455 }
456 +
457 + /* 4 byte extra for skb_copy_bits buffer */
458 + ctx->stack_size = prog->aux->stack_depth + 4;
459 + ctx->stack_size = STACK_ALIGN(ctx->stack_size);
460 +
461 + /* Set up function call stack */
462 + emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
463 return 0;
464 }
465
466 @@ -260,11 +261,12 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
467 emit(A64_LDR64(prg, tmp, prg), ctx);
468 emit(A64_CBZ(1, prg, jmp_offset), ctx);
469
470 - /* goto *(prog->bpf_func + prologue_size); */
471 + /* goto *(prog->bpf_func + prologue_offset); */
472 off = offsetof(struct bpf_prog, bpf_func);
473 emit_a64_mov_i64(tmp, off, ctx);
474 emit(A64_LDR64(tmp, prg, tmp), ctx);
475 emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
476 + emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
477 emit(A64_BR(tmp), ctx);
478
479 /* out: */
480 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
481 index b87a930c2201..6c88cb18ace2 100644
482 --- a/arch/s390/kvm/kvm-s390.c
483 +++ b/arch/s390/kvm/kvm-s390.c
484 @@ -768,7 +768,7 @@ static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
485
486 /*
487 * Must be called with kvm->srcu held to avoid races on memslots, and with
488 - * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
489 + * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
490 */
491 static int kvm_s390_vm_start_migration(struct kvm *kvm)
492 {
493 @@ -824,7 +824,7 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
494 }
495
496 /*
497 - * Must be called with kvm->lock to avoid races with ourselves and
498 + * Must be called with kvm->slots_lock to avoid races with ourselves and
499 * kvm_s390_vm_start_migration.
500 */
501 static int kvm_s390_vm_stop_migration(struct kvm *kvm)
502 @@ -839,6 +839,8 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm)
503
504 if (kvm->arch.use_cmma) {
505 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
506 + /* We have to wait for the essa emulation to finish */
507 + synchronize_srcu(&kvm->srcu);
508 vfree(mgs->pgste_bitmap);
509 }
510 kfree(mgs);
511 @@ -848,14 +850,12 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm)
512 static int kvm_s390_vm_set_migration(struct kvm *kvm,
513 struct kvm_device_attr *attr)
514 {
515 - int idx, res = -ENXIO;
516 + int res = -ENXIO;
517
518 - mutex_lock(&kvm->lock);
519 + mutex_lock(&kvm->slots_lock);
520 switch (attr->attr) {
521 case KVM_S390_VM_MIGRATION_START:
522 - idx = srcu_read_lock(&kvm->srcu);
523 res = kvm_s390_vm_start_migration(kvm);
524 - srcu_read_unlock(&kvm->srcu, idx);
525 break;
526 case KVM_S390_VM_MIGRATION_STOP:
527 res = kvm_s390_vm_stop_migration(kvm);
528 @@ -863,7 +863,7 @@ static int kvm_s390_vm_set_migration(struct kvm *kvm,
529 default:
530 break;
531 }
532 - mutex_unlock(&kvm->lock);
533 + mutex_unlock(&kvm->slots_lock);
534
535 return res;
536 }
537 @@ -1753,7 +1753,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
538 r = -EFAULT;
539 if (copy_from_user(&args, argp, sizeof(args)))
540 break;
541 + mutex_lock(&kvm->slots_lock);
542 r = kvm_s390_get_cmma_bits(kvm, &args);
543 + mutex_unlock(&kvm->slots_lock);
544 if (!r) {
545 r = copy_to_user(argp, &args, sizeof(args));
546 if (r)
547 @@ -1767,7 +1769,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
548 r = -EFAULT;
549 if (copy_from_user(&args, argp, sizeof(args)))
550 break;
551 + mutex_lock(&kvm->slots_lock);
552 r = kvm_s390_set_cmma_bits(kvm, &args);
553 + mutex_unlock(&kvm->slots_lock);
554 break;
555 }
556 default:
557 diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c
558 index a6eee5ac4f58..2aefacf5c5b2 100644
559 --- a/arch/x86/events/amd/power.c
560 +++ b/arch/x86/events/amd/power.c
561 @@ -277,7 +277,7 @@ static int __init amd_power_pmu_init(void)
562 int ret;
563
564 if (!x86_match_cpu(cpu_match))
565 - return 0;
566 + return -ENODEV;
567
568 if (!boot_cpu_has(X86_FEATURE_ACC_POWER))
569 return -ENODEV;
570 diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
571 index c4fa4a85d4cb..e4fc595cd6ea 100644
572 --- a/arch/x86/kernel/cpu/microcode/core.c
573 +++ b/arch/x86/kernel/cpu/microcode/core.c
574 @@ -239,7 +239,7 @@ static int __init save_microcode_in_initrd(void)
575 break;
576 case X86_VENDOR_AMD:
577 if (c->x86 >= 0x10)
578 - return save_microcode_in_initrd_amd(cpuid_eax(1));
579 + ret = save_microcode_in_initrd_amd(cpuid_eax(1));
580 break;
581 default:
582 break;
583 diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
584 index d9e460fc7a3b..f7c55b0e753a 100644
585 --- a/arch/x86/kernel/cpu/microcode/intel.c
586 +++ b/arch/x86/kernel/cpu/microcode/intel.c
587 @@ -45,6 +45,9 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
588 /* Current microcode patch used in early patching on the APs. */
589 static struct microcode_intel *intel_ucode_patch;
590
591 +/* last level cache size per core */
592 +static int llc_size_per_core;
593 +
594 static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
595 unsigned int s2, unsigned int p2)
596 {
597 @@ -912,12 +915,14 @@ static bool is_blacklisted(unsigned int cpu)
598
599 /*
600 * Late loading on model 79 with microcode revision less than 0x0b000021
601 - * may result in a system hang. This behavior is documented in item
602 - * BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family).
603 + * and LLC size per core bigger than 2.5MB may result in a system hang.
604 + * This behavior is documented in item BDF90, #334165 (Intel Xeon
605 + * Processor E7-8800/4800 v4 Product Family).
606 */
607 if (c->x86 == 6 &&
608 c->x86_model == INTEL_FAM6_BROADWELL_X &&
609 c->x86_mask == 0x01 &&
610 + llc_size_per_core > 2621440 &&
611 c->microcode < 0x0b000021) {
612 pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
613 pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
614 @@ -975,6 +980,15 @@ static struct microcode_ops microcode_intel_ops = {
615 .apply_microcode = apply_microcode_intel,
616 };
617
618 +static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
619 +{
620 + u64 llc_size = c->x86_cache_size * 1024;
621 +
622 + do_div(llc_size, c->x86_max_cores);
623 +
624 + return (int)llc_size;
625 +}
626 +
627 struct microcode_ops * __init init_intel_microcode(void)
628 {
629 struct cpuinfo_x86 *c = &boot_cpu_data;
630 @@ -985,5 +999,7 @@ struct microcode_ops * __init init_intel_microcode(void)
631 return NULL;
632 }
633
634 + llc_size_per_core = calc_llc_size_per_core(c);
635 +
636 return &microcode_intel_ops;
637 }
638 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
639 index a1561957dccb..5bfe61a5e8e3 100644
640 --- a/arch/x86/mm/tlb.c
641 +++ b/arch/x86/mm/tlb.c
642 @@ -151,6 +151,34 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
643 local_irq_restore(flags);
644 }
645
646 +static void sync_current_stack_to_mm(struct mm_struct *mm)
647 +{
648 + unsigned long sp = current_stack_pointer;
649 + pgd_t *pgd = pgd_offset(mm, sp);
650 +
651 + if (CONFIG_PGTABLE_LEVELS > 4) {
652 + if (unlikely(pgd_none(*pgd))) {
653 + pgd_t *pgd_ref = pgd_offset_k(sp);
654 +
655 + set_pgd(pgd, *pgd_ref);
656 + }
657 + } else {
658 + /*
659 + * "pgd" is faked. The top level entries are "p4d"s, so sync
660 + * the p4d. This compiles to approximately the same code as
661 + * the 5-level case.
662 + */
663 + p4d_t *p4d = p4d_offset(pgd, sp);
664 +
665 + if (unlikely(p4d_none(*p4d))) {
666 + pgd_t *pgd_ref = pgd_offset_k(sp);
667 + p4d_t *p4d_ref = p4d_offset(pgd_ref, sp);
668 +
669 + set_p4d(p4d, *p4d_ref);
670 + }
671 + }
672 +}
673 +
674 void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
675 struct task_struct *tsk)
676 {
677 @@ -226,11 +254,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
678 * mapped in the new pgd, we'll double-fault. Forcibly
679 * map it.
680 */
681 - unsigned int index = pgd_index(current_stack_pointer);
682 - pgd_t *pgd = next->pgd + index;
683 -
684 - if (unlikely(pgd_none(*pgd)))
685 - set_pgd(pgd, init_mm.pgd[index]);
686 + sync_current_stack_to_mm(next);
687 }
688
689 /* Stop remote flushes for the previous mm */
690 diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
691 index 58d4f4e1ad6a..ca38229b045a 100644
692 --- a/drivers/cpufreq/cpufreq_governor.c
693 +++ b/drivers/cpufreq/cpufreq_governor.c
694 @@ -22,6 +22,8 @@
695
696 #include "cpufreq_governor.h"
697
698 +#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL (2 * TICK_NSEC / NSEC_PER_USEC)
699 +
700 static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);
701
702 static DEFINE_MUTEX(gov_dbs_data_mutex);
703 @@ -47,11 +49,15 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
704 {
705 struct dbs_data *dbs_data = to_dbs_data(attr_set);
706 struct policy_dbs_info *policy_dbs;
707 + unsigned int sampling_interval;
708 int ret;
709 - ret = sscanf(buf, "%u", &dbs_data->sampling_rate);
710 - if (ret != 1)
711 +
712 + ret = sscanf(buf, "%u", &sampling_interval);
713 + if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL)
714 return -EINVAL;
715
716 + dbs_data->sampling_rate = sampling_interval;
717 +
718 /*
719 * We are operating under dbs_data->mutex and so the list and its
720 * entries can't be freed concurrently.
721 @@ -430,7 +436,14 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
722 if (ret)
723 goto free_policy_dbs_info;
724
725 - dbs_data->sampling_rate = cpufreq_policy_transition_delay_us(policy);
726 + /*
727 + * The sampling interval should not be less than the transition latency
728 + * of the CPU and it also cannot be too small for dbs_update() to work
729 + * correctly.
730 + */
731 + dbs_data->sampling_rate = max_t(unsigned int,
732 + CPUFREQ_DBS_MIN_SAMPLING_INTERVAL,
733 + cpufreq_policy_transition_delay_us(policy));
734
735 if (!have_governor_per_policy())
736 gov->gdbs_data = dbs_data;
737 diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
738 index d0c6bfb68c4e..c50debb1986f 100644
739 --- a/drivers/gpu/drm/vc4/vc4_gem.c
740 +++ b/drivers/gpu/drm/vc4/vc4_gem.c
741 @@ -146,7 +146,7 @@ vc4_save_hang_state(struct drm_device *dev)
742 struct vc4_exec_info *exec[2];
743 struct vc4_bo *bo;
744 unsigned long irqflags;
745 - unsigned int i, j, unref_list_count, prev_idx;
746 + unsigned int i, j, k, unref_list_count;
747
748 kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
749 if (!kernel_state)
750 @@ -182,24 +182,24 @@ vc4_save_hang_state(struct drm_device *dev)
751 return;
752 }
753
754 - prev_idx = 0;
755 + k = 0;
756 for (i = 0; i < 2; i++) {
757 if (!exec[i])
758 continue;
759
760 for (j = 0; j < exec[i]->bo_count; j++) {
761 drm_gem_object_get(&exec[i]->bo[j]->base);
762 - kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
763 + kernel_state->bo[k++] = &exec[i]->bo[j]->base;
764 }
765
766 list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
767 drm_gem_object_get(&bo->base.base);
768 - kernel_state->bo[j + prev_idx] = &bo->base.base;
769 - j++;
770 + kernel_state->bo[k++] = &bo->base.base;
771 }
772 - prev_idx = j + 1;
773 }
774
775 + WARN_ON_ONCE(k != state->bo_count);
776 +
777 if (exec[0])
778 state->start_bin = exec[0]->ct0ca;
779 if (exec[1])
780 diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
781 index 30d479f87cb8..fb5302ee57c7 100644
782 --- a/drivers/infiniband/hw/mlx5/main.c
783 +++ b/drivers/infiniband/hw/mlx5/main.c
784 @@ -1276,7 +1276,8 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
785 return err;
786
787 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
788 - !MLX5_CAP_GEN(dev->mdev, disable_local_lb))
789 + (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
790 + !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
791 return err;
792
793 mutex_lock(&dev->lb_mutex);
794 @@ -1294,7 +1295,8 @@ static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
795 mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
796
797 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
798 - !MLX5_CAP_GEN(dev->mdev, disable_local_lb))
799 + (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
800 + !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
801 return;
802
803 mutex_lock(&dev->lb_mutex);
804 @@ -4161,7 +4163,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
805 }
806
807 if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
808 - MLX5_CAP_GEN(mdev, disable_local_lb))
809 + (MLX5_CAP_GEN(mdev, disable_local_lb_uc) ||
810 + MLX5_CAP_GEN(mdev, disable_local_lb_mc)))
811 mutex_init(&dev->lb_mutex);
812
813 dev->ib_active = true;
814 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
815 index d86e59515b9c..d88d3e0f59fb 100644
816 --- a/drivers/input/joystick/xpad.c
817 +++ b/drivers/input/joystick/xpad.c
818 @@ -229,6 +229,7 @@ static const struct xpad_device {
819 { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
820 { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
821 { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
822 + { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
823 { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
824 { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
825 { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
826 @@ -475,6 +476,22 @@ static const u8 xboxone_hori_init[] = {
827 0x00, 0x00, 0x00, 0x80, 0x00
828 };
829
830 +/*
831 + * This packet is required for some of the PDP pads to start
832 + * sending input reports. One of those pads is (0x0e6f:0x02ab).
833 + */
834 +static const u8 xboxone_pdp_init1[] = {
835 + 0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14
836 +};
837 +
838 +/*
839 + * This packet is required for some of the PDP pads to start
840 + * sending input reports. One of those pads is (0x0e6f:0x02ab).
841 + */
842 +static const u8 xboxone_pdp_init2[] = {
843 + 0x06, 0x20, 0x00, 0x02, 0x01, 0x00
844 +};
845 +
846 /*
847 * A specific rumble packet is required for some PowerA pads to start
848 * sending input reports. One of those pads is (0x24c6:0x543a).
849 @@ -505,6 +522,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
850 XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
851 XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
852 XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
853 + XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1),
854 + XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
855 XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
856 XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
857 XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
858 diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
859 index 0871010f18d5..bbd29220dbe9 100644
860 --- a/drivers/input/mouse/trackpoint.c
861 +++ b/drivers/input/mouse/trackpoint.c
862 @@ -19,6 +19,13 @@
863 #include "psmouse.h"
864 #include "trackpoint.h"
865
866 +static const char * const trackpoint_variants[] = {
867 + [TP_VARIANT_IBM] = "IBM",
868 + [TP_VARIANT_ALPS] = "ALPS",
869 + [TP_VARIANT_ELAN] = "Elan",
870 + [TP_VARIANT_NXP] = "NXP",
871 +};
872 +
873 /*
874 * Power-on Reset: Resets all trackpoint parameters, including RAM values,
875 * to defaults.
876 @@ -26,7 +33,7 @@
877 */
878 static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
879 {
880 - unsigned char results[2];
881 + u8 results[2];
882 int tries = 0;
883
884 /* Issue POR command, and repeat up to once if 0xFC00 received */
885 @@ -38,7 +45,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
886
887 /* Check for success response -- 0xAA00 */
888 if (results[0] != 0xAA || results[1] != 0x00)
889 - return -1;
890 + return -ENODEV;
891
892 return 0;
893 }
894 @@ -46,8 +53,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
895 /*
896 * Device IO: read, write and toggle bit
897 */
898 -static int trackpoint_read(struct ps2dev *ps2dev,
899 - unsigned char loc, unsigned char *results)
900 +static int trackpoint_read(struct ps2dev *ps2dev, u8 loc, u8 *results)
901 {
902 if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) ||
903 ps2_command(ps2dev, results, MAKE_PS2_CMD(0, 1, loc))) {
904 @@ -57,8 +63,7 @@ static int trackpoint_read(struct ps2dev *ps2dev,
905 return 0;
906 }
907
908 -static int trackpoint_write(struct ps2dev *ps2dev,
909 - unsigned char loc, unsigned char val)
910 +static int trackpoint_write(struct ps2dev *ps2dev, u8 loc, u8 val)
911 {
912 if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) ||
913 ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_WRITE_MEM)) ||
914 @@ -70,8 +75,7 @@ static int trackpoint_write(struct ps2dev *ps2dev,
915 return 0;
916 }
917
918 -static int trackpoint_toggle_bit(struct ps2dev *ps2dev,
919 - unsigned char loc, unsigned char mask)
920 +static int trackpoint_toggle_bit(struct ps2dev *ps2dev, u8 loc, u8 mask)
921 {
922 /* Bad things will happen if the loc param isn't in this range */
923 if (loc < 0x20 || loc >= 0x2F)
924 @@ -87,11 +91,11 @@ static int trackpoint_toggle_bit(struct ps2dev *ps2dev,
925 return 0;
926 }
927
928 -static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc,
929 - unsigned char mask, unsigned char value)
930 +static int trackpoint_update_bit(struct ps2dev *ps2dev,
931 + u8 loc, u8 mask, u8 value)
932 {
933 int retval = 0;
934 - unsigned char data;
935 + u8 data;
936
937 trackpoint_read(ps2dev, loc, &data);
938 if (((data & mask) == mask) != !!value)
939 @@ -105,17 +109,18 @@ static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc,
940 */
941 struct trackpoint_attr_data {
942 size_t field_offset;
943 - unsigned char command;
944 - unsigned char mask;
945 - unsigned char inverted;
946 - unsigned char power_on_default;
947 + u8 command;
948 + u8 mask;
949 + bool inverted;
950 + u8 power_on_default;
951 };
952
953 -static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse, void *data, char *buf)
954 +static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse,
955 + void *data, char *buf)
956 {
957 struct trackpoint_data *tp = psmouse->private;
958 struct trackpoint_attr_data *attr = data;
959 - unsigned char value = *(unsigned char *)((char *)tp + attr->field_offset);
960 + u8 value = *(u8 *)((void *)tp + attr->field_offset);
961
962 if (attr->inverted)
963 value = !value;
964 @@ -128,8 +133,8 @@ static ssize_t trackpoint_set_int_attr(struct psmouse *psmouse, void *data,
965 {
966 struct trackpoint_data *tp = psmouse->private;
967 struct trackpoint_attr_data *attr = data;
968 - unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
969 - unsigned char value;
970 + u8 *field = (void *)tp + attr->field_offset;
971 + u8 value;
972 int err;
973
974 err = kstrtou8(buf, 10, &value);
975 @@ -157,17 +162,14 @@ static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data,
976 {
977 struct trackpoint_data *tp = psmouse->private;
978 struct trackpoint_attr_data *attr = data;
979 - unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
980 - unsigned int value;
981 + bool *field = (void *)tp + attr->field_offset;
982 + bool value;
983 int err;
984
985 - err = kstrtouint(buf, 10, &value);
986 + err = kstrtobool(buf, &value);
987 if (err)
988 return err;
989
990 - if (value > 1)
991 - return -EINVAL;
992 -
993 if (attr->inverted)
994 value = !value;
995
996 @@ -193,30 +195,6 @@ PSMOUSE_DEFINE_ATTR(_name, S_IWUSR | S_IRUGO, \
997 &trackpoint_attr_##_name, \
998 trackpoint_show_int_attr, trackpoint_set_bit_attr)
999
1000 -#define TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name) \
1001 -do { \
1002 - struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name; \
1003 - \
1004 - trackpoint_update_bit(&_psmouse->ps2dev, \
1005 - _attr->command, _attr->mask, _tp->_name); \
1006 -} while (0)
1007 -
1008 -#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name) \
1009 -do { \
1010 - if (!_power_on || \
1011 - _tp->_name != trackpoint_attr_##_name.power_on_default) { \
1012 - if (!trackpoint_attr_##_name.mask) \
1013 - trackpoint_write(&_psmouse->ps2dev, \
1014 - trackpoint_attr_##_name.command, \
1015 - _tp->_name); \
1016 - else \
1017 - TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name); \
1018 - } \
1019 -} while (0)
1020 -
1021 -#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name) \
1022 - (_tp->_name = trackpoint_attr_##_name.power_on_default)
1023 -
1024 TRACKPOINT_INT_ATTR(sensitivity, TP_SENS, TP_DEF_SENS);
1025 TRACKPOINT_INT_ATTR(speed, TP_SPEED, TP_DEF_SPEED);
1026 TRACKPOINT_INT_ATTR(inertia, TP_INERTIA, TP_DEF_INERTIA);
1027 @@ -229,13 +207,33 @@ TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME);
1028 TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV);
1029 TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME);
1030
1031 -TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0,
1032 +TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, false,
1033 TP_DEF_PTSON);
1034 -TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, 0,
1035 +TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, false,
1036 TP_DEF_SKIPBACK);
1037 -TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, 1,
1038 +TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, true,
1039 TP_DEF_EXT_DEV);
1040
1041 +static bool trackpoint_is_attr_available(struct psmouse *psmouse,
1042 + struct attribute *attr)
1043 +{
1044 + struct trackpoint_data *tp = psmouse->private;
1045 +
1046 + return tp->variant_id == TP_VARIANT_IBM ||
1047 + attr == &psmouse_attr_sensitivity.dattr.attr ||
1048 + attr == &psmouse_attr_press_to_select.dattr.attr;
1049 +}
1050 +
1051 +static umode_t trackpoint_is_attr_visible(struct kobject *kobj,
1052 + struct attribute *attr, int n)
1053 +{
1054 + struct device *dev = container_of(kobj, struct device, kobj);
1055 + struct serio *serio = to_serio_port(dev);
1056 + struct psmouse *psmouse = serio_get_drvdata(serio);
1057 +
1058 + return trackpoint_is_attr_available(psmouse, attr) ? attr->mode : 0;
1059 +}
1060 +
1061 static struct attribute *trackpoint_attrs[] = {
1062 &psmouse_attr_sensitivity.dattr.attr,
1063 &psmouse_attr_speed.dattr.attr,
1064 @@ -255,24 +253,56 @@ static struct attribute *trackpoint_attrs[] = {
1065 };
1066
1067 static struct attribute_group trackpoint_attr_group = {
1068 - .attrs = trackpoint_attrs,
1069 + .is_visible = trackpoint_is_attr_visible,
1070 + .attrs = trackpoint_attrs,
1071 };
1072
1073 -static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *firmware_id)
1074 -{
1075 - unsigned char param[2] = { 0 };
1076 +#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name) \
1077 +do { \
1078 + struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name; \
1079 + \
1080 + if ((!_power_on || _tp->_name != _attr->power_on_default) && \
1081 + trackpoint_is_attr_available(_psmouse, \
1082 + &psmouse_attr_##_name.dattr.attr)) { \
1083 + if (!_attr->mask) \
1084 + trackpoint_write(&_psmouse->ps2dev, \
1085 + _attr->command, _tp->_name); \
1086 + else \
1087 + trackpoint_update_bit(&_psmouse->ps2dev, \
1088 + _attr->command, _attr->mask, \
1089 + _tp->_name); \
1090 + } \
1091 +} while (0)
1092
1093 - if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
1094 - return -1;
1095 +#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name) \
1096 +do { \
1097 + _tp->_name = trackpoint_attr_##_name.power_on_default; \
1098 +} while (0)
1099
1100 - /* add new TP ID. */
1101 - if (!(param[0] & TP_MAGIC_IDENT))
1102 - return -1;
1103 +static int trackpoint_start_protocol(struct psmouse *psmouse,
1104 + u8 *variant_id, u8 *firmware_id)
1105 +{
1106 + u8 param[2] = { 0 };
1107 + int error;
1108
1109 - if (firmware_id)
1110 - *firmware_id = param[1];
1111 + error = ps2_command(&psmouse->ps2dev,
1112 + param, MAKE_PS2_CMD(0, 2, TP_READ_ID));
1113 + if (error)
1114 + return error;
1115 +
1116 + switch (param[0]) {
1117 + case TP_VARIANT_IBM:
1118 + case TP_VARIANT_ALPS:
1119 + case TP_VARIANT_ELAN:
1120 + case TP_VARIANT_NXP:
1121 + if (variant_id)
1122 + *variant_id = param[0];
1123 + if (firmware_id)
1124 + *firmware_id = param[1];
1125 + return 0;
1126 + }
1127
1128 - return 0;
1129 + return -ENODEV;
1130 }
1131
1132 /*
1133 @@ -285,7 +315,7 @@ static int trackpoint_sync(struct psmouse *psmouse, bool in_power_on_state)
1134 {
1135 struct trackpoint_data *tp = psmouse->private;
1136
1137 - if (!in_power_on_state) {
1138 + if (!in_power_on_state && tp->variant_id == TP_VARIANT_IBM) {
1139 /*
1140 * Disable features that may make device unusable
1141 * with this driver.
1142 @@ -347,7 +377,8 @@ static void trackpoint_defaults(struct trackpoint_data *tp)
1143
1144 static void trackpoint_disconnect(struct psmouse *psmouse)
1145 {
1146 - sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj, &trackpoint_attr_group);
1147 + device_remove_group(&psmouse->ps2dev.serio->dev,
1148 + &trackpoint_attr_group);
1149
1150 kfree(psmouse->private);
1151 psmouse->private = NULL;
1152 @@ -355,14 +386,20 @@ static void trackpoint_disconnect(struct psmouse *psmouse)
1153
1154 static int trackpoint_reconnect(struct psmouse *psmouse)
1155 {
1156 - int reset_fail;
1157 + struct trackpoint_data *tp = psmouse->private;
1158 + int error;
1159 + bool was_reset;
1160
1161 - if (trackpoint_start_protocol(psmouse, NULL))
1162 - return -1;
1163 + error = trackpoint_start_protocol(psmouse, NULL, NULL);
1164 + if (error)
1165 + return error;
1166
1167 - reset_fail = trackpoint_power_on_reset(&psmouse->ps2dev);
1168 - if (trackpoint_sync(psmouse, !reset_fail))
1169 - return -1;
1170 + was_reset = tp->variant_id == TP_VARIANT_IBM &&
1171 + trackpoint_power_on_reset(&psmouse->ps2dev) == 0;
1172 +
1173 + error = trackpoint_sync(psmouse, was_reset);
1174 + if (error)
1175 + return error;
1176
1177 return 0;
1178 }
1179 @@ -370,46 +407,66 @@ static int trackpoint_reconnect(struct psmouse *psmouse)
1180 int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
1181 {
1182 struct ps2dev *ps2dev = &psmouse->ps2dev;
1183 - unsigned char firmware_id;
1184 - unsigned char button_info;
1185 + struct trackpoint_data *tp;
1186 + u8 variant_id;
1187 + u8 firmware_id;
1188 + u8 button_info;
1189 int error;
1190
1191 - if (trackpoint_start_protocol(psmouse, &firmware_id))
1192 - return -1;
1193 + error = trackpoint_start_protocol(psmouse, &variant_id, &firmware_id);
1194 + if (error)
1195 + return error;
1196
1197 if (!set_properties)
1198 return 0;
1199
1200 - if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) {
1201 - psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
1202 - button_info = 0x33;
1203 - }
1204 -
1205 - psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
1206 - if (!psmouse->private)
1207 + tp = kzalloc(sizeof(*tp), GFP_KERNEL);
1208 + if (!tp)
1209 return -ENOMEM;
1210
1211 - psmouse->vendor = "IBM";
1212 + trackpoint_defaults(tp);
1213 + tp->variant_id = variant_id;
1214 + tp->firmware_id = firmware_id;
1215 +
1216 + psmouse->private = tp;
1217 +
1218 + psmouse->vendor = trackpoint_variants[variant_id];
1219 psmouse->name = "TrackPoint";
1220
1221 psmouse->reconnect = trackpoint_reconnect;
1222 psmouse->disconnect = trackpoint_disconnect;
1223
1224 + if (variant_id != TP_VARIANT_IBM) {
1225 + /* Newer variants do not support extended button query. */
1226 + button_info = 0x33;
1227 + } else {
1228 + error = trackpoint_read(ps2dev, TP_EXT_BTN, &button_info);
1229 + if (error) {
1230 + psmouse_warn(psmouse,
1231 + "failed to get extended button data, assuming 3 buttons\n");
1232 + button_info = 0x33;
1233 + } else if (!button_info) {
1234 + psmouse_warn(psmouse,
1235 + "got 0 in extended button data, assuming 3 buttons\n");
1236 + button_info = 0x33;
1237 + }
1238 + }
1239 +
1240 if ((button_info & 0x0f) >= 3)
1241 - __set_bit(BTN_MIDDLE, psmouse->dev->keybit);
1242 + input_set_capability(psmouse->dev, EV_KEY, BTN_MIDDLE);
1243
1244 __set_bit(INPUT_PROP_POINTER, psmouse->dev->propbit);
1245 __set_bit(INPUT_PROP_POINTING_STICK, psmouse->dev->propbit);
1246
1247 - trackpoint_defaults(psmouse->private);
1248 -
1249 - error = trackpoint_power_on_reset(ps2dev);
1250 -
1251 - /* Write defaults to TP only if reset fails. */
1252 - if (error)
1253 + if (variant_id != TP_VARIANT_IBM ||
1254 + trackpoint_power_on_reset(ps2dev) != 0) {
1255 + /*
1256 + * Write defaults to TP if we did not reset the trackpoint.
1257 + */
1258 trackpoint_sync(psmouse, false);
1259 + }
1260
1261 - error = sysfs_create_group(&ps2dev->serio->dev.kobj, &trackpoint_attr_group);
1262 + error = device_add_group(&ps2dev->serio->dev, &trackpoint_attr_group);
1263 if (error) {
1264 psmouse_err(psmouse,
1265 "failed to create sysfs attributes, error: %d\n",
1266 @@ -420,8 +477,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
1267 }
1268
1269 psmouse_info(psmouse,
1270 - "IBM TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
1271 - firmware_id,
1272 + "%s TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
1273 + psmouse->vendor, firmware_id,
1274 (button_info & 0xf0) >> 4, button_info & 0x0f);
1275
1276 return 0;
1277 diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
1278 index 88055755f82e..10a039148234 100644
1279 --- a/drivers/input/mouse/trackpoint.h
1280 +++ b/drivers/input/mouse/trackpoint.h
1281 @@ -21,10 +21,16 @@
1282 #define TP_COMMAND 0xE2 /* Commands start with this */
1283
1284 #define TP_READ_ID 0xE1 /* Sent for device identification */
1285 -#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */
1286 - /* by the firmware ID */
1287 - /* Firmware ID includes 0x1, 0x2, 0x3 */
1288
1289 +/*
1290 + * Valid first byte responses to the "Read Secondary ID" (0xE1) command.
1291 + * 0x01 was the original IBM trackpoint, others implement very limited
1292 + * subset of trackpoint features.
1293 + */
1294 +#define TP_VARIANT_IBM 0x01
1295 +#define TP_VARIANT_ALPS 0x02
1296 +#define TP_VARIANT_ELAN 0x03
1297 +#define TP_VARIANT_NXP 0x04
1298
1299 /*
1300 * Commands
1301 @@ -136,18 +142,20 @@
1302
1303 #define MAKE_PS2_CMD(params, results, cmd) ((params<<12) | (results<<8) | (cmd))
1304
1305 -struct trackpoint_data
1306 -{
1307 - unsigned char sensitivity, speed, inertia, reach;
1308 - unsigned char draghys, mindrag;
1309 - unsigned char thresh, upthresh;
1310 - unsigned char ztime, jenks;
1311 - unsigned char drift_time;
1312 +struct trackpoint_data {
1313 + u8 variant_id;
1314 + u8 firmware_id;
1315 +
1316 + u8 sensitivity, speed, inertia, reach;
1317 + u8 draghys, mindrag;
1318 + u8 thresh, upthresh;
1319 + u8 ztime, jenks;
1320 + u8 drift_time;
1321
1322 /* toggles */
1323 - unsigned char press_to_select;
1324 - unsigned char skipback;
1325 - unsigned char ext_dev;
1326 + bool press_to_select;
1327 + bool skipback;
1328 + bool ext_dev;
1329 };
1330
1331 #ifdef CONFIG_MOUSE_PS2_TRACKPOINT
1332 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
1333 index 0e3d9f39a807..1b03c32afc1f 100644
1334 --- a/drivers/net/ethernet/emulex/benet/be_main.c
1335 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
1336 @@ -4634,6 +4634,15 @@ int be_update_queues(struct be_adapter *adapter)
1337
1338 be_schedule_worker(adapter);
1339
1340 + /*
1341 + * The IF was destroyed and re-created. We need to clear
1342 + * all promiscuous flags valid for the destroyed IF.
1343 + * Without this promisc mode is not restored during
1344 + * be_open() because the driver thinks that it is
1345 + * already enabled in HW.
1346 + */
1347 + adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
1348 +
1349 if (netif_running(netdev))
1350 status = be_open(netdev);
1351
1352 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
1353 index acf32fe952cd..3d3b1f97dc27 100644
1354 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
1355 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
1356 @@ -197,9 +197,15 @@ static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
1357 return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
1358 MLX5E_AM_STATS_WORSE;
1359
1360 + if (!prev->ppms)
1361 + return curr->ppms ? MLX5E_AM_STATS_BETTER :
1362 + MLX5E_AM_STATS_SAME;
1363 +
1364 if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
1365 return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
1366 MLX5E_AM_STATS_WORSE;
1367 + if (!prev->epms)
1368 + return MLX5E_AM_STATS_SAME;
1369
1370 if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
1371 return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
1372 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
1373 index 1f1f8af87d4d..5a4608281f38 100644
1374 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
1375 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
1376 @@ -238,15 +238,19 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
1377 int err = 0;
1378
1379 /* Temporarily enable local_lb */
1380 - if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) {
1381 - mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
1382 - if (!lbtp->local_lb)
1383 - mlx5_nic_vport_update_local_lb(priv->mdev, true);
1384 + err = mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
1385 + if (err)
1386 + return err;
1387 +
1388 + if (!lbtp->local_lb) {
1389 + err = mlx5_nic_vport_update_local_lb(priv->mdev, true);
1390 + if (err)
1391 + return err;
1392 }
1393
1394 err = mlx5e_refresh_tirs(priv, true);
1395 if (err)
1396 - return err;
1397 + goto out;
1398
1399 lbtp->loopback_ok = false;
1400 init_completion(&lbtp->comp);
1401 @@ -256,16 +260,21 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
1402 lbtp->pt.dev = priv->netdev;
1403 lbtp->pt.af_packet_priv = lbtp;
1404 dev_add_pack(&lbtp->pt);
1405 +
1406 + return 0;
1407 +
1408 +out:
1409 + if (!lbtp->local_lb)
1410 + mlx5_nic_vport_update_local_lb(priv->mdev, false);
1411 +
1412 return err;
1413 }
1414
1415 static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
1416 struct mlx5e_lbt_priv *lbtp)
1417 {
1418 - if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) {
1419 - if (!lbtp->local_lb)
1420 - mlx5_nic_vport_update_local_lb(priv->mdev, false);
1421 - }
1422 + if (!lbtp->local_lb)
1423 + mlx5_nic_vport_update_local_lb(priv->mdev, false);
1424
1425 dev_remove_pack(&lbtp->pt);
1426 mlx5e_refresh_tirs(priv, false);
1427 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
1428 index 8bfc37e4ec87..4ddd632d10f9 100644
1429 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
1430 +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
1431 @@ -577,8 +577,7 @@ static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
1432 int ret = 0;
1433
1434 /* Disable local_lb by default */
1435 - if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
1436 - MLX5_CAP_GEN(dev, disable_local_lb))
1437 + if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
1438 ret = mlx5_nic_vport_update_local_lb(dev, false);
1439
1440 return ret;
1441 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
1442 index d653b0025b13..a1296a62497d 100644
1443 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
1444 +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
1445 @@ -908,23 +908,33 @@ int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
1446 void *in;
1447 int err;
1448
1449 - mlx5_core_dbg(mdev, "%s local_lb\n", enable ? "enable" : "disable");
1450 + if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
1451 + !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
1452 + return 0;
1453 +
1454 in = kvzalloc(inlen, GFP_KERNEL);
1455 if (!in)
1456 return -ENOMEM;
1457
1458 - MLX5_SET(modify_nic_vport_context_in, in,
1459 - field_select.disable_mc_local_lb, 1);
1460 MLX5_SET(modify_nic_vport_context_in, in,
1461 nic_vport_context.disable_mc_local_lb, !enable);
1462 -
1463 - MLX5_SET(modify_nic_vport_context_in, in,
1464 - field_select.disable_uc_local_lb, 1);
1465 MLX5_SET(modify_nic_vport_context_in, in,
1466 nic_vport_context.disable_uc_local_lb, !enable);
1467
1468 + if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
1469 + MLX5_SET(modify_nic_vport_context_in, in,
1470 + field_select.disable_mc_local_lb, 1);
1471 +
1472 + if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
1473 + MLX5_SET(modify_nic_vport_context_in, in,
1474 + field_select.disable_uc_local_lb, 1);
1475 +
1476 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1477
1478 + if (!err)
1479 + mlx5_core_dbg(mdev, "%s local_lb\n",
1480 + enable ? "enable" : "disable");
1481 +
1482 kvfree(in);
1483 return err;
1484 }
1485 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
1486 index c23cc51bb5a5..7bef80676464 100644
1487 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
1488 +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
1489 @@ -1531,11 +1531,8 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1490 dipn = htonl(dip);
1491 dev = mlxsw_sp->router->rifs[rif]->dev;
1492 n = neigh_lookup(&arp_tbl, &dipn, dev);
1493 - if (!n) {
1494 - netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
1495 - &dip);
1496 + if (!n)
1497 return;
1498 - }
1499
1500 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
1501 neigh_event_send(n, NULL);
1502 @@ -1562,11 +1559,8 @@ static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1503
1504 dev = mlxsw_sp->router->rifs[rif]->dev;
1505 n = neigh_lookup(&nd_tbl, &dip, dev);
1506 - if (!n) {
1507 - netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
1508 - &dip);
1509 + if (!n)
1510 return;
1511 - }
1512
1513 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
1514 neigh_event_send(n, NULL);
1515 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
1516 index dc016dfec64d..8e623d8fa78e 100644
1517 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
1518 +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
1519 @@ -306,7 +306,7 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
1520 ls >= ARRAY_SIZE(ls_to_ethtool))
1521 return 0;
1522
1523 - cmd->base.speed = ls_to_ethtool[sts];
1524 + cmd->base.speed = ls_to_ethtool[ls];
1525 cmd->base.duplex = DUPLEX_FULL;
1526
1527 return 0;
1528 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
1529 index 9541465e43e9..958ff931e790 100644
1530 --- a/drivers/net/ethernet/realtek/r8169.c
1531 +++ b/drivers/net/ethernet/realtek/r8169.c
1532 @@ -2239,19 +2239,14 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
1533 void __iomem *ioaddr = tp->mmio_addr;
1534 dma_addr_t paddr = tp->counters_phys_addr;
1535 u32 cmd;
1536 - bool ret;
1537
1538 RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
1539 + RTL_R32(CounterAddrHigh);
1540 cmd = (u64)paddr & DMA_BIT_MASK(32);
1541 RTL_W32(CounterAddrLow, cmd);
1542 RTL_W32(CounterAddrLow, cmd | counter_cmd);
1543
1544 - ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
1545 -
1546 - RTL_W32(CounterAddrLow, 0);
1547 - RTL_W32(CounterAddrHigh, 0);
1548 -
1549 - return ret;
1550 + return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
1551 }
1552
1553 static bool rtl8169_reset_counters(struct net_device *dev)
1554 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
1555 index bf14c51f35e1..8c6b8918ec31 100644
1556 --- a/drivers/net/ppp/ppp_generic.c
1557 +++ b/drivers/net/ppp/ppp_generic.c
1558 @@ -1003,17 +1003,18 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
1559 if (!ifname_is_set)
1560 snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
1561
1562 + mutex_unlock(&pn->all_ppp_mutex);
1563 +
1564 ret = register_netdevice(ppp->dev);
1565 if (ret < 0)
1566 goto err_unit;
1567
1568 atomic_inc(&ppp_unit_count);
1569
1570 - mutex_unlock(&pn->all_ppp_mutex);
1571 -
1572 return 0;
1573
1574 err_unit:
1575 + mutex_lock(&pn->all_ppp_mutex);
1576 unit_put(&pn->units_idr, ppp->file.index);
1577 err:
1578 mutex_unlock(&pn->all_ppp_mutex);
1579 diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
1580 index 4e1da1645b15..5aa59f41bf8c 100644
1581 --- a/drivers/net/ppp/pppoe.c
1582 +++ b/drivers/net/ppp/pppoe.c
1583 @@ -842,6 +842,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
1584 struct pppoe_hdr *ph;
1585 struct net_device *dev;
1586 char *start;
1587 + int hlen;
1588
1589 lock_sock(sk);
1590 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
1591 @@ -860,16 +861,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
1592 if (total_len > (dev->mtu + dev->hard_header_len))
1593 goto end;
1594
1595 -
1596 - skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
1597 - 0, GFP_KERNEL);
1598 + hlen = LL_RESERVED_SPACE(dev);
1599 + skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
1600 + dev->needed_tailroom, 0, GFP_KERNEL);
1601 if (!skb) {
1602 error = -ENOMEM;
1603 goto end;
1604 }
1605
1606 /* Reserve space for headers. */
1607 - skb_reserve(skb, dev->hard_header_len);
1608 + skb_reserve(skb, hlen);
1609 skb_reset_network_header(skb);
1610
1611 skb->dev = dev;
1612 @@ -930,7 +931,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
1613 /* Copy the data if there is no space for the header or if it's
1614 * read-only.
1615 */
1616 - if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len))
1617 + if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
1618 goto abort;
1619
1620 __skb_push(skb, sizeof(*ph));
1621 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
1622 index c91b110f2169..fa51b7b0e9ea 100644
1623 --- a/drivers/net/tun.c
1624 +++ b/drivers/net/tun.c
1625 @@ -534,6 +534,14 @@ static void tun_queue_purge(struct tun_file *tfile)
1626 skb_queue_purge(&tfile->sk.sk_error_queue);
1627 }
1628
1629 +static void tun_cleanup_tx_array(struct tun_file *tfile)
1630 +{
1631 + if (tfile->tx_array.ring.queue) {
1632 + skb_array_cleanup(&tfile->tx_array);
1633 + memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
1634 + }
1635 +}
1636 +
1637 static void __tun_detach(struct tun_file *tfile, bool clean)
1638 {
1639 struct tun_file *ntfile;
1640 @@ -575,8 +583,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
1641 tun->dev->reg_state == NETREG_REGISTERED)
1642 unregister_netdevice(tun->dev);
1643 }
1644 - if (tun)
1645 - skb_array_cleanup(&tfile->tx_array);
1646 + tun_cleanup_tx_array(tfile);
1647 sock_put(&tfile->sk);
1648 }
1649 }
1650 @@ -616,11 +623,13 @@ static void tun_detach_all(struct net_device *dev)
1651 /* Drop read queue */
1652 tun_queue_purge(tfile);
1653 sock_put(&tfile->sk);
1654 + tun_cleanup_tx_array(tfile);
1655 }
1656 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
1657 tun_enable_queue(tfile);
1658 tun_queue_purge(tfile);
1659 sock_put(&tfile->sk);
1660 + tun_cleanup_tx_array(tfile);
1661 }
1662 BUG_ON(tun->numdisabled != 0);
1663
1664 @@ -2624,6 +2633,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
1665
1666 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
1667
1668 + memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
1669 +
1670 return 0;
1671 }
1672
1673 diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
1674 index 0161f77641fa..a8dd1c7a08cb 100644
1675 --- a/drivers/net/usb/lan78xx.c
1676 +++ b/drivers/net/usb/lan78xx.c
1677 @@ -2396,6 +2396,7 @@ static int lan78xx_reset(struct lan78xx_net *dev)
1678 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
1679 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
1680 dev->rx_qlen = 4;
1681 + dev->tx_qlen = 4;
1682 }
1683
1684 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
1685 diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
1686 index d1c7029ded7c..cf95290b160c 100644
1687 --- a/drivers/net/vmxnet3/vmxnet3_drv.c
1688 +++ b/drivers/net/vmxnet3/vmxnet3_drv.c
1689 @@ -1616,7 +1616,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1690 rq->rx_ring[i].basePA);
1691 rq->rx_ring[i].base = NULL;
1692 }
1693 - rq->buf_info[i] = NULL;
1694 }
1695
1696 if (rq->data_ring.base) {
1697 @@ -1638,6 +1637,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1698 (rq->rx_ring[0].size + rq->rx_ring[1].size);
1699 dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
1700 rq->buf_info_pa);
1701 + rq->buf_info[0] = rq->buf_info[1] = NULL;
1702 }
1703 }
1704
1705 diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
1706 index 7dc3bcac3506..67ecf2425b88 100644
1707 --- a/drivers/net/vrf.c
1708 +++ b/drivers/net/vrf.c
1709 @@ -674,8 +674,9 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
1710 struct sock *sk,
1711 struct sk_buff *skb)
1712 {
1713 - /* don't divert multicast */
1714 - if (ipv4_is_multicast(ip_hdr(skb)->daddr))
1715 + /* don't divert multicast or local broadcast */
1716 + if (ipv4_is_multicast(ip_hdr(skb)->daddr) ||
1717 + ipv4_is_lbcast(ip_hdr(skb)->daddr))
1718 return skb;
1719
1720 if (qdisc_tx_is_default(vrf_dev))
1721 diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
1722 index 0c4b690cf761..04f39111fafb 100644
1723 --- a/fs/btrfs/delayed-inode.c
1724 +++ b/fs/btrfs/delayed-inode.c
1725 @@ -1677,28 +1677,18 @@ void btrfs_readdir_put_delayed_items(struct inode *inode,
1726 int btrfs_should_delete_dir_index(struct list_head *del_list,
1727 u64 index)
1728 {
1729 - struct btrfs_delayed_item *curr, *next;
1730 - int ret;
1731 -
1732 - if (list_empty(del_list))
1733 - return 0;
1734 + struct btrfs_delayed_item *curr;
1735 + int ret = 0;
1736
1737 - list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1738 + list_for_each_entry(curr, del_list, readdir_list) {
1739 if (curr->key.offset > index)
1740 break;
1741 -
1742 - list_del(&curr->readdir_list);
1743 - ret = (curr->key.offset == index);
1744 -
1745 - if (refcount_dec_and_test(&curr->refs))
1746 - kfree(curr);
1747 -
1748 - if (ret)
1749 - return 1;
1750 - else
1751 - continue;
1752 + if (curr->key.offset == index) {
1753 + ret = 1;
1754 + break;
1755 + }
1756 }
1757 - return 0;
1758 + return ret;
1759 }
1760
1761 /*
1762 diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
1763 index f650e475d8f0..fdf2aad73470 100644
1764 --- a/fs/nfsd/auth.c
1765 +++ b/fs/nfsd/auth.c
1766 @@ -60,10 +60,10 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
1767 gi->gid[i] = exp->ex_anon_gid;
1768 else
1769 gi->gid[i] = rqgi->gid[i];
1770 -
1771 - /* Each thread allocates its own gi, no race */
1772 - groups_sort(gi);
1773 }
1774 +
1775 + /* Each thread allocates its own gi, no race */
1776 + groups_sort(gi);
1777 } else {
1778 gi = get_group_info(rqgi);
1779 }
1780 diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
1781 index ded456f17de6..c584ad8d023c 100644
1782 --- a/fs/orangefs/devorangefs-req.c
1783 +++ b/fs/orangefs/devorangefs-req.c
1784 @@ -162,7 +162,7 @@ static ssize_t orangefs_devreq_read(struct file *file,
1785 struct orangefs_kernel_op_s *op, *temp;
1786 __s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION;
1787 static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
1788 - struct orangefs_kernel_op_s *cur_op = NULL;
1789 + struct orangefs_kernel_op_s *cur_op;
1790 unsigned long ret;
1791
1792 /* We do not support blocking IO. */
1793 @@ -186,6 +186,7 @@ static ssize_t orangefs_devreq_read(struct file *file,
1794 return -EAGAIN;
1795
1796 restart:
1797 + cur_op = NULL;
1798 /* Get next op (if any) from top of list. */
1799 spin_lock(&orangefs_request_list_lock);
1800 list_for_each_entry_safe(op, temp, &orangefs_request_list, list) {
1801 diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
1802 index e4a8e6a7eb17..962bf4824283 100644
1803 --- a/fs/orangefs/file.c
1804 +++ b/fs/orangefs/file.c
1805 @@ -446,7 +446,7 @@ ssize_t orangefs_inode_read(struct inode *inode,
1806 static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
1807 {
1808 struct file *file = iocb->ki_filp;
1809 - loff_t pos = *(&iocb->ki_pos);
1810 + loff_t pos = iocb->ki_pos;
1811 ssize_t rc = 0;
1812
1813 BUG_ON(iocb->private);
1814 @@ -486,9 +486,6 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
1815 }
1816 }
1817
1818 - if (file->f_pos > i_size_read(file->f_mapping->host))
1819 - orangefs_i_size_write(file->f_mapping->host, file->f_pos);
1820 -
1821 rc = generic_write_checks(iocb, iter);
1822
1823 if (rc <= 0) {
1824 @@ -502,7 +499,7 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
1825 * pos to the end of the file, so we will wait till now to set
1826 * pos...
1827 */
1828 - pos = *(&iocb->ki_pos);
1829 + pos = iocb->ki_pos;
1830
1831 rc = do_readv_writev(ORANGEFS_IO_WRITE,
1832 file,
1833 diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
1834 index 004af348fb80..c244bbf494bc 100644
1835 --- a/fs/orangefs/orangefs-kernel.h
1836 +++ b/fs/orangefs/orangefs-kernel.h
1837 @@ -566,17 +566,6 @@ do { \
1838 sys_attr.mask = ORANGEFS_ATTR_SYS_ALL_SETABLE; \
1839 } while (0)
1840
1841 -static inline void orangefs_i_size_write(struct inode *inode, loff_t i_size)
1842 -{
1843 -#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
1844 - inode_lock(inode);
1845 -#endif
1846 - i_size_write(inode, i_size);
1847 -#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
1848 - inode_unlock(inode);
1849 -#endif
1850 -}
1851 -
1852 static inline void orangefs_set_timeout(struct dentry *dentry)
1853 {
1854 unsigned long time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
1855 diff --git a/fs/orangefs/waitqueue.c b/fs/orangefs/waitqueue.c
1856 index 835c6e148afc..0577d6dba8c8 100644
1857 --- a/fs/orangefs/waitqueue.c
1858 +++ b/fs/orangefs/waitqueue.c
1859 @@ -29,10 +29,10 @@ static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s
1860 */
1861 void purge_waiting_ops(void)
1862 {
1863 - struct orangefs_kernel_op_s *op;
1864 + struct orangefs_kernel_op_s *op, *tmp;
1865
1866 spin_lock(&orangefs_request_list_lock);
1867 - list_for_each_entry(op, &orangefs_request_list, list) {
1868 + list_for_each_entry_safe(op, tmp, &orangefs_request_list, list) {
1869 gossip_debug(GOSSIP_WAIT_DEBUG,
1870 "pvfs2-client-core: purging op tag %llu %s\n",
1871 llu(op->tag),
1872 diff --git a/include/linux/bpf.h b/include/linux/bpf.h
1873 index 0bcf803f20de..5c5be80ce802 100644
1874 --- a/include/linux/bpf.h
1875 +++ b/include/linux/bpf.h
1876 @@ -42,7 +42,14 @@ struct bpf_map_ops {
1877 };
1878
1879 struct bpf_map {
1880 - atomic_t refcnt;
1881 + /* 1st cacheline with read-mostly members of which some
1882 + * are also accessed in fast-path (e.g. ops, max_entries).
1883 + */
1884 + const struct bpf_map_ops *ops ____cacheline_aligned;
1885 + struct bpf_map *inner_map_meta;
1886 +#ifdef CONFIG_SECURITY
1887 + void *security;
1888 +#endif
1889 enum bpf_map_type map_type;
1890 u32 key_size;
1891 u32 value_size;
1892 @@ -52,11 +59,15 @@ struct bpf_map {
1893 u32 id;
1894 int numa_node;
1895 bool unpriv_array;
1896 - struct user_struct *user;
1897 - const struct bpf_map_ops *ops;
1898 - struct work_struct work;
1899 + /* 7 bytes hole */
1900 +
1901 + /* 2nd cacheline with misc members to avoid false sharing
1902 + * particularly with refcounting.
1903 + */
1904 + struct user_struct *user ____cacheline_aligned;
1905 + atomic_t refcnt;
1906 atomic_t usercnt;
1907 - struct bpf_map *inner_map_meta;
1908 + struct work_struct work;
1909 };
1910
1911 /* function argument constraints */
1912 diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
1913 index 8b3d0103c03a..a13525daf09b 100644
1914 --- a/include/linux/mlx5/driver.h
1915 +++ b/include/linux/mlx5/driver.h
1916 @@ -36,6 +36,7 @@
1917 #include <linux/kernel.h>
1918 #include <linux/completion.h>
1919 #include <linux/pci.h>
1920 +#include <linux/irq.h>
1921 #include <linux/spinlock_types.h>
1922 #include <linux/semaphore.h>
1923 #include <linux/slab.h>
1924 @@ -1194,7 +1195,23 @@ enum {
1925 static inline const struct cpumask *
1926 mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
1927 {
1928 - return pci_irq_get_affinity(dev->pdev, MLX5_EQ_VEC_COMP_BASE + vector);
1929 + const struct cpumask *mask;
1930 + struct irq_desc *desc;
1931 + unsigned int irq;
1932 + int eqn;
1933 + int err;
1934 +
1935 + err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
1936 + if (err)
1937 + return NULL;
1938 +
1939 + desc = irq_to_desc(irq);
1940 +#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
1941 + mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
1942 +#else
1943 + mask = desc->irq_common_data.affinity;
1944 +#endif
1945 + return mask;
1946 }
1947
1948 #endif /* MLX5_DRIVER_H */
1949 diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
1950 index c8091f06eaa4..f3765155fa4d 100644
1951 --- a/include/linux/mlx5/mlx5_ifc.h
1952 +++ b/include/linux/mlx5/mlx5_ifc.h
1953 @@ -1023,8 +1023,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
1954 u8 log_max_wq_sz[0x5];
1955
1956 u8 nic_vport_change_event[0x1];
1957 - u8 disable_local_lb[0x1];
1958 - u8 reserved_at_3e2[0x9];
1959 + u8 disable_local_lb_uc[0x1];
1960 + u8 disable_local_lb_mc[0x1];
1961 + u8 reserved_at_3e3[0x8];
1962 u8 log_max_vlan_list[0x5];
1963 u8 reserved_at_3f0[0x3];
1964 u8 log_max_current_mc_list[0x5];
1965 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
1966 index 853291714ae0..bae807eb2933 100644
1967 --- a/include/linux/vermagic.h
1968 +++ b/include/linux/vermagic.h
1969 @@ -31,17 +31,11 @@
1970 #else
1971 #define MODULE_RANDSTRUCT_PLUGIN
1972 #endif
1973 -#ifdef RETPOLINE
1974 -#define MODULE_VERMAGIC_RETPOLINE "retpoline "
1975 -#else
1976 -#define MODULE_VERMAGIC_RETPOLINE ""
1977 -#endif
1978
1979 #define VERMAGIC_STRING \
1980 UTS_RELEASE " " \
1981 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
1982 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
1983 MODULE_ARCH_VERMAGIC \
1984 - MODULE_RANDSTRUCT_PLUGIN \
1985 - MODULE_VERMAGIC_RETPOLINE
1986 + MODULE_RANDSTRUCT_PLUGIN
1987
1988 diff --git a/include/net/arp.h b/include/net/arp.h
1989 index dc8cd47f883b..977aabfcdc03 100644
1990 --- a/include/net/arp.h
1991 +++ b/include/net/arp.h
1992 @@ -20,6 +20,9 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
1993
1994 static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
1995 {
1996 + if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
1997 + key = INADDR_ANY;
1998 +
1999 return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
2000 }
2001
2002 diff --git a/include/net/ipv6.h b/include/net/ipv6.h
2003 index 35e9dd2d18ba..9596aa93d6ef 100644
2004 --- a/include/net/ipv6.h
2005 +++ b/include/net/ipv6.h
2006 @@ -291,6 +291,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
2007 int flags);
2008 int ip6_flowlabel_init(void);
2009 void ip6_flowlabel_cleanup(void);
2010 +bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);
2011
2012 static inline void fl6_sock_release(struct ip6_flowlabel *fl)
2013 {
2014 diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
2015 index 10f99dafd5ac..049008493faf 100644
2016 --- a/include/net/net_namespace.h
2017 +++ b/include/net/net_namespace.h
2018 @@ -223,6 +223,11 @@ int net_eq(const struct net *net1, const struct net *net2)
2019 return net1 == net2;
2020 }
2021
2022 +static inline int check_net(const struct net *net)
2023 +{
2024 + return atomic_read(&net->count) != 0;
2025 +}
2026 +
2027 void net_drop_ns(void *);
2028
2029 #else
2030 @@ -247,6 +252,11 @@ int net_eq(const struct net *net1, const struct net *net2)
2031 return 1;
2032 }
2033
2034 +static inline int check_net(const struct net *net)
2035 +{
2036 + return 1;
2037 +}
2038 +
2039 #define net_drop_ns NULL
2040 #endif
2041
2042 diff --git a/include/net/tls.h b/include/net/tls.h
2043 index c06db1eadac2..df950383b8c1 100644
2044 --- a/include/net/tls.h
2045 +++ b/include/net/tls.h
2046 @@ -168,7 +168,7 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
2047
2048 static inline void tls_err_abort(struct sock *sk)
2049 {
2050 - sk->sk_err = -EBADMSG;
2051 + sk->sk_err = EBADMSG;
2052 sk->sk_error_report(sk);
2053 }
2054
2055 diff --git a/init/Kconfig b/init/Kconfig
2056 index 3c1faaa2af4a..46075327c165 100644
2057 --- a/init/Kconfig
2058 +++ b/init/Kconfig
2059 @@ -1342,6 +1342,13 @@ config BPF_SYSCALL
2060 Enable the bpf() system call that allows to manipulate eBPF
2061 programs and maps via file descriptors.
2062
2063 +config BPF_JIT_ALWAYS_ON
2064 + bool "Permanently enable BPF JIT and remove BPF interpreter"
2065 + depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT
2066 + help
2067 + Enables BPF JIT and removes BPF interpreter to avoid
2068 + speculative execution of BPF instructions by the interpreter
2069 +
2070 config SHMEM
2071 bool "Use full shmem filesystem" if EXPERT
2072 default y
2073 diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
2074 index 7b62df86be1d..2246115365d9 100644
2075 --- a/kernel/bpf/core.c
2076 +++ b/kernel/bpf/core.c
2077 @@ -760,6 +760,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
2078 }
2079 EXPORT_SYMBOL_GPL(__bpf_call_base);
2080
2081 +#ifndef CONFIG_BPF_JIT_ALWAYS_ON
2082 /**
2083 * __bpf_prog_run - run eBPF program on a given context
2084 * @ctx: is the data we are operating on
2085 @@ -948,7 +949,7 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
2086 DST = tmp;
2087 CONT;
2088 ALU_MOD_X:
2089 - if (unlikely(SRC == 0))
2090 + if (unlikely((u32)SRC == 0))
2091 return 0;
2092 tmp = (u32) DST;
2093 DST = do_div(tmp, (u32) SRC);
2094 @@ -967,7 +968,7 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
2095 DST = div64_u64(DST, SRC);
2096 CONT;
2097 ALU_DIV_X:
2098 - if (unlikely(SRC == 0))
2099 + if (unlikely((u32)SRC == 0))
2100 return 0;
2101 tmp = (u32) DST;
2102 do_div(tmp, (u32) SRC);
2103 @@ -1310,6 +1311,14 @@ EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2104 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2105 };
2106
2107 +#else
2108 +static unsigned int __bpf_prog_ret0(const void *ctx,
2109 + const struct bpf_insn *insn)
2110 +{
2111 + return 0;
2112 +}
2113 +#endif
2114 +
2115 bool bpf_prog_array_compatible(struct bpf_array *array,
2116 const struct bpf_prog *fp)
2117 {
2118 @@ -1357,9 +1366,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
2119 */
2120 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
2121 {
2122 +#ifndef CONFIG_BPF_JIT_ALWAYS_ON
2123 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
2124
2125 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
2126 +#else
2127 + fp->bpf_func = __bpf_prog_ret0;
2128 +#endif
2129
2130 /* eBPF JITs can rewrite the program in case constant
2131 * blinding is active. However, in case of error during
2132 @@ -1368,6 +1381,12 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
2133 * be JITed, but falls back to the interpreter.
2134 */
2135 fp = bpf_int_jit_compile(fp);
2136 +#ifdef CONFIG_BPF_JIT_ALWAYS_ON
2137 + if (!fp->jited) {
2138 + *err = -ENOTSUPP;
2139 + return fp;
2140 + }
2141 +#endif
2142 bpf_prog_lock_ro(fp);
2143
2144 /* The tail call compatibility check can only be done at
2145 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
2146 index 75a5c3312f46..f9339c3219bc 100644
2147 --- a/kernel/bpf/verifier.c
2148 +++ b/kernel/bpf/verifier.c
2149 @@ -986,6 +986,13 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
2150 return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]);
2151 }
2152
2153 +static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
2154 +{
2155 + const struct bpf_reg_state *reg = &env->cur_state.regs[regno];
2156 +
2157 + return reg->type == PTR_TO_CTX;
2158 +}
2159 +
2160 static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
2161 int off, int size, bool strict)
2162 {
2163 @@ -1258,6 +1265,12 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
2164 return -EACCES;
2165 }
2166
2167 + if (is_ctx_reg(env, insn->dst_reg)) {
2168 + verbose("BPF_XADD stores into R%d context is not allowed\n",
2169 + insn->dst_reg);
2170 + return -EACCES;
2171 + }
2172 +
2173 /* check whether atomic_add can read the memory */
2174 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
2175 BPF_SIZE(insn->code), BPF_READ, -1);
2176 @@ -3859,6 +3872,12 @@ static int do_check(struct bpf_verifier_env *env)
2177 if (err)
2178 return err;
2179
2180 + if (is_ctx_reg(env, insn->dst_reg)) {
2181 + verbose("BPF_ST stores into R%d context is not allowed\n",
2182 + insn->dst_reg);
2183 + return -EACCES;
2184 + }
2185 +
2186 /* check that memory (dst_reg + off) is writeable */
2187 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
2188 BPF_SIZE(insn->code), BPF_WRITE,
2189 @@ -4304,6 +4323,24 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
2190 int i, cnt, delta = 0;
2191
2192 for (i = 0; i < insn_cnt; i++, insn++) {
2193 + if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
2194 + insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
2195 + /* due to JIT bugs clear upper 32-bits of src register
2196 + * before div/mod operation
2197 + */
2198 + insn_buf[0] = BPF_MOV32_REG(insn->src_reg, insn->src_reg);
2199 + insn_buf[1] = *insn;
2200 + cnt = 2;
2201 + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
2202 + if (!new_prog)
2203 + return -ENOMEM;
2204 +
2205 + delta += cnt - 1;
2206 + env->prog = prog = new_prog;
2207 + insn = new_prog->insnsi + i + delta;
2208 + continue;
2209 + }
2210 +
2211 if (insn->code != (BPF_JMP | BPF_CALL))
2212 continue;
2213
2214 diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
2215 index 88f75f92ef36..052773df9f03 100644
2216 --- a/kernel/time/hrtimer.c
2217 +++ b/kernel/time/hrtimer.c
2218 @@ -655,7 +655,9 @@ static void hrtimer_reprogram(struct hrtimer *timer,
2219 static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
2220 {
2221 base->expires_next = KTIME_MAX;
2222 + base->hang_detected = 0;
2223 base->hres_active = 0;
2224 + base->next_timer = NULL;
2225 }
2226
2227 /*
2228 @@ -1591,6 +1593,7 @@ int hrtimers_prepare_cpu(unsigned int cpu)
2229 timerqueue_init_head(&cpu_base->clock_base[i].active);
2230 }
2231
2232 + cpu_base->active_bases = 0;
2233 cpu_base->cpu = cpu;
2234 hrtimer_init_hres(cpu_base);
2235 return 0;
2236 diff --git a/lib/test_bpf.c b/lib/test_bpf.c
2237 index aa8812ae6776..6fbb73f3f531 100644
2238 --- a/lib/test_bpf.c
2239 +++ b/lib/test_bpf.c
2240 @@ -6207,9 +6207,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
2241 return NULL;
2242 }
2243 }
2244 - /* We don't expect to fail. */
2245 if (*err) {
2246 - pr_cont("FAIL to attach err=%d len=%d\n",
2247 + pr_cont("FAIL to prog_create err=%d len=%d\n",
2248 *err, fprog.len);
2249 return NULL;
2250 }
2251 @@ -6233,6 +6232,10 @@ static struct bpf_prog *generate_filter(int which, int *err)
2252 * checks.
2253 */
2254 fp = bpf_prog_select_runtime(fp, err);
2255 + if (*err) {
2256 + pr_cont("FAIL to select_runtime err=%d\n", *err);
2257 + return NULL;
2258 + }
2259 break;
2260 }
2261
2262 @@ -6418,8 +6421,8 @@ static __init int test_bpf(void)
2263 pass_cnt++;
2264 continue;
2265 }
2266 -
2267 - return err;
2268 + err_cnt++;
2269 + continue;
2270 }
2271
2272 pr_cont("jited:%u ", fp->jited);
2273 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2274 index d51c2087c498..2de080003693 100644
2275 --- a/mm/page_alloc.c
2276 +++ b/mm/page_alloc.c
2277 @@ -3011,9 +3011,6 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2278 if (!area->nr_free)
2279 continue;
2280
2281 - if (alloc_harder)
2282 - return true;
2283 -
2284 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
2285 if (!list_empty(&area->free_list[mt]))
2286 return true;
2287 @@ -3025,6 +3022,9 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2288 return true;
2289 }
2290 #endif
2291 + if (alloc_harder &&
2292 + !list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
2293 + return true;
2294 }
2295 return false;
2296 }
2297 diff --git a/net/core/dev.c b/net/core/dev.c
2298 index 27357fc1730b..ffee085f0357 100644
2299 --- a/net/core/dev.c
2300 +++ b/net/core/dev.c
2301 @@ -3128,10 +3128,21 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
2302 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2303
2304 /* + transport layer */
2305 - if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2306 - hdr_len += tcp_hdrlen(skb);
2307 - else
2308 - hdr_len += sizeof(struct udphdr);
2309 + if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
2310 + const struct tcphdr *th;
2311 + struct tcphdr _tcphdr;
2312 +
2313 + th = skb_header_pointer(skb, skb_transport_offset(skb),
2314 + sizeof(_tcphdr), &_tcphdr);
2315 + if (likely(th))
2316 + hdr_len += __tcp_hdrlen(th);
2317 + } else {
2318 + struct udphdr _udphdr;
2319 +
2320 + if (skb_header_pointer(skb, skb_transport_offset(skb),
2321 + sizeof(_udphdr), &_udphdr))
2322 + hdr_len += sizeof(struct udphdr);
2323 + }
2324
2325 if (shinfo->gso_type & SKB_GSO_DODGY)
2326 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2327 diff --git a/net/core/filter.c b/net/core/filter.c
2328 index 6ae94f825f72..d5158a10ac8f 100644
2329 --- a/net/core/filter.c
2330 +++ b/net/core/filter.c
2331 @@ -457,6 +457,10 @@ static int bpf_convert_filter(struct sock_filter *prog, int len,
2332 convert_bpf_extensions(fp, &insn))
2333 break;
2334
2335 + if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
2336 + fp->code == (BPF_ALU | BPF_MOD | BPF_X))
2337 + *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
2338 +
2339 *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
2340 break;
2341
2342 @@ -1053,11 +1057,9 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
2343 */
2344 goto out_err_free;
2345
2346 - /* We are guaranteed to never error here with cBPF to eBPF
2347 - * transitions, since there's no issue with type compatibility
2348 - * checks on program arrays.
2349 - */
2350 fp = bpf_prog_select_runtime(fp, &err);
2351 + if (err)
2352 + goto out_err_free;
2353
2354 kfree(old_prog);
2355 return fp;
2356 diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
2357 index 0a977373d003..f950b80c0dd1 100644
2358 --- a/net/core/flow_dissector.c
2359 +++ b/net/core/flow_dissector.c
2360 @@ -876,8 +876,8 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
2361 out_good:
2362 ret = true;
2363
2364 - key_control->thoff = (u16)nhoff;
2365 out:
2366 + key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
2367 key_basic->n_proto = proto;
2368 key_basic->ip_proto = ip_proto;
2369
2370 @@ -885,7 +885,6 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
2371
2372 out_bad:
2373 ret = false;
2374 - key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
2375 goto out;
2376 }
2377 EXPORT_SYMBOL(__skb_flow_dissect);
2378 diff --git a/net/core/neighbour.c b/net/core/neighbour.c
2379 index 16a1a4c4eb57..741ae2554190 100644
2380 --- a/net/core/neighbour.c
2381 +++ b/net/core/neighbour.c
2382 @@ -532,7 +532,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
2383 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
2384 nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
2385
2386 - hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
2387 + hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
2388
2389 if (n->parms->dead) {
2390 rc = ERR_PTR(-EINVAL);
2391 @@ -544,7 +544,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
2392 n1 != NULL;
2393 n1 = rcu_dereference_protected(n1->next,
2394 lockdep_is_held(&tbl->lock))) {
2395 - if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
2396 + if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
2397 if (want_ref)
2398 neigh_hold(n1);
2399 rc = n1;
2400 diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
2401 index cbc3dde4cfcc..a47ad6cd41c0 100644
2402 --- a/net/core/sysctl_net_core.c
2403 +++ b/net/core/sysctl_net_core.c
2404 @@ -325,7 +325,13 @@ static struct ctl_table net_core_table[] = {
2405 .data = &bpf_jit_enable,
2406 .maxlen = sizeof(int),
2407 .mode = 0644,
2408 +#ifndef CONFIG_BPF_JIT_ALWAYS_ON
2409 .proc_handler = proc_dointvec
2410 +#else
2411 + .proc_handler = proc_dointvec_minmax,
2412 + .extra1 = &one,
2413 + .extra2 = &one,
2414 +#endif
2415 },
2416 # ifdef CONFIG_HAVE_EBPF_JIT
2417 {
2418 diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
2419 index e1295d5f2c56..97791b0b1b51 100644
2420 --- a/net/dccp/ccids/ccid2.c
2421 +++ b/net/dccp/ccids/ccid2.c
2422 @@ -140,6 +140,9 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
2423
2424 ccid2_pr_debug("RTO_EXPIRE\n");
2425
2426 + if (sk->sk_state == DCCP_CLOSED)
2427 + goto out;
2428 +
2429 /* back-off timer */
2430 hc->tx_rto <<= 1;
2431 if (hc->tx_rto > DCCP_RTO_MAX)
2432 diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
2433 index 7c45b8896709..a1d1f50e0e19 100644
2434 --- a/net/ipv4/arp.c
2435 +++ b/net/ipv4/arp.c
2436 @@ -223,11 +223,16 @@ static bool arp_key_eq(const struct neighbour *neigh, const void *pkey)
2437
2438 static int arp_constructor(struct neighbour *neigh)
2439 {
2440 - __be32 addr = *(__be32 *)neigh->primary_key;
2441 + __be32 addr;
2442 struct net_device *dev = neigh->dev;
2443 struct in_device *in_dev;
2444 struct neigh_parms *parms;
2445 + u32 inaddr_any = INADDR_ANY;
2446
2447 + if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
2448 + memcpy(neigh->primary_key, &inaddr_any, arp_tbl.key_len);
2449 +
2450 + addr = *(__be32 *)neigh->primary_key;
2451 rcu_read_lock();
2452 in_dev = __in_dev_get_rcu(dev);
2453 if (!in_dev) {
2454 diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
2455 index f8b918c766b0..56c49623bb9d 100644
2456 --- a/net/ipv4/esp4_offload.c
2457 +++ b/net/ipv4/esp4_offload.c
2458 @@ -121,6 +121,9 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
2459 if (!xo)
2460 goto out;
2461
2462 + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
2463 + goto out;
2464 +
2465 seq = xo->seq.low;
2466
2467 x = skb->sp->xvec[skb->sp->len - 1];
2468 diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
2469 index c621266e0306..013fed55b610 100644
2470 --- a/net/ipv4/igmp.c
2471 +++ b/net/ipv4/igmp.c
2472 @@ -332,7 +332,7 @@ static __be32 igmpv3_get_srcaddr(struct net_device *dev,
2473 return htonl(INADDR_ANY);
2474
2475 for_ifa(in_dev) {
2476 - if (inet_ifa_match(fl4->saddr, ifa))
2477 + if (fl4->saddr == ifa->ifa_local)
2478 return fl4->saddr;
2479 } endfor_ifa(in_dev);
2480
2481 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
2482 index 804bead564db..0ba88efca7ad 100644
2483 --- a/net/ipv4/route.c
2484 +++ b/net/ipv4/route.c
2485 @@ -2762,6 +2762,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2486 if (err == 0 && rt->dst.error)
2487 err = -rt->dst.error;
2488 } else {
2489 + fl4.flowi4_iif = LOOPBACK_IFINDEX;
2490 rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
2491 err = 0;
2492 if (IS_ERR(rt))
2493 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
2494 index a0c72b09cefc..2a65d806b562 100644
2495 --- a/net/ipv4/tcp.c
2496 +++ b/net/ipv4/tcp.c
2497 @@ -2273,6 +2273,9 @@ void tcp_close(struct sock *sk, long timeout)
2498 tcp_send_active_reset(sk, GFP_ATOMIC);
2499 __NET_INC_STATS(sock_net(sk),
2500 LINUX_MIB_TCPABORTONMEMORY);
2501 + } else if (!check_net(sock_net(sk))) {
2502 + /* Not possible to send reset; just close */
2503 + tcp_set_state(sk, TCP_CLOSE);
2504 }
2505 }
2506
2507 diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
2508 index b6a2aa1dcf56..4d58e2ce0b5b 100644
2509 --- a/net/ipv4/tcp_offload.c
2510 +++ b/net/ipv4/tcp_offload.c
2511 @@ -32,6 +32,9 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
2512 static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
2513 netdev_features_t features)
2514 {
2515 + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
2516 + return ERR_PTR(-EINVAL);
2517 +
2518 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
2519 return ERR_PTR(-EINVAL);
2520
2521 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
2522 index e9af1879cd53..14ac7df95380 100644
2523 --- a/net/ipv4/tcp_timer.c
2524 +++ b/net/ipv4/tcp_timer.c
2525 @@ -50,11 +50,19 @@ static void tcp_write_err(struct sock *sk)
2526 * to prevent DoS attacks. It is called when a retransmission timeout
2527 * or zero probe timeout occurs on orphaned socket.
2528 *
2529 + * Also close if our net namespace is exiting; in that case there is no
2530 + * hope of ever communicating again since all netns interfaces are already
2531 + * down (or about to be down), and we need to release our dst references,
2532 + * which have been moved to the netns loopback interface, so the namespace
2533 + * can finish exiting. This condition is only possible if we are a kernel
2534 + * socket, as those do not hold references to the namespace.
2535 + *
2536 * Criteria is still not confirmed experimentally and may change.
2537 * We kill the socket, if:
2538 * 1. If number of orphaned sockets exceeds an administratively configured
2539 * limit.
2540 * 2. If we have strong memory pressure.
2541 + * 3. If our net namespace is exiting.
2542 */
2543 static int tcp_out_of_resources(struct sock *sk, bool do_reset)
2544 {
2545 @@ -83,6 +91,13 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
2546 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
2547 return 1;
2548 }
2549 +
2550 + if (!check_net(sock_net(sk))) {
2551 + /* Not possible to send reset; just close */
2552 + tcp_done(sk);
2553 + return 1;
2554 + }
2555 +
2556 return 0;
2557 }
2558
2559 diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
2560 index 01801b77bd0d..ea6e6e7df0ee 100644
2561 --- a/net/ipv4/udp_offload.c
2562 +++ b/net/ipv4/udp_offload.c
2563 @@ -203,6 +203,9 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
2564 goto out;
2565 }
2566
2567 + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
2568 + goto out;
2569 +
2570 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
2571 goto out;
2572
2573 diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
2574 index 333a478aa161..1ea9d794447e 100644
2575 --- a/net/ipv6/esp6_offload.c
2576 +++ b/net/ipv6/esp6_offload.c
2577 @@ -148,6 +148,9 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
2578 if (!xo)
2579 goto out;
2580
2581 + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
2582 + goto out;
2583 +
2584 seq = xo->seq.low;
2585
2586 x = skb->sp->xvec[skb->sp->len - 1];
2587 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
2588 index 7a2df6646486..5b4870caf268 100644
2589 --- a/net/ipv6/ip6_gre.c
2590 +++ b/net/ipv6/ip6_gre.c
2591 @@ -337,11 +337,12 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
2592
2593 nt->dev = dev;
2594 nt->net = dev_net(dev);
2595 - ip6gre_tnl_link_config(nt, 1);
2596
2597 if (register_netdevice(dev) < 0)
2598 goto failed_free;
2599
2600 + ip6gre_tnl_link_config(nt, 1);
2601 +
2602 /* Can use a lockless transmit, unless we generate output sequences */
2603 if (!(nt->parms.o_flags & TUNNEL_SEQ))
2604 dev->features |= NETIF_F_LLTX;
2605 @@ -1307,7 +1308,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
2606
2607 static int ip6gre_tap_init(struct net_device *dev)
2608 {
2609 - struct ip6_tnl *tunnel;
2610 int ret;
2611
2612 ret = ip6gre_tunnel_init_common(dev);
2613 @@ -1316,10 +1316,6 @@ static int ip6gre_tap_init(struct net_device *dev)
2614
2615 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
2616
2617 - tunnel = netdev_priv(dev);
2618 -
2619 - ip6gre_tnl_link_config(tunnel, 1);
2620 -
2621 return 0;
2622 }
2623
2624 @@ -1411,12 +1407,16 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
2625
2626 nt->dev = dev;
2627 nt->net = dev_net(dev);
2628 - ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
2629
2630 err = register_netdevice(dev);
2631 if (err)
2632 goto out;
2633
2634 + ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
2635 +
2636 + if (tb[IFLA_MTU])
2637 + ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
2638 +
2639 dev_hold(dev);
2640 ip6gre_tunnel_link(ign, nt);
2641
2642 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2643 index 688ba5f7516b..3763dc01e374 100644
2644 --- a/net/ipv6/ip6_output.c
2645 +++ b/net/ipv6/ip6_output.c
2646 @@ -166,7 +166,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2647 !(IP6CB(skb)->flags & IP6SKB_REROUTED));
2648 }
2649
2650 -static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
2651 +bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
2652 {
2653 if (!np->autoflowlabel_set)
2654 return ip6_default_np_autolabel(net);
2655 @@ -1206,14 +1206,16 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
2656 v6_cork->tclass = ipc6->tclass;
2657 if (rt->dst.flags & DST_XFRM_TUNNEL)
2658 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
2659 - rt->dst.dev->mtu : dst_mtu(&rt->dst);
2660 + READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
2661 else
2662 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
2663 - rt->dst.dev->mtu : dst_mtu(rt->dst.path);
2664 + READ_ONCE(rt->dst.dev->mtu) : dst_mtu(rt->dst.path);
2665 if (np->frag_size < mtu) {
2666 if (np->frag_size)
2667 mtu = np->frag_size;
2668 }
2669 + if (mtu < IPV6_MIN_MTU)
2670 + return -EINVAL;
2671 cork->base.fragsize = mtu;
2672 if (dst_allfrag(rt->dst.path))
2673 cork->base.flags |= IPCORK_ALLFRAG;
2674 @@ -1733,6 +1735,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
2675 cork.base.flags = 0;
2676 cork.base.addr = 0;
2677 cork.base.opt = NULL;
2678 + cork.base.dst = NULL;
2679 v6_cork.opt = NULL;
2680 err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
2681 if (err) {
2682 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
2683 index 90dbfa78a390..3b251760cb8c 100644
2684 --- a/net/ipv6/ipv6_sockglue.c
2685 +++ b/net/ipv6/ipv6_sockglue.c
2686 @@ -1324,7 +1324,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
2687 break;
2688
2689 case IPV6_AUTOFLOWLABEL:
2690 - val = np->autoflowlabel;
2691 + val = ip6_autoflowlabel(sock_net(sk), np);
2692 break;
2693
2694 case IPV6_RECVFRAGSIZE:
2695 diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
2696 index d883c9204c01..278e49cd67d4 100644
2697 --- a/net/ipv6/tcpv6_offload.c
2698 +++ b/net/ipv6/tcpv6_offload.c
2699 @@ -46,6 +46,9 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
2700 {
2701 struct tcphdr *th;
2702
2703 + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
2704 + return ERR_PTR(-EINVAL);
2705 +
2706 if (!pskb_may_pull(skb, sizeof(*th)))
2707 return ERR_PTR(-EINVAL);
2708
2709 diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
2710 index a0f89ad76f9d..2a04dc9c781b 100644
2711 --- a/net/ipv6/udp_offload.c
2712 +++ b/net/ipv6/udp_offload.c
2713 @@ -42,6 +42,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
2714 const struct ipv6hdr *ipv6h;
2715 struct udphdr *uh;
2716
2717 + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
2718 + goto out;
2719 +
2720 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
2721 goto out;
2722
2723 diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
2724 index 41628b393673..d33ce6d5ebce 100644
2725 --- a/net/netfilter/nfnetlink_cthelper.c
2726 +++ b/net/netfilter/nfnetlink_cthelper.c
2727 @@ -17,6 +17,7 @@
2728 #include <linux/types.h>
2729 #include <linux/list.h>
2730 #include <linux/errno.h>
2731 +#include <linux/capability.h>
2732 #include <net/netlink.h>
2733 #include <net/sock.h>
2734
2735 @@ -407,6 +408,9 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
2736 struct nfnl_cthelper *nlcth;
2737 int ret = 0;
2738
2739 + if (!capable(CAP_NET_ADMIN))
2740 + return -EPERM;
2741 +
2742 if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
2743 return -EINVAL;
2744
2745 @@ -611,6 +615,9 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
2746 struct nfnl_cthelper *nlcth;
2747 bool tuple_set = false;
2748
2749 + if (!capable(CAP_NET_ADMIN))
2750 + return -EPERM;
2751 +
2752 if (nlh->nlmsg_flags & NLM_F_DUMP) {
2753 struct netlink_dump_control c = {
2754 .dump = nfnl_cthelper_dump_table,
2755 @@ -678,6 +685,9 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
2756 struct nfnl_cthelper *nlcth, *n;
2757 int j = 0, ret;
2758
2759 + if (!capable(CAP_NET_ADMIN))
2760 + return -EPERM;
2761 +
2762 if (tb[NFCTH_NAME])
2763 helper_name = nla_data(tb[NFCTH_NAME]);
2764
2765 diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
2766 index 36e14b1f061d..a34f314a8c23 100644
2767 --- a/net/netfilter/xt_osf.c
2768 +++ b/net/netfilter/xt_osf.c
2769 @@ -19,6 +19,7 @@
2770 #include <linux/module.h>
2771 #include <linux/kernel.h>
2772
2773 +#include <linux/capability.h>
2774 #include <linux/if.h>
2775 #include <linux/inetdevice.h>
2776 #include <linux/ip.h>
2777 @@ -70,6 +71,9 @@ static int xt_osf_add_callback(struct net *net, struct sock *ctnl,
2778 struct xt_osf_finger *kf = NULL, *sf;
2779 int err = 0;
2780
2781 + if (!capable(CAP_NET_ADMIN))
2782 + return -EPERM;
2783 +
2784 if (!osf_attrs[OSF_ATTR_FINGER])
2785 return -EINVAL;
2786
2787 @@ -115,6 +119,9 @@ static int xt_osf_remove_callback(struct net *net, struct sock *ctnl,
2788 struct xt_osf_finger *sf;
2789 int err = -ENOENT;
2790
2791 + if (!capable(CAP_NET_ADMIN))
2792 + return -EPERM;
2793 +
2794 if (!osf_attrs[OSF_ATTR_FINGER])
2795 return -EINVAL;
2796
2797 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
2798 index aac9d68b4636..533fd0503ba0 100644
2799 --- a/net/netlink/af_netlink.c
2800 +++ b/net/netlink/af_netlink.c
2801 @@ -2393,13 +2393,14 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
2802 struct nlmsghdr *,
2803 struct netlink_ext_ack *))
2804 {
2805 - struct netlink_ext_ack extack = {};
2806 + struct netlink_ext_ack extack;
2807 struct nlmsghdr *nlh;
2808 int err;
2809
2810 while (skb->len >= nlmsg_total_size(0)) {
2811 int msglen;
2812
2813 + memset(&extack, 0, sizeof(extack));
2814 nlh = nlmsg_hdr(skb);
2815 err = 0;
2816
2817 diff --git a/net/sctp/offload.c b/net/sctp/offload.c
2818 index 275925b93b29..35bc7106d182 100644
2819 --- a/net/sctp/offload.c
2820 +++ b/net/sctp/offload.c
2821 @@ -45,6 +45,9 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
2822 struct sk_buff *segs = ERR_PTR(-EINVAL);
2823 struct sctphdr *sh;
2824
2825 + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP))
2826 + goto out;
2827 +
2828 sh = sctp_hdr(skb);
2829 if (!pskb_may_pull(skb, sizeof(*sh)))
2830 goto out;
2831 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
2832 index df806b8819aa..1c08d86efe94 100644
2833 --- a/net/sctp/socket.c
2834 +++ b/net/sctp/socket.c
2835 @@ -84,7 +84,7 @@
2836 static int sctp_writeable(struct sock *sk);
2837 static void sctp_wfree(struct sk_buff *skb);
2838 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
2839 - size_t msg_len, struct sock **orig_sk);
2840 + size_t msg_len);
2841 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
2842 static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
2843 static int sctp_wait_for_accept(struct sock *sk, long timeo);
2844 @@ -334,16 +334,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
2845 if (len < sizeof (struct sockaddr))
2846 return NULL;
2847
2848 + if (!opt->pf->af_supported(addr->sa.sa_family, opt))
2849 + return NULL;
2850 +
2851 /* V4 mapped address are really of AF_INET family */
2852 if (addr->sa.sa_family == AF_INET6 &&
2853 - ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
2854 - if (!opt->pf->af_supported(AF_INET, opt))
2855 - return NULL;
2856 - } else {
2857 - /* Does this PF support this AF? */
2858 - if (!opt->pf->af_supported(addr->sa.sa_family, opt))
2859 - return NULL;
2860 - }
2861 + ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
2862 + !opt->pf->af_supported(AF_INET, opt))
2863 + return NULL;
2864
2865 /* If we get this far, af is valid. */
2866 af = sctp_get_af_specific(addr->sa.sa_family);
2867 @@ -1882,8 +1880,14 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
2868 */
2869 if (sinit) {
2870 if (sinit->sinit_num_ostreams) {
2871 - asoc->c.sinit_num_ostreams =
2872 - sinit->sinit_num_ostreams;
2873 + __u16 outcnt = sinit->sinit_num_ostreams;
2874 +
2875 + asoc->c.sinit_num_ostreams = outcnt;
2876 + /* outcnt has been changed, so re-init stream */
2877 + err = sctp_stream_init(&asoc->stream, outcnt, 0,
2878 + GFP_KERNEL);
2879 + if (err)
2880 + goto out_free;
2881 }
2882 if (sinit->sinit_max_instreams) {
2883 asoc->c.sinit_max_instreams =
2884 @@ -1963,7 +1967,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
2885 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
2886 if (!sctp_wspace(asoc)) {
2887 /* sk can be changed by peel off when waiting for buf. */
2888 - err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk);
2889 + err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
2890 if (err) {
2891 if (err == -ESRCH) {
2892 /* asoc is already dead. */
2893 @@ -7827,12 +7831,12 @@ void sctp_sock_rfree(struct sk_buff *skb)
2894
2895 /* Helper function to wait for space in the sndbuf. */
2896 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
2897 - size_t msg_len, struct sock **orig_sk)
2898 + size_t msg_len)
2899 {
2900 struct sock *sk = asoc->base.sk;
2901 - int err = 0;
2902 long current_timeo = *timeo_p;
2903 DEFINE_WAIT(wait);
2904 + int err = 0;
2905
2906 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
2907 *timeo_p, msg_len);
2908 @@ -7861,17 +7865,13 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
2909 release_sock(sk);
2910 current_timeo = schedule_timeout(current_timeo);
2911 lock_sock(sk);
2912 - if (sk != asoc->base.sk) {
2913 - release_sock(sk);
2914 - sk = asoc->base.sk;
2915 - lock_sock(sk);
2916 - }
2917 + if (sk != asoc->base.sk)
2918 + goto do_error;
2919
2920 *timeo_p = current_timeo;
2921 }
2922
2923 out:
2924 - *orig_sk = sk;
2925 finish_wait(&asoc->wait, &wait);
2926
2927 /* Release the association's refcnt. */
2928 diff --git a/net/socket.c b/net/socket.c
2929 index c729625eb5d3..d894c7c5fa54 100644
2930 --- a/net/socket.c
2931 +++ b/net/socket.c
2932 @@ -2642,6 +2642,15 @@ static int __init sock_init(void)
2933
2934 core_initcall(sock_init); /* early initcall */
2935
2936 +static int __init jit_init(void)
2937 +{
2938 +#ifdef CONFIG_BPF_JIT_ALWAYS_ON
2939 + bpf_jit_enable = 1;
2940 +#endif
2941 + return 0;
2942 +}
2943 +pure_initcall(jit_init);
2944 +
2945 #ifdef CONFIG_PROC_FS
2946 void socket_seq_show(struct seq_file *seq)
2947 {
2948 diff --git a/net/tipc/node.c b/net/tipc/node.c
2949 index 198dbc7adbe1..f6c5743c170e 100644
2950 --- a/net/tipc/node.c
2951 +++ b/net/tipc/node.c
2952 @@ -1848,36 +1848,38 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
2953
2954 if (strcmp(name, tipc_bclink_name) == 0) {
2955 err = tipc_nl_add_bc_link(net, &msg);
2956 - if (err) {
2957 - nlmsg_free(msg.skb);
2958 - return err;
2959 - }
2960 + if (err)
2961 + goto err_free;
2962 } else {
2963 int bearer_id;
2964 struct tipc_node *node;
2965 struct tipc_link *link;
2966
2967 node = tipc_node_find_by_name(net, name, &bearer_id);
2968 - if (!node)
2969 - return -EINVAL;
2970 + if (!node) {
2971 + err = -EINVAL;
2972 + goto err_free;
2973 + }
2974
2975 tipc_node_read_lock(node);
2976 link = node->links[bearer_id].link;
2977 if (!link) {
2978 tipc_node_read_unlock(node);
2979 - nlmsg_free(msg.skb);
2980 - return -EINVAL;
2981 + err = -EINVAL;
2982 + goto err_free;
2983 }
2984
2985 err = __tipc_nl_add_link(net, &msg, link, 0);
2986 tipc_node_read_unlock(node);
2987 - if (err) {
2988 - nlmsg_free(msg.skb);
2989 - return err;
2990 - }
2991 + if (err)
2992 + goto err_free;
2993 }
2994
2995 return genlmsg_reply(msg.skb, info);
2996 +
2997 +err_free:
2998 + nlmsg_free(msg.skb);
2999 + return err;
3000 }
3001
3002 int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
3003 diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
3004 index 60aff60e30ad..282361ac0263 100644
3005 --- a/net/tls/tls_main.c
3006 +++ b/net/tls/tls_main.c
3007 @@ -364,14 +364,16 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
3008 crypto_info = &ctx->crypto_send;
3009
3010 /* Currently we don't support set crypto info more than one time */
3011 - if (TLS_CRYPTO_INFO_READY(crypto_info))
3012 + if (TLS_CRYPTO_INFO_READY(crypto_info)) {
3013 + rc = -EBUSY;
3014 goto out;
3015 + }
3016
3017 switch (tmp_crypto_info.cipher_type) {
3018 case TLS_CIPHER_AES_GCM_128: {
3019 if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) {
3020 rc = -EINVAL;
3021 - goto out;
3022 + goto err_crypto_info;
3023 }
3024 rc = copy_from_user(
3025 crypto_info,
3026 @@ -386,7 +388,7 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
3027 }
3028 default:
3029 rc = -EINVAL;
3030 - goto out;
3031 + goto err_crypto_info;
3032 }
3033
3034 ctx->sk_write_space = sk->sk_write_space;
3035 @@ -444,6 +446,15 @@ static int tls_init(struct sock *sk)
3036 struct tls_context *ctx;
3037 int rc = 0;
3038
3039 + /* The TLS ulp is currently supported only for TCP sockets
3040 + * in ESTABLISHED state.
3041 + * Supporting sockets in LISTEN state will require us
3042 + * to modify the accept implementation to clone rather then
3043 + * share the ulp context.
3044 + */
3045 + if (sk->sk_state != TCP_ESTABLISHED)
3046 + return -ENOTSUPP;
3047 +
3048 /* allocate tls context */
3049 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
3050 if (!ctx) {
3051 diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
3052 index f00383a37622..83f886d7c1f8 100644
3053 --- a/net/tls/tls_sw.c
3054 +++ b/net/tls/tls_sw.c
3055 @@ -407,7 +407,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
3056
3057 while (msg_data_left(msg)) {
3058 if (sk->sk_err) {
3059 - ret = sk->sk_err;
3060 + ret = -sk->sk_err;
3061 goto send_end;
3062 }
3063
3064 @@ -560,7 +560,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
3065 size_t copy, required_size;
3066
3067 if (sk->sk_err) {
3068 - ret = sk->sk_err;
3069 + ret = -sk->sk_err;
3070 goto sendpage_end;
3071 }
3072
3073 @@ -697,18 +697,17 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
3074 }
3075 default:
3076 rc = -EINVAL;
3077 - goto out;
3078 + goto free_priv;
3079 }
3080
3081 ctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
3082 ctx->tag_size = tag_size;
3083 ctx->overhead_size = ctx->prepend_size + ctx->tag_size;
3084 ctx->iv_size = iv_size;
3085 - ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
3086 - GFP_KERNEL);
3087 + ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE, GFP_KERNEL);
3088 if (!ctx->iv) {
3089 rc = -ENOMEM;
3090 - goto out;
3091 + goto free_priv;
3092 }
3093 memcpy(ctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
3094 memcpy(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
3095 @@ -756,7 +755,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
3096
3097 rc = crypto_aead_setauthsize(sw_ctx->aead_send, ctx->tag_size);
3098 if (!rc)
3099 - goto out;
3100 + return 0;
3101
3102 free_aead:
3103 crypto_free_aead(sw_ctx->aead_send);
3104 @@ -767,6 +766,9 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
3105 free_iv:
3106 kfree(ctx->iv);
3107 ctx->iv = NULL;
3108 +free_priv:
3109 + kfree(ctx->priv_ctx);
3110 + ctx->priv_ctx = NULL;
3111 out:
3112 return rc;
3113 }
3114 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
3115 index 6bc16bb61b55..688ed34f0671 100644
3116 --- a/net/xfrm/xfrm_policy.c
3117 +++ b/net/xfrm/xfrm_policy.c
3118 @@ -2056,8 +2056,11 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
3119 if (num_xfrms <= 0)
3120 goto make_dummy_bundle;
3121
3122 + local_bh_disable();
3123 xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
3124 - xflo->dst_orig);
3125 + xflo->dst_orig);
3126 + local_bh_enable();
3127 +
3128 if (IS_ERR(xdst)) {
3129 err = PTR_ERR(xdst);
3130 if (err != -EAGAIN)
3131 @@ -2144,9 +2147,12 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3132 goto no_transform;
3133 }
3134
3135 + local_bh_disable();
3136 xdst = xfrm_resolve_and_create_bundle(
3137 pols, num_pols, fl,
3138 family, dst_orig);
3139 + local_bh_enable();
3140 +
3141 if (IS_ERR(xdst)) {
3142 xfrm_pols_put(pols, num_pols);
3143 err = PTR_ERR(xdst);
3144 diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
3145 index 1241487de93f..16299939d3ff 100644
3146 --- a/tools/testing/selftests/bpf/test_verifier.c
3147 +++ b/tools/testing/selftests/bpf/test_verifier.c
3148 @@ -2595,6 +2595,29 @@ static struct bpf_test tests[] = {
3149 .result = ACCEPT,
3150 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3151 },
3152 + {
3153 + "context stores via ST",
3154 + .insns = {
3155 + BPF_MOV64_IMM(BPF_REG_0, 0),
3156 + BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
3157 + BPF_EXIT_INSN(),
3158 + },
3159 + .errstr = "BPF_ST stores into R1 context is not allowed",
3160 + .result = REJECT,
3161 + .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3162 + },
3163 + {
3164 + "context stores via XADD",
3165 + .insns = {
3166 + BPF_MOV64_IMM(BPF_REG_0, 0),
3167 + BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
3168 + BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
3169 + BPF_EXIT_INSN(),
3170 + },
3171 + .errstr = "BPF_XADD stores into R1 context is not allowed",
3172 + .result = REJECT,
3173 + .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3174 + },
3175 {
3176 "direct packet access: test1",
3177 .insns = {
3178 @@ -4317,7 +4340,8 @@ static struct bpf_test tests[] = {
3179 .fixup_map1 = { 2 },
3180 .errstr_unpriv = "R2 leaks addr into mem",
3181 .result_unpriv = REJECT,
3182 - .result = ACCEPT,
3183 + .result = REJECT,
3184 + .errstr = "BPF_XADD stores into R1 context is not allowed",
3185 },
3186 {
3187 "leak pointer into ctx 2",
3188 @@ -4331,7 +4355,8 @@ static struct bpf_test tests[] = {
3189 },
3190 .errstr_unpriv = "R10 leaks addr into mem",
3191 .result_unpriv = REJECT,
3192 - .result = ACCEPT,
3193 + .result = REJECT,
3194 + .errstr = "BPF_XADD stores into R1 context is not allowed",
3195 },
3196 {
3197 "leak pointer into ctx 3",