Magellan Linux

Contents of /trunk/kernel-alx/patches-4.19/0167-4.19.68-all-fixes.patch

Revision 3464
Tue Oct 29 10:31:25 2019 UTC by niro
File size: 89559 bytes
-linux-4.19.68
1 diff --git a/Makefile b/Makefile
2 index b6aa6e8d4411..6f164b04d953 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 19
9 -SUBLEVEL = 67
10 +SUBLEVEL = 68
11 EXTRAVERSION =
12 NAME = "People's Front"
13
14 diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
15 index 7ed320895d1f..f52a2968a3b6 100644
16 --- a/arch/arm64/include/asm/efi.h
17 +++ b/arch/arm64/include/asm/efi.h
18 @@ -94,7 +94,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
19 ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
20
21 #define alloc_screen_info(x...) &screen_info
22 -#define free_screen_info(x...)
23 +
24 +static inline void free_screen_info(efi_system_table_t *sys_table_arg,
25 + struct screen_info *si)
26 +{
27 +}
28
29 /* redeclare as 'hidden' so the compiler will generate relative references */
30 extern struct screen_info screen_info __attribute__((__visibility__("hidden")));
31 diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
32 index ea423db39364..2214a403f39b 100644
33 --- a/arch/arm64/include/asm/pgtable.h
34 +++ b/arch/arm64/include/asm/pgtable.h
35 @@ -419,8 +419,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
36 PMD_TYPE_SECT)
37
38 #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
39 -#define pud_sect(pud) (0)
40 -#define pud_table(pud) (1)
41 +static inline bool pud_sect(pud_t pud) { return false; }
42 +static inline bool pud_table(pud_t pud) { return true; }
43 #else
44 #define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
45 PUD_TYPE_SECT)
46 diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
47 index 57e962290df3..7eff8afa035f 100644
48 --- a/arch/arm64/kernel/ftrace.c
49 +++ b/arch/arm64/kernel/ftrace.c
50 @@ -76,7 +76,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
51
52 if (offset < -SZ_128M || offset >= SZ_128M) {
53 #ifdef CONFIG_ARM64_MODULE_PLTS
54 - struct plt_entry trampoline;
55 + struct plt_entry trampoline, *dst;
56 struct module *mod;
57
58 /*
59 @@ -104,24 +104,27 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
60 * is added in the future, but for now, the pr_err() below
61 * deals with a theoretical issue only.
62 */
63 + dst = mod->arch.ftrace_trampoline;
64 trampoline = get_plt_entry(addr);
65 - if (!plt_entries_equal(mod->arch.ftrace_trampoline,
66 - &trampoline)) {
67 - if (!plt_entries_equal(mod->arch.ftrace_trampoline,
68 - &(struct plt_entry){})) {
69 + if (!plt_entries_equal(dst, &trampoline)) {
70 + if (!plt_entries_equal(dst, &(struct plt_entry){})) {
71 pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
72 return -EINVAL;
73 }
74
75 /* point the trampoline to our ftrace entry point */
76 module_disable_ro(mod);
77 - *mod->arch.ftrace_trampoline = trampoline;
78 + *dst = trampoline;
79 module_enable_ro(mod, true);
80
81 - /* update trampoline before patching in the branch */
82 - smp_wmb();
83 + /*
84 + * Ensure updated trampoline is visible to instruction
85 + * fetch before we patch in the branch.
86 + */
87 + __flush_icache_range((unsigned long)&dst[0],
88 + (unsigned long)&dst[1]);
89 }
90 - addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
91 + addr = (unsigned long)dst;
92 #else /* CONFIG_ARM64_MODULE_PLTS */
93 return -EINVAL;
94 #endif /* CONFIG_ARM64_MODULE_PLTS */
95 diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
96 index 933adbc0f654..0311fe52c8ff 100644
97 --- a/arch/arm64/kernel/return_address.c
98 +++ b/arch/arm64/kernel/return_address.c
99 @@ -11,6 +11,7 @@
100
101 #include <linux/export.h>
102 #include <linux/ftrace.h>
103 +#include <linux/kprobes.h>
104
105 #include <asm/stack_pointer.h>
106 #include <asm/stacktrace.h>
107 @@ -32,6 +33,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
108 return 0;
109 }
110 }
111 +NOKPROBE_SYMBOL(save_return_addr);
112
113 void *return_address(unsigned int level)
114 {
115 @@ -55,3 +57,4 @@ void *return_address(unsigned int level)
116 return NULL;
117 }
118 EXPORT_SYMBOL_GPL(return_address);
119 +NOKPROBE_SYMBOL(return_address);
120 diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
121 index 4989f7ea1e59..bb482ec044b6 100644
122 --- a/arch/arm64/kernel/stacktrace.c
123 +++ b/arch/arm64/kernel/stacktrace.c
124 @@ -18,6 +18,7 @@
125 #include <linux/kernel.h>
126 #include <linux/export.h>
127 #include <linux/ftrace.h>
128 +#include <linux/kprobes.h>
129 #include <linux/sched.h>
130 #include <linux/sched/debug.h>
131 #include <linux/sched/task_stack.h>
132 @@ -85,6 +86,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
133
134 return 0;
135 }
136 +NOKPROBE_SYMBOL(unwind_frame);
137
138 void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
139 int (*fn)(struct stackframe *, void *), void *data)
140 @@ -99,6 +101,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
141 break;
142 }
143 }
144 +NOKPROBE_SYMBOL(walk_stackframe);
145
146 #ifdef CONFIG_STACKTRACE
147 struct stack_trace_data {
148 diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c
149 index 7a5173ea2276..4c2e96ef306e 100644
150 --- a/arch/arm64/kvm/regmap.c
151 +++ b/arch/arm64/kvm/regmap.c
152 @@ -189,13 +189,18 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
153 switch (spsr_idx) {
154 case KVM_SPSR_SVC:
155 write_sysreg_el1(v, spsr);
156 + break;
157 case KVM_SPSR_ABT:
158 write_sysreg(v, spsr_abt);
159 + break;
160 case KVM_SPSR_UND:
161 write_sysreg(v, spsr_und);
162 + break;
163 case KVM_SPSR_IRQ:
164 write_sysreg(v, spsr_irq);
165 + break;
166 case KVM_SPSR_FIQ:
167 write_sysreg(v, spsr_fiq);
168 + break;
169 }
170 }
171 diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
172 index dd6b05bff75b..d911a8c2314d 100644
173 --- a/arch/riscv/include/asm/switch_to.h
174 +++ b/arch/riscv/include/asm/switch_to.h
175 @@ -23,7 +23,7 @@ extern void __fstate_restore(struct task_struct *restore_from);
176
177 static inline void __fstate_clean(struct pt_regs *regs)
178 {
179 - regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN;
180 + regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
181 }
182
183 static inline void fstate_save(struct task_struct *task,
184 diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
185 index d9ff3b42da7c..2569ffc061f9 100644
186 --- a/arch/sh/kernel/hw_breakpoint.c
187 +++ b/arch/sh/kernel/hw_breakpoint.c
188 @@ -160,6 +160,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
189 switch (sh_type) {
190 case SH_BREAKPOINT_READ:
191 *gen_type = HW_BREAKPOINT_R;
192 + break;
193 case SH_BREAKPOINT_WRITE:
194 *gen_type = HW_BREAKPOINT_W;
195 break;
196 diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
197 index a285fbd0fd9b..15580e4fc766 100644
198 --- a/arch/xtensa/kernel/setup.c
199 +++ b/arch/xtensa/kernel/setup.c
200 @@ -515,6 +515,7 @@ void cpu_reset(void)
201 "add %2, %2, %7\n\t"
202 "addi %0, %0, -1\n\t"
203 "bnez %0, 1b\n\t"
204 + "isync\n\t"
205 /* Jump to identity mapping */
206 "jx %3\n"
207 "2:\n\t"
208 diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
209 index c92c10d55374..5bece9752ed6 100644
210 --- a/drivers/ata/libahci_platform.c
211 +++ b/drivers/ata/libahci_platform.c
212 @@ -313,6 +313,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
213 hpriv->phys[port] = NULL;
214 rc = 0;
215 break;
216 + case -EPROBE_DEFER:
217 + /* Do not complain yet */
218 + break;
219
220 default:
221 dev_err(dev,
222 diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
223 index 173e6f2dd9af..eefda51f97d3 100644
224 --- a/drivers/ata/libata-zpodd.c
225 +++ b/drivers/ata/libata-zpodd.c
226 @@ -56,7 +56,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
227 unsigned int ret;
228 struct rm_feature_desc *desc;
229 struct ata_taskfile tf;
230 - static const char cdb[] = { GPCMD_GET_CONFIGURATION,
231 + static const char cdb[ATAPI_CDB_LEN] = { GPCMD_GET_CONFIGURATION,
232 2, /* only 1 feature descriptor requested */
233 0, 3, /* 3, removable medium feature */
234 0, 0, 0,/* reserved */
235 diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
236 index 33481368740e..113152425a95 100644
237 --- a/drivers/clk/at91/clk-generated.c
238 +++ b/drivers/clk/at91/clk-generated.c
239 @@ -153,6 +153,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
240 continue;
241
242 div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
243 + if (div > GENERATED_MAX_DIV + 1)
244 + div = GENERATED_MAX_DIV + 1;
245
246 clk_generated_best_diff(req, parent, parent_rate, div,
247 &best_diff, &best_rate);
248 diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
249 index f4b013e9352d..24485bee9b49 100644
250 --- a/drivers/clk/renesas/renesas-cpg-mssr.c
251 +++ b/drivers/clk/renesas/renesas-cpg-mssr.c
252 @@ -535,17 +535,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
253 unsigned int reg = id / 32;
254 unsigned int bit = id % 32;
255 u32 bitmask = BIT(bit);
256 - unsigned long flags;
257 - u32 value;
258
259 dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
260
261 /* Reset module */
262 - spin_lock_irqsave(&priv->rmw_lock, flags);
263 - value = readl(priv->base + SRCR(reg));
264 - value |= bitmask;
265 - writel(value, priv->base + SRCR(reg));
266 - spin_unlock_irqrestore(&priv->rmw_lock, flags);
267 + writel(bitmask, priv->base + SRCR(reg));
268
269 /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
270 udelay(35);
271 @@ -562,16 +556,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
272 unsigned int reg = id / 32;
273 unsigned int bit = id % 32;
274 u32 bitmask = BIT(bit);
275 - unsigned long flags;
276 - u32 value;
277
278 dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
279
280 - spin_lock_irqsave(&priv->rmw_lock, flags);
281 - value = readl(priv->base + SRCR(reg));
282 - value |= bitmask;
283 - writel(value, priv->base + SRCR(reg));
284 - spin_unlock_irqrestore(&priv->rmw_lock, flags);
285 + writel(bitmask, priv->base + SRCR(reg));
286 return 0;
287 }
288
289 diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig
290 index 87892471eb96..bad8099832d4 100644
291 --- a/drivers/clk/sprd/Kconfig
292 +++ b/drivers/clk/sprd/Kconfig
293 @@ -2,6 +2,7 @@ config SPRD_COMMON_CLK
294 tristate "Clock support for Spreadtrum SoCs"
295 depends on ARCH_SPRD || COMPILE_TEST
296 default ARCH_SPRD
297 + select REGMAP_MMIO
298
299 if SPRD_COMMON_CLK
300
301 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
302 index f5fb93795a69..65cecfdd9b45 100644
303 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
304 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
305 @@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
306 thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
307 bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
308
309 - data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
310 + data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
311 if (!data)
312 return -ENOMEM;
313
314 diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
315 index bf6cad6c9178..7a3e5a8f6439 100644
316 --- a/drivers/gpu/drm/bridge/Kconfig
317 +++ b/drivers/gpu/drm/bridge/Kconfig
318 @@ -46,6 +46,7 @@ config DRM_DUMB_VGA_DAC
319 config DRM_LVDS_ENCODER
320 tristate "Transparent parallel to LVDS encoder support"
321 depends on OF
322 + select DRM_KMS_HELPER
323 select DRM_PANEL_BRIDGE
324 help
325 Support for transparent parallel to LVDS encoders that don't require
326 diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
327 index 0ddb6eec7b11..df228436a03d 100644
328 --- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
329 +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
330 @@ -108,12 +108,12 @@ static inline int scaler_reset(struct scaler_context *scaler)
331 scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
332 do {
333 cpu_relax();
334 - } while (retry > 1 &&
335 + } while (--retry > 1 &&
336 scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
337 do {
338 cpu_relax();
339 scaler_write(1, SCALER_INT_EN);
340 - } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1);
341 + } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
342
343 return retry ? 0 : -EIO;
344 }
345 diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
346 index ed9a3a1e50ef..dbfd2c006f74 100644
347 --- a/drivers/gpu/drm/msm/msm_drv.c
348 +++ b/drivers/gpu/drm/msm/msm_drv.c
349 @@ -1284,7 +1284,8 @@ static int add_gpu_components(struct device *dev,
350 if (!np)
351 return 0;
352
353 - drm_of_component_match_add(dev, matchptr, compare_of, np);
354 + if (of_device_is_available(np))
355 + drm_of_component_match_add(dev, matchptr, compare_of, np);
356
357 of_node_put(np);
358
359 diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
360 index 6e1a4a4fc0c1..ab9da597106f 100644
361 --- a/drivers/hid/hid-holtek-kbd.c
362 +++ b/drivers/hid/hid-holtek-kbd.c
363 @@ -126,9 +126,14 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
364
365 /* Locate the boot interface, to receive the LED change events */
366 struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0);
367 + struct hid_device *boot_hid;
368 + struct hid_input *boot_hid_input;
369
370 - struct hid_device *boot_hid = usb_get_intfdata(boot_interface);
371 - struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs,
372 + if (unlikely(boot_interface == NULL))
373 + return -ENODEV;
374 +
375 + boot_hid = usb_get_intfdata(boot_interface);
376 + boot_hid_input = list_first_entry(&boot_hid->inputs,
377 struct hid_input, list);
378
379 return boot_hid_input->input->event(boot_hid_input->input, type, code,
380 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
381 index a746017fac17..5a949ca42b1d 100644
382 --- a/drivers/hid/usbhid/hiddev.c
383 +++ b/drivers/hid/usbhid/hiddev.c
384 @@ -297,6 +297,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
385 spin_unlock_irq(&list->hiddev->list_lock);
386
387 mutex_lock(&hiddev->existancelock);
388 + /*
389 + * recheck exist with existance lock held to
390 + * avoid opening a disconnected device
391 + */
392 + if (!list->hiddev->exist) {
393 + res = -ENODEV;
394 + goto bail_unlock;
395 + }
396 if (!list->hiddev->open++)
397 if (list->hiddev->exist) {
398 struct hid_device *hid = hiddev->hid;
399 @@ -313,6 +321,10 @@ bail_normal_power:
400 hid_hw_power(hid, PM_HINT_NORMAL);
401 bail_unlock:
402 mutex_unlock(&hiddev->existancelock);
403 +
404 + spin_lock_irq(&list->hiddev->list_lock);
405 + list_del(&list->node);
406 + spin_unlock_irq(&list->hiddev->list_lock);
407 bail:
408 file->private_data = NULL;
409 vfree(list);
410 diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
411 index ce9af43fa2de..49c1956e6a67 100644
412 --- a/drivers/iio/adc/max9611.c
413 +++ b/drivers/iio/adc/max9611.c
414 @@ -483,7 +483,7 @@ static int max9611_init(struct max9611_dev *max9611)
415 if (ret)
416 return ret;
417
418 - regval = ret & MAX9611_TEMP_MASK;
419 + regval &= MAX9611_TEMP_MASK;
420
421 if ((regval > MAX9611_TEMP_MAX_POS &&
422 regval < MAX9611_TEMP_MIN_NEG) ||
423 diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
424 index ef459f2f2eeb..7586c1dd73f1 100644
425 --- a/drivers/infiniband/core/mad.c
426 +++ b/drivers/infiniband/core/mad.c
427 @@ -3182,18 +3182,18 @@ static int ib_mad_port_open(struct ib_device *device,
428 if (has_smi)
429 cq_size *= 2;
430
431 + port_priv->pd = ib_alloc_pd(device, 0);
432 + if (IS_ERR(port_priv->pd)) {
433 + dev_err(&device->dev, "Couldn't create ib_mad PD\n");
434 + ret = PTR_ERR(port_priv->pd);
435 + goto error3;
436 + }
437 +
438 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
439 IB_POLL_WORKQUEUE);
440 if (IS_ERR(port_priv->cq)) {
441 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
442 ret = PTR_ERR(port_priv->cq);
443 - goto error3;
444 - }
445 -
446 - port_priv->pd = ib_alloc_pd(device, 0);
447 - if (IS_ERR(port_priv->pd)) {
448 - dev_err(&device->dev, "Couldn't create ib_mad PD\n");
449 - ret = PTR_ERR(port_priv->pd);
450 goto error4;
451 }
452
453 @@ -3236,11 +3236,11 @@ error8:
454 error7:
455 destroy_mad_qp(&port_priv->qp_info[0]);
456 error6:
457 - ib_dealloc_pd(port_priv->pd);
458 -error4:
459 ib_free_cq(port_priv->cq);
460 cleanup_recv_queue(&port_priv->qp_info[1]);
461 cleanup_recv_queue(&port_priv->qp_info[0]);
462 +error4:
463 + ib_dealloc_pd(port_priv->pd);
464 error3:
465 kfree(port_priv);
466
467 @@ -3270,8 +3270,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
468 destroy_workqueue(port_priv->wq);
469 destroy_mad_qp(&port_priv->qp_info[1]);
470 destroy_mad_qp(&port_priv->qp_info[0]);
471 - ib_dealloc_pd(port_priv->pd);
472 ib_free_cq(port_priv->cq);
473 + ib_dealloc_pd(port_priv->pd);
474 cleanup_recv_queue(&port_priv->qp_info[1]);
475 cleanup_recv_queue(&port_priv->qp_info[0]);
476 /* XXX: Handle deallocation of MAD registration tables */
477 diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
478 index c34a6852d691..a18f3f8ad77f 100644
479 --- a/drivers/infiniband/core/user_mad.c
480 +++ b/drivers/infiniband/core/user_mad.c
481 @@ -49,6 +49,7 @@
482 #include <linux/sched.h>
483 #include <linux/semaphore.h>
484 #include <linux/slab.h>
485 +#include <linux/nospec.h>
486
487 #include <linux/uaccess.h>
488
489 @@ -868,11 +869,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
490
491 if (get_user(id, arg))
492 return -EFAULT;
493 + if (id >= IB_UMAD_MAX_AGENTS)
494 + return -EINVAL;
495
496 mutex_lock(&file->port->file_mutex);
497 mutex_lock(&file->mutex);
498
499 - if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
500 + id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
501 + if (!__get_agent(file, id)) {
502 ret = -EINVAL;
503 goto out;
504 }
505 diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
506 index 9bab4fb65c68..bd1fdadf7ba0 100644
507 --- a/drivers/infiniband/hw/mlx5/mr.c
508 +++ b/drivers/infiniband/hw/mlx5/mr.c
509 @@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
510 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
511 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
512 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
513 -static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
514 -{
515 - return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
516 -}
517
518 static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
519 {
520 return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
521 }
522
523 -static bool use_umr(struct mlx5_ib_dev *dev, int order)
524 -{
525 - return order <= mr_cache_max_order(dev) &&
526 - umr_can_modify_entity_size(dev);
527 -}
528 -
529 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
530 {
531 int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
532 @@ -1305,7 +1295,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
533 {
534 struct mlx5_ib_dev *dev = to_mdev(pd->device);
535 struct mlx5_ib_mr *mr = NULL;
536 - bool populate_mtts = false;
537 + bool use_umr;
538 struct ib_umem *umem;
539 int page_shift;
540 int npages;
541 @@ -1338,29 +1328,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
542 if (err < 0)
543 return ERR_PTR(err);
544
545 - if (use_umr(dev, order)) {
546 + use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
547 + (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
548 + !MLX5_CAP_GEN(dev->mdev, atomic));
549 +
550 + if (order <= mr_cache_max_order(dev) && use_umr) {
551 mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
552 page_shift, order, access_flags);
553 if (PTR_ERR(mr) == -EAGAIN) {
554 mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
555 mr = NULL;
556 }
557 - populate_mtts = false;
558 } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
559 if (access_flags & IB_ACCESS_ON_DEMAND) {
560 err = -EINVAL;
561 pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
562 goto error;
563 }
564 - populate_mtts = true;
565 + use_umr = false;
566 }
567
568 if (!mr) {
569 - if (!umr_can_modify_entity_size(dev))
570 - populate_mtts = true;
571 mutex_lock(&dev->slow_path_mutex);
572 mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
573 - page_shift, access_flags, populate_mtts);
574 + page_shift, access_flags, !use_umr);
575 mutex_unlock(&dev->slow_path_mutex);
576 }
577
578 @@ -1378,7 +1369,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
579 update_odp_mr(mr);
580 #endif
581
582 - if (!populate_mtts) {
583 + if (use_umr) {
584 int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
585
586 if (access_flags & IB_ACCESS_ON_DEMAND)
587 diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
588 index 78073259c9a1..c431df7401b4 100644
589 --- a/drivers/input/joystick/iforce/iforce-usb.c
590 +++ b/drivers/input/joystick/iforce/iforce-usb.c
591 @@ -141,7 +141,12 @@ static int iforce_usb_probe(struct usb_interface *intf,
592 return -ENODEV;
593
594 epirq = &interface->endpoint[0].desc;
595 + if (!usb_endpoint_is_int_in(epirq))
596 + return -ENODEV;
597 +
598 epout = &interface->endpoint[1].desc;
599 + if (!usb_endpoint_is_int_out(epout))
600 + return -ENODEV;
601
602 if (!(iforce = kzalloc(sizeof(struct iforce) + 32, GFP_KERNEL)))
603 goto fail;
604 diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
605 index 10a039148234..538986e5ac5b 100644
606 --- a/drivers/input/mouse/trackpoint.h
607 +++ b/drivers/input/mouse/trackpoint.h
608 @@ -161,7 +161,8 @@ struct trackpoint_data {
609 #ifdef CONFIG_MOUSE_PS2_TRACKPOINT
610 int trackpoint_detect(struct psmouse *psmouse, bool set_properties);
611 #else
612 -inline int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
613 +static inline int trackpoint_detect(struct psmouse *psmouse,
614 + bool set_properties)
615 {
616 return -ENOSYS;
617 }
618 diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
619 index 75b500651e4e..b1cf0c971274 100644
620 --- a/drivers/input/tablet/kbtab.c
621 +++ b/drivers/input/tablet/kbtab.c
622 @@ -116,6 +116,10 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
623 if (intf->cur_altsetting->desc.bNumEndpoints < 1)
624 return -ENODEV;
625
626 + endpoint = &intf->cur_altsetting->endpoint[0].desc;
627 + if (!usb_endpoint_is_int_in(endpoint))
628 + return -ENODEV;
629 +
630 kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
631 input_dev = input_allocate_device();
632 if (!kbtab || !input_dev)
633 @@ -154,8 +158,6 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
634 input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0);
635 input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0);
636
637 - endpoint = &intf->cur_altsetting->endpoint[0].desc;
638 -
639 usb_fill_int_urb(kbtab->irq, dev,
640 usb_rcvintpipe(dev, endpoint->bEndpointAddress),
641 kbtab->data, 8,
642 diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
643 index 3a1d30304f7e..66b4800bcdd8 100644
644 --- a/drivers/iommu/amd_iommu_init.c
645 +++ b/drivers/iommu/amd_iommu_init.c
646 @@ -1710,7 +1710,7 @@ static const struct attribute_group *amd_iommu_groups[] = {
647 NULL,
648 };
649
650 -static int iommu_init_pci(struct amd_iommu *iommu)
651 +static int __init iommu_init_pci(struct amd_iommu *iommu)
652 {
653 int cap_ptr = iommu->cap_ptr;
654 u32 range, misc, low, high;
655 diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
656 index ee30e8965d1b..9ba73e11757d 100644
657 --- a/drivers/irqchip/irq-gic-v3-its.c
658 +++ b/drivers/irqchip/irq-gic-v3-its.c
659 @@ -2883,7 +2883,7 @@ static int its_vpe_init(struct its_vpe *vpe)
660
661 if (!its_alloc_vpe_table(vpe_id)) {
662 its_vpe_id_free(vpe_id);
663 - its_free_pending_table(vpe->vpt_page);
664 + its_free_pending_table(vpt_page);
665 return -ENOMEM;
666 }
667
668 diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
669 index 4760307ab43f..cef8f5e2e8fc 100644
670 --- a/drivers/irqchip/irq-imx-gpcv2.c
671 +++ b/drivers/irqchip/irq-imx-gpcv2.c
672 @@ -131,6 +131,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = {
673 .irq_unmask = imx_gpcv2_irq_unmask,
674 .irq_set_wake = imx_gpcv2_irq_set_wake,
675 .irq_retrigger = irq_chip_retrigger_hierarchy,
676 + .irq_set_type = irq_chip_set_type_parent,
677 #ifdef CONFIG_SMP
678 .irq_set_affinity = irq_chip_set_affinity_parent,
679 #endif
680 diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
681 index 7d480c930eaf..7e426e4d1352 100644
682 --- a/drivers/md/dm-core.h
683 +++ b/drivers/md/dm-core.h
684 @@ -130,6 +130,7 @@ struct mapped_device {
685 };
686
687 int md_in_flight(struct mapped_device *md);
688 +void disable_discard(struct mapped_device *md);
689 void disable_write_same(struct mapped_device *md);
690 void disable_write_zeroes(struct mapped_device *md);
691
692 diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
693 index 6e547b8dd298..264b84e274aa 100644
694 --- a/drivers/md/dm-rq.c
695 +++ b/drivers/md/dm-rq.c
696 @@ -295,11 +295,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
697 }
698
699 if (unlikely(error == BLK_STS_TARGET)) {
700 - if (req_op(clone) == REQ_OP_WRITE_SAME &&
701 - !clone->q->limits.max_write_same_sectors)
702 + if (req_op(clone) == REQ_OP_DISCARD &&
703 + !clone->q->limits.max_discard_sectors)
704 + disable_discard(tio->md);
705 + else if (req_op(clone) == REQ_OP_WRITE_SAME &&
706 + !clone->q->limits.max_write_same_sectors)
707 disable_write_same(tio->md);
708 - if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
709 - !clone->q->limits.max_write_zeroes_sectors)
710 + else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
711 + !clone->q->limits.max_write_zeroes_sectors)
712 disable_write_zeroes(tio->md);
713 }
714
715 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
716 index 42768fe92b41..c9860e3b04dd 100644
717 --- a/drivers/md/dm.c
718 +++ b/drivers/md/dm.c
719 @@ -910,6 +910,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
720 }
721 }
722
723 +void disable_discard(struct mapped_device *md)
724 +{
725 + struct queue_limits *limits = dm_get_queue_limits(md);
726 +
727 + /* device doesn't really support DISCARD, disable it */
728 + limits->max_discard_sectors = 0;
729 + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
730 +}
731 +
732 void disable_write_same(struct mapped_device *md)
733 {
734 struct queue_limits *limits = dm_get_queue_limits(md);
735 @@ -935,11 +944,14 @@ static void clone_endio(struct bio *bio)
736 dm_endio_fn endio = tio->ti->type->end_io;
737
738 if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
739 - if (bio_op(bio) == REQ_OP_WRITE_SAME &&
740 - !bio->bi_disk->queue->limits.max_write_same_sectors)
741 + if (bio_op(bio) == REQ_OP_DISCARD &&
742 + !bio->bi_disk->queue->limits.max_discard_sectors)
743 + disable_discard(md);
744 + else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
745 + !bio->bi_disk->queue->limits.max_write_same_sectors)
746 disable_write_same(md);
747 - if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
748 - !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
749 + else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
750 + !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
751 disable_write_zeroes(md);
752 }
753
754 diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
755 index 7fdac277e382..9c77bfe4334f 100644
756 --- a/drivers/mmc/host/sdhci-of-arasan.c
757 +++ b/drivers/mmc/host/sdhci-of-arasan.c
758 @@ -788,7 +788,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
759
760 ret = mmc_of_parse(host->mmc);
761 if (ret) {
762 - dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
763 + if (ret != -EPROBE_DEFER)
764 + dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
765 goto unreg_clk;
766 }
767
768 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
769 index be0b785becd0..8f14f85b8e95 100644
770 --- a/drivers/net/bonding/bond_main.c
771 +++ b/drivers/net/bonding/bond_main.c
772 @@ -1102,6 +1102,8 @@ static void bond_compute_features(struct bonding *bond)
773 done:
774 bond_dev->vlan_features = vlan_features;
775 bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
776 + NETIF_F_HW_VLAN_CTAG_TX |
777 + NETIF_F_HW_VLAN_STAG_TX |
778 NETIF_F_GSO_UDP_L4;
779 bond_dev->gso_max_segs = gso_max_segs;
780 netif_set_gso_max_size(bond_dev, gso_max_size);
781 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
782 index 33baa17fa9d5..cf01e73d1bcc 100644
783 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
784 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
785 @@ -3058,12 +3058,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
786 /* if VF indicate to PF this function is going down (PF will delete sp
787 * elements and clear initializations
788 */
789 - if (IS_VF(bp))
790 + if (IS_VF(bp)) {
791 + bnx2x_clear_vlan_info(bp);
792 bnx2x_vfpf_close_vf(bp);
793 - else if (unload_mode != UNLOAD_RECOVERY)
794 + } else if (unload_mode != UNLOAD_RECOVERY) {
795 /* if this is a normal/close unload need to clean up chip*/
796 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
797 - else {
798 + } else {
799 /* Send the UNLOAD_REQUEST to the MCP */
800 bnx2x_send_unload_req(bp, unload_mode);
801
802 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
803 index 0e508e5defce..ee5159ef837e 100644
804 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
805 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
806 @@ -425,6 +425,8 @@ void bnx2x_set_reset_global(struct bnx2x *bp);
807 void bnx2x_disable_close_the_gate(struct bnx2x *bp);
808 int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
809
810 +void bnx2x_clear_vlan_info(struct bnx2x *bp);
811 +
812 /**
813 * bnx2x_sp_event - handle ramrods completion.
814 *
815 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
816 index 2c9af0f420e5..68c62e32e882 100644
817 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
818 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
819 @@ -8488,11 +8488,21 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
820 return rc;
821 }
822
823 +void bnx2x_clear_vlan_info(struct bnx2x *bp)
824 +{
825 + struct bnx2x_vlan_entry *vlan;
826 +
827 + /* Mark that hw forgot all entries */
828 + list_for_each_entry(vlan, &bp->vlan_reg, link)
829 + vlan->hw = false;
830 +
831 + bp->vlan_cnt = 0;
832 +}
833 +
834 static int bnx2x_del_all_vlans(struct bnx2x *bp)
835 {
836 struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
837 unsigned long ramrod_flags = 0, vlan_flags = 0;
838 - struct bnx2x_vlan_entry *vlan;
839 int rc;
840
841 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
842 @@ -8501,10 +8511,7 @@ static int bnx2x_del_all_vlans(struct bnx2x *bp)
843 if (rc)
844 return rc;
845
846 - /* Mark that hw forgot all entries */
847 - list_for_each_entry(vlan, &bp->vlan_reg, link)
848 - vlan->hw = false;
849 - bp->vlan_cnt = 0;
850 + bnx2x_clear_vlan_info(bp);
851
852 return 0;
853 }
854 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
855 index f5cd9539980f..45d9a5f8fa1b 100644
856 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
857 +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
858 @@ -1190,7 +1190,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
859 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
860 if (err) {
861 en_err(priv, "Failed to allocate RSS indirection QP\n");
862 - goto rss_err;
863 + goto qp_alloc_err;
864 }
865
866 rss_map->indir_qp->event = mlx4_en_sqp_event;
867 @@ -1244,6 +1244,7 @@ indir_err:
868 MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
869 mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
870 mlx4_qp_free(mdev->dev, rss_map->indir_qp);
871 +qp_alloc_err:
872 kfree(rss_map->indir_qp);
873 rss_map->indir_qp = NULL;
874 rss_err:
875 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
876 index 45cdde694d20..a4be04debe67 100644
877 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
878 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
879 @@ -437,12 +437,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
880 return &arfs_t->rules_hash[bucket_idx];
881 }
882
883 -static u8 arfs_get_ip_proto(const struct sk_buff *skb)
884 -{
885 - return (skb->protocol == htons(ETH_P_IP)) ?
886 - ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
887 -}
888 -
889 static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
890 u8 ip_proto, __be16 etype)
891 {
892 @@ -599,31 +593,9 @@ out:
893 arfs_may_expire_flow(priv);
894 }
895
896 -/* return L4 destination port from ip4/6 packets */
897 -static __be16 arfs_get_dst_port(const struct sk_buff *skb)
898 -{
899 - char *transport_header;
900 -
901 - transport_header = skb_transport_header(skb);
902 - if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
903 - return ((struct tcphdr *)transport_header)->dest;
904 - return ((struct udphdr *)transport_header)->dest;
905 -}
906 -
907 -/* return L4 source port from ip4/6 packets */
908 -static __be16 arfs_get_src_port(const struct sk_buff *skb)
909 -{
910 - char *transport_header;
911 -
912 - transport_header = skb_transport_header(skb);
913 - if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
914 - return ((struct tcphdr *)transport_header)->source;
915 - return ((struct udphdr *)transport_header)->source;
916 -}
917 -
918 static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
919 struct arfs_table *arfs_t,
920 - const struct sk_buff *skb,
921 + const struct flow_keys *fk,
922 u16 rxq, u32 flow_id)
923 {
924 struct arfs_rule *rule;
925 @@ -638,19 +610,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
926 INIT_WORK(&rule->arfs_work, arfs_handle_work);
927
928 tuple = &rule->tuple;
929 - tuple->etype = skb->protocol;
930 + tuple->etype = fk->basic.n_proto;
931 + tuple->ip_proto = fk->basic.ip_proto;
932 if (tuple->etype == htons(ETH_P_IP)) {
933 - tuple->src_ipv4 = ip_hdr(skb)->saddr;
934 - tuple->dst_ipv4 = ip_hdr(skb)->daddr;
935 + tuple->src_ipv4 = fk->addrs.v4addrs.src;
936 + tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
937 } else {
938 - memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
939 + memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
940 sizeof(struct in6_addr));
941 - memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
942 + memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
943 sizeof(struct in6_addr));
944 }
945 - tuple->ip_proto = arfs_get_ip_proto(skb);
946 - tuple->src_port = arfs_get_src_port(skb);
947 - tuple->dst_port = arfs_get_dst_port(skb);
948 + tuple->src_port = fk->ports.src;
949 + tuple->dst_port = fk->ports.dst;
950
951 rule->flow_id = flow_id;
952 rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
953 @@ -661,37 +633,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
954 return rule;
955 }
956
957 -static bool arfs_cmp_ips(struct arfs_tuple *tuple,
958 - const struct sk_buff *skb)
959 +static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
960 {
961 - if (tuple->etype == htons(ETH_P_IP) &&
962 - tuple->src_ipv4 == ip_hdr(skb)->saddr &&
963 - tuple->dst_ipv4 == ip_hdr(skb)->daddr)
964 - return true;
965 - if (tuple->etype == htons(ETH_P_IPV6) &&
966 - (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
967 - sizeof(struct in6_addr))) &&
968 - (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
969 - sizeof(struct in6_addr))))
970 - return true;
971 + if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
972 + return false;
973 + if (tuple->etype != fk->basic.n_proto)
974 + return false;
975 + if (tuple->etype == htons(ETH_P_IP))
976 + return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
977 + tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
978 + if (tuple->etype == htons(ETH_P_IPV6))
979 + return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
980 + sizeof(struct in6_addr)) &&
981 + !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
982 + sizeof(struct in6_addr));
983 return false;
984 }
985
986 static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
987 - const struct sk_buff *skb)
988 + const struct flow_keys *fk)
989 {
990 struct arfs_rule *arfs_rule;
991 struct hlist_head *head;
992 - __be16 src_port = arfs_get_src_port(skb);
993 - __be16 dst_port = arfs_get_dst_port(skb);
994
995 - head = arfs_hash_bucket(arfs_t, src_port, dst_port);
996 + head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
997 hlist_for_each_entry(arfs_rule, head, hlist) {
998 - if (arfs_rule->tuple.src_port == src_port &&
999 - arfs_rule->tuple.dst_port == dst_port &&
1000 - arfs_cmp_ips(&arfs_rule->tuple, skb)) {
1001 + if (arfs_cmp(&arfs_rule->tuple, fk))
1002 return arfs_rule;
1003 - }
1004 }
1005
1006 return NULL;
1007 @@ -704,20 +672,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
1008 struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
1009 struct arfs_table *arfs_t;
1010 struct arfs_rule *arfs_rule;
1011 + struct flow_keys fk;
1012 +
1013 + if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
1014 + return -EPROTONOSUPPORT;
1015
1016 - if (skb->protocol != htons(ETH_P_IP) &&
1017 - skb->protocol != htons(ETH_P_IPV6))
1018 + if (fk.basic.n_proto != htons(ETH_P_IP) &&
1019 + fk.basic.n_proto != htons(ETH_P_IPV6))
1020 return -EPROTONOSUPPORT;
1021
1022 if (skb->encapsulation)
1023 return -EPROTONOSUPPORT;
1024
1025 - arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
1026 + arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
1027 if (!arfs_t)
1028 return -EPROTONOSUPPORT;
1029
1030 spin_lock_bh(&arfs->arfs_lock);
1031 - arfs_rule = arfs_find_rule(arfs_t, skb);
1032 + arfs_rule = arfs_find_rule(arfs_t, &fk);
1033 if (arfs_rule) {
1034 if (arfs_rule->rxq == rxq_index) {
1035 spin_unlock_bh(&arfs->arfs_lock);
1036 @@ -725,8 +697,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
1037 }
1038 arfs_rule->rxq = rxq_index;
1039 } else {
1040 - arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
1041 - rxq_index, flow_id);
1042 + arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
1043 if (!arfs_rule) {
1044 spin_unlock_bh(&arfs->arfs_lock);
1045 return -ENOMEM;
1046 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1047 index 792bb8bc0cd3..2b9350f4c752 100644
1048 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1049 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1050 @@ -1083,6 +1083,9 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
1051 struct mlx5_core_dev *mdev = priv->mdev;
1052 int err;
1053
1054 + if (!MLX5_CAP_GEN(mdev, vport_group_manager))
1055 + return -EOPNOTSUPP;
1056 +
1057 if (pauseparam->autoneg)
1058 return -EINVAL;
1059
1060 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
1061 index dc30f11f4766..3feb49badda9 100644
1062 --- a/drivers/net/team/team.c
1063 +++ b/drivers/net/team/team.c
1064 @@ -1011,6 +1011,8 @@ static void __team_compute_features(struct team *team)
1065
1066 team->dev->vlan_features = vlan_features;
1067 team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
1068 + NETIF_F_HW_VLAN_CTAG_TX |
1069 + NETIF_F_HW_VLAN_STAG_TX |
1070 NETIF_F_GSO_UDP_L4;
1071 team->dev->hard_header_len = max_hard_header_len;
1072
1073 diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
1074 index f4247b275e09..b7a0df95d4b0 100644
1075 --- a/drivers/net/usb/pegasus.c
1076 +++ b/drivers/net/usb/pegasus.c
1077 @@ -285,7 +285,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
1078 static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
1079 {
1080 int i;
1081 - __u8 tmp;
1082 + __u8 tmp = 0;
1083 __le16 retdatai;
1084 int ret;
1085
1086 diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
1087 index d5081ffdc8f0..1c849106b793 100644
1088 --- a/drivers/net/xen-netback/netback.c
1089 +++ b/drivers/net/xen-netback/netback.c
1090 @@ -925,6 +925,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1091 skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
1092 nskb = xenvif_alloc_skb(0);
1093 if (unlikely(nskb == NULL)) {
1094 + skb_shinfo(skb)->nr_frags = 0;
1095 kfree_skb(skb);
1096 xenvif_tx_err(queue, &txreq, extra_count, idx);
1097 if (net_ratelimit())
1098 @@ -940,6 +941,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1099
1100 if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1101 /* Failure in xenvif_set_skb_gso is fatal. */
1102 + skb_shinfo(skb)->nr_frags = 0;
1103 kfree_skb(skb);
1104 kfree_skb(nskb);
1105 break;
1106 diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
1107 index 1c64fd8e9234..72bdda4ccebf 100644
1108 --- a/drivers/pwm/sysfs.c
1109 +++ b/drivers/pwm/sysfs.c
1110 @@ -263,7 +263,6 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
1111 export->pwm = pwm;
1112 mutex_init(&export->lock);
1113
1114 - export->child.class = parent->class;
1115 export->child.release = pwm_export_release;
1116 export->child.parent = parent;
1117 export->child.devt = MKDEV(0, 0);
1118 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
1119 index c43eccdea65d..f570b8c5d857 100644
1120 --- a/drivers/scsi/hpsa.c
1121 +++ b/drivers/scsi/hpsa.c
1122 @@ -2320,6 +2320,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
1123 case IOACCEL2_SERV_RESPONSE_COMPLETE:
1124 switch (c2->error_data.status) {
1125 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
1126 + if (cmd)
1127 + cmd->result = 0;
1128 break;
1129 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
1130 cmd->result |= SAM_STAT_CHECK_CONDITION;
1131 @@ -2479,8 +2481,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
1132
1133 /* check for good status */
1134 if (likely(c2->error_data.serv_response == 0 &&
1135 - c2->error_data.status == 0))
1136 + c2->error_data.status == 0)) {
1137 + cmd->result = 0;
1138 return hpsa_cmd_free_and_done(h, c, cmd);
1139 + }
1140
1141 /*
1142 * Any RAID offload error results in retry which will use
1143 @@ -5617,6 +5621,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
1144 }
1145 c = cmd_tagged_alloc(h, cmd);
1146
1147 + /*
1148 + * This is necessary because the SML doesn't zero out this field during
1149 + * error recovery.
1150 + */
1151 + cmd->result = 0;
1152 +
1153 /*
1154 * Call alternate submit routine for I/O accelerated commands.
1155 * Retries always go down the normal I/O path.
1156 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
1157 index f84f9bf15027..ddce32fe0513 100644
1158 --- a/drivers/scsi/qla2xxx/qla_init.c
1159 +++ b/drivers/scsi/qla2xxx/qla_init.c
1160 @@ -4732,7 +4732,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
1161 ql_log(ql_log_warn, vha, 0xd049,
1162 "Failed to allocate ct_sns request.\n");
1163 kfree(fcport);
1164 - fcport = NULL;
1165 + return NULL;
1166 }
1167 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
1168 INIT_LIST_HEAD(&fcport->gnl_entry);
1169 diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
1170 index 2edf3ee91300..caf4d4df4bd3 100644
1171 --- a/drivers/staging/comedi/drivers/dt3000.c
1172 +++ b/drivers/staging/comedi/drivers/dt3000.c
1173 @@ -342,9 +342,9 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
1174 static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
1175 unsigned int flags)
1176 {
1177 - int divider, base, prescale;
1178 + unsigned int divider, base, prescale;
1179
1180 - /* This function needs improvment */
1181 + /* This function needs improvement */
1182 /* Don't know if divider==0 works. */
1183
1184 for (prescale = 0; prescale < 16; prescale++) {
1185 @@ -358,7 +358,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
1186 divider = (*nanosec) / base;
1187 break;
1188 case CMDF_ROUND_UP:
1189 - divider = (*nanosec) / base;
1190 + divider = DIV_ROUND_UP(*nanosec, base);
1191 break;
1192 }
1193 if (divider < 65536) {
1194 @@ -368,7 +368,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
1195 }
1196
1197 prescale = 15;
1198 - base = timer_base * (1 << prescale);
1199 + base = timer_base * (prescale + 1);
1200 divider = 65535;
1201 *nanosec = divider * base;
1202 return (prescale << 16) | (divider);
1203 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
1204 index 5b442bc68a76..59675cc7aa01 100644
1205 --- a/drivers/usb/class/cdc-acm.c
1206 +++ b/drivers/usb/class/cdc-acm.c
1207 @@ -1333,10 +1333,6 @@ made_compressed_probe:
1208 tty_port_init(&acm->port);
1209 acm->port.ops = &acm_port_ops;
1210
1211 - minor = acm_alloc_minor(acm);
1212 - if (minor < 0)
1213 - goto alloc_fail1;
1214 -
1215 ctrlsize = usb_endpoint_maxp(epctrl);
1216 readsize = usb_endpoint_maxp(epread) *
1217 (quirks == SINGLE_RX_URB ? 1 : 2);
1218 @@ -1344,6 +1340,13 @@ made_compressed_probe:
1219 acm->writesize = usb_endpoint_maxp(epwrite) * 20;
1220 acm->control = control_interface;
1221 acm->data = data_interface;
1222 +
1223 + usb_get_intf(acm->control); /* undone in destruct() */
1224 +
1225 + minor = acm_alloc_minor(acm);
1226 + if (minor < 0)
1227 + goto alloc_fail1;
1228 +
1229 acm->minor = minor;
1230 acm->dev = usb_dev;
1231 if (h.usb_cdc_acm_descriptor)
1232 @@ -1490,7 +1493,6 @@ skip_countries:
1233 usb_driver_claim_interface(&acm_driver, data_interface, acm);
1234 usb_set_intfdata(data_interface, acm);
1235
1236 - usb_get_intf(control_interface);
1237 tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
1238 &control_interface->dev);
1239 if (IS_ERR(tty_dev)) {
1240 diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
1241 index 65de6f73b672..558890ada0e5 100644
1242 --- a/drivers/usb/core/file.c
1243 +++ b/drivers/usb/core/file.c
1244 @@ -193,9 +193,10 @@ int usb_register_dev(struct usb_interface *intf,
1245 intf->minor = minor;
1246 break;
1247 }
1248 - up_write(&minor_rwsem);
1249 - if (intf->minor < 0)
1250 + if (intf->minor < 0) {
1251 + up_write(&minor_rwsem);
1252 return -EXFULL;
1253 + }
1254
1255 /* create a usb class device for this usb interface */
1256 snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
1257 @@ -203,12 +204,11 @@ int usb_register_dev(struct usb_interface *intf,
1258 MKDEV(USB_MAJOR, minor), class_driver,
1259 "%s", kbasename(name));
1260 if (IS_ERR(intf->usb_dev)) {
1261 - down_write(&minor_rwsem);
1262 usb_minors[minor] = NULL;
1263 intf->minor = -1;
1264 - up_write(&minor_rwsem);
1265 retval = PTR_ERR(intf->usb_dev);
1266 }
1267 + up_write(&minor_rwsem);
1268 return retval;
1269 }
1270 EXPORT_SYMBOL_GPL(usb_register_dev);
1271 @@ -234,12 +234,12 @@ void usb_deregister_dev(struct usb_interface *intf,
1272 return;
1273
1274 dev_dbg(&intf->dev, "removing %d minor\n", intf->minor);
1275 + device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
1276
1277 down_write(&minor_rwsem);
1278 usb_minors[intf->minor] = NULL;
1279 up_write(&minor_rwsem);
1280
1281 - device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
1282 intf->usb_dev = NULL;
1283 intf->minor = -1;
1284 destroy_usb_class();
1285 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
1286 index 4020ce8db6ce..0d3fd2083165 100644
1287 --- a/drivers/usb/core/message.c
1288 +++ b/drivers/usb/core/message.c
1289 @@ -2211,14 +2211,14 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
1290 (struct usb_cdc_dmm_desc *)buffer;
1291 break;
1292 case USB_CDC_MDLM_TYPE:
1293 - if (elength < sizeof(struct usb_cdc_mdlm_desc *))
1294 + if (elength < sizeof(struct usb_cdc_mdlm_desc))
1295 goto next_desc;
1296 if (desc)
1297 return -EINVAL;
1298 desc = (struct usb_cdc_mdlm_desc *)buffer;
1299 break;
1300 case USB_CDC_MDLM_DETAIL_TYPE:
1301 - if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
1302 + if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
1303 goto next_desc;
1304 if (detail)
1305 return -EINVAL;
1306 diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
1307 index fea02c7ad4f4..a5254e82d628 100644
1308 --- a/drivers/usb/gadget/udc/renesas_usb3.c
1309 +++ b/drivers/usb/gadget/udc/renesas_usb3.c
1310 @@ -19,6 +19,7 @@
1311 #include <linux/pm_runtime.h>
1312 #include <linux/sizes.h>
1313 #include <linux/slab.h>
1314 +#include <linux/string.h>
1315 #include <linux/sys_soc.h>
1316 #include <linux/uaccess.h>
1317 #include <linux/usb/ch9.h>
1318 @@ -2378,9 +2379,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
1319 if (usb3->forced_b_device)
1320 return -EBUSY;
1321
1322 - if (!strncmp(buf, "host", strlen("host")))
1323 + if (sysfs_streq(buf, "host"))
1324 new_mode_is_host = true;
1325 - else if (!strncmp(buf, "peripheral", strlen("peripheral")))
1326 + else if (sysfs_streq(buf, "peripheral"))
1327 new_mode_is_host = false;
1328 else
1329 return -EINVAL;
1330 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1331 index e0a4749ba565..56f572cb08f8 100644
1332 --- a/drivers/usb/serial/option.c
1333 +++ b/drivers/usb/serial/option.c
1334 @@ -968,6 +968,11 @@ static const struct usb_device_id option_ids[] = {
1335 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
1336 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
1337
1338 + /* Motorola devices */
1339 + { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) }, /* mdm6600 */
1340 + { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) }, /* mdm9600 */
1341 + { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) }, /* mdm ram dl */
1342 + { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) }, /* mdm qc dl */
1343
1344 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
1345 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
1346 @@ -1549,6 +1554,7 @@ static const struct usb_device_id option_ids[] = {
1347 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
1348 .driver_info = RSVD(2) },
1349 { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
1350 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
1351 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
1352 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
1353 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
1354 @@ -1952,11 +1958,15 @@ static const struct usb_device_id option_ids[] = {
1355 .driver_info = RSVD(4) },
1356 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
1357 .driver_info = RSVD(4) },
1358 + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff), /* D-Link DWM-222 A2 */
1359 + .driver_info = RSVD(4) },
1360 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
1361 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
1362 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
1363 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
1364 .driver_info = RSVD(4) },
1365 + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */
1366 + .driver_info = RSVD(4) },
1367 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
1368 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
1369 { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
1370 diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
1371 index 73427d8e0116..e5694133ebe5 100644
1372 --- a/drivers/xen/xen-pciback/conf_space_capability.c
1373 +++ b/drivers/xen/xen-pciback/conf_space_capability.c
1374 @@ -116,13 +116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
1375 {
1376 int err;
1377 u16 old_value;
1378 - pci_power_t new_state, old_state;
1379 + pci_power_t new_state;
1380
1381 err = pci_read_config_word(dev, offset, &old_value);
1382 if (err)
1383 goto out;
1384
1385 - old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
1386 new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
1387
1388 new_value &= PM_OK_BITS;
1389 diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
1390 index ac6c383d6314..19855659f650 100644
1391 --- a/fs/btrfs/backref.c
1392 +++ b/fs/btrfs/backref.c
1393 @@ -1485,7 +1485,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
1394 goto out;
1395 }
1396
1397 - trans = btrfs_attach_transaction(root);
1398 + trans = btrfs_join_transaction_nostart(root);
1399 if (IS_ERR(trans)) {
1400 if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1401 ret = PTR_ERR(trans);
1402 diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
1403 index f1ca53a3ff0b..26317bca5649 100644
1404 --- a/fs/btrfs/transaction.c
1405 +++ b/fs/btrfs/transaction.c
1406 @@ -28,15 +28,18 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
1407 [TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH),
1408 [TRANS_STATE_COMMIT_DOING] = (__TRANS_START |
1409 __TRANS_ATTACH |
1410 - __TRANS_JOIN),
1411 + __TRANS_JOIN |
1412 + __TRANS_JOIN_NOSTART),
1413 [TRANS_STATE_UNBLOCKED] = (__TRANS_START |
1414 __TRANS_ATTACH |
1415 __TRANS_JOIN |
1416 - __TRANS_JOIN_NOLOCK),
1417 + __TRANS_JOIN_NOLOCK |
1418 + __TRANS_JOIN_NOSTART),
1419 [TRANS_STATE_COMPLETED] = (__TRANS_START |
1420 __TRANS_ATTACH |
1421 __TRANS_JOIN |
1422 - __TRANS_JOIN_NOLOCK),
1423 + __TRANS_JOIN_NOLOCK |
1424 + __TRANS_JOIN_NOSTART),
1425 };
1426
1427 void btrfs_put_transaction(struct btrfs_transaction *transaction)
1428 @@ -531,7 +534,8 @@ again:
1429 ret = join_transaction(fs_info, type);
1430 if (ret == -EBUSY) {
1431 wait_current_trans(fs_info);
1432 - if (unlikely(type == TRANS_ATTACH))
1433 + if (unlikely(type == TRANS_ATTACH ||
1434 + type == TRANS_JOIN_NOSTART))
1435 ret = -ENOENT;
1436 }
1437 } while (ret == -EBUSY);
1438 @@ -647,6 +651,16 @@ struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root
1439 BTRFS_RESERVE_NO_FLUSH, true);
1440 }
1441
1442 +/*
1443 + * Similar to regular join but it never starts a transaction when none is
1444 + * running or after waiting for the current one to finish.
1445 + */
1446 +struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
1447 +{
1448 + return start_transaction(root, 0, TRANS_JOIN_NOSTART,
1449 + BTRFS_RESERVE_NO_FLUSH, true);
1450 +}
1451 +
1452 /*
1453 * btrfs_attach_transaction() - catch the running transaction
1454 *
1455 diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
1456 index 4cbb1b55387d..c1d34cc70472 100644
1457 --- a/fs/btrfs/transaction.h
1458 +++ b/fs/btrfs/transaction.h
1459 @@ -97,11 +97,13 @@ struct btrfs_transaction {
1460 #define __TRANS_JOIN (1U << 11)
1461 #define __TRANS_JOIN_NOLOCK (1U << 12)
1462 #define __TRANS_DUMMY (1U << 13)
1463 +#define __TRANS_JOIN_NOSTART (1U << 14)
1464
1465 #define TRANS_START (__TRANS_START | __TRANS_FREEZABLE)
1466 #define TRANS_ATTACH (__TRANS_ATTACH)
1467 #define TRANS_JOIN (__TRANS_JOIN | __TRANS_FREEZABLE)
1468 #define TRANS_JOIN_NOLOCK (__TRANS_JOIN_NOLOCK)
1469 +#define TRANS_JOIN_NOSTART (__TRANS_JOIN_NOSTART)
1470
1471 #define TRANS_EXTWRITERS (__TRANS_START | __TRANS_ATTACH)
1472
1473 @@ -187,6 +189,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
1474 int min_factor);
1475 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
1476 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
1477 +struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
1478 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
1479 struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
1480 struct btrfs_root *root);
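A minimal sketch of how the new __TRANS_JOIN_NOSTART bit participates in the blocked-type masks above, assuming (from the table's name) that a join type whose bit is set in a state's mask has to wait; only bit values shown in this hunk are used, and must_wait()/main() are purely illustrative:

#include <stdio.h>

#define __TRANS_JOIN         (1U << 11)
#define __TRANS_JOIN_NOLOCK  (1U << 12)
#define __TRANS_JOIN_NOSTART (1U << 14)

/* A join type is held back when its bit appears in the state's blocked mask. */
static int must_wait(unsigned int blocked_mask, unsigned int type)
{
	return (blocked_mask & type) != 0;
}

int main(void)
{
	/* Subset of the TRANS_STATE_COMMIT_DOING mask from the hunk above. */
	unsigned int commit_doing = __TRANS_JOIN | __TRANS_JOIN_NOSTART;

	printf("%d %d\n", must_wait(commit_doing, __TRANS_JOIN_NOSTART),
	       must_wait(commit_doing, __TRANS_JOIN_NOLOCK));	/* prints: 1 0 */
	return 0;
}
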
1481 diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
1482 index 3a24ce3deb01..c146e12a8601 100644
1483 --- a/fs/ocfs2/xattr.c
1484 +++ b/fs/ocfs2/xattr.c
1485 @@ -3833,7 +3833,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
1486 u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
1487 int low_bucket = 0, bucket, high_bucket;
1488 struct ocfs2_xattr_bucket *search;
1489 - u32 last_hash;
1490 u64 blkno, lower_blkno = 0;
1491
1492 search = ocfs2_xattr_bucket_new(inode);
1493 @@ -3877,8 +3876,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
1494 if (xh->xh_count)
1495 xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
1496
1497 - last_hash = le32_to_cpu(xe->xe_name_hash);
1498 -
1499 /* record lower_blkno which may be the insert place. */
1500 lower_blkno = blkno;
1501
1502 diff --git a/fs/seq_file.c b/fs/seq_file.c
1503 index 1dea7a8a5255..05e58b56f620 100644
1504 --- a/fs/seq_file.c
1505 +++ b/fs/seq_file.c
1506 @@ -119,6 +119,7 @@ static int traverse(struct seq_file *m, loff_t offset)
1507 }
1508 if (seq_has_overflowed(m))
1509 goto Eoverflow;
1510 + p = m->op->next(m, p, &m->index);
1511 if (pos + m->count > offset) {
1512 m->from = offset - pos;
1513 m->count -= m->from;
1514 @@ -126,7 +127,6 @@ static int traverse(struct seq_file *m, loff_t offset)
1515 }
1516 pos += m->count;
1517 m->count = 0;
1518 - p = m->op->next(m, p, &m->index);
1519 if (pos == offset)
1520 break;
1521 }
1522 diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
1523 index c64bea7a52be..e9f20b813a69 100644
1524 --- a/include/asm-generic/getorder.h
1525 +++ b/include/asm-generic/getorder.h
1526 @@ -7,24 +7,6 @@
1527 #include <linux/compiler.h>
1528 #include <linux/log2.h>
1529
1530 -/*
1531 - * Runtime evaluation of get_order()
1532 - */
1533 -static inline __attribute_const__
1534 -int __get_order(unsigned long size)
1535 -{
1536 - int order;
1537 -
1538 - size--;
1539 - size >>= PAGE_SHIFT;
1540 -#if BITS_PER_LONG == 32
1541 - order = fls(size);
1542 -#else
1543 - order = fls64(size);
1544 -#endif
1545 - return order;
1546 -}
1547 -
1548 /**
1549 * get_order - Determine the allocation order of a memory size
1550 * @size: The size for which to get the order
1551 @@ -43,19 +25,27 @@ int __get_order(unsigned long size)
1552 * to hold an object of the specified size.
1553 *
1554 * The result is undefined if the size is 0.
1555 - *
1556 - * This function may be used to initialise variables with compile time
1557 - * evaluations of constants.
1558 */
1559 -#define get_order(n) \
1560 -( \
1561 - __builtin_constant_p(n) ? ( \
1562 - ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \
1563 - (((n) < (1UL << PAGE_SHIFT)) ? 0 : \
1564 - ilog2((n) - 1) - PAGE_SHIFT + 1) \
1565 - ) : \
1566 - __get_order(n) \
1567 -)
1568 +static inline __attribute_const__ int get_order(unsigned long size)
1569 +{
1570 + if (__builtin_constant_p(size)) {
1571 + if (!size)
1572 + return BITS_PER_LONG - PAGE_SHIFT;
1573 +
1574 + if (size < (1UL << PAGE_SHIFT))
1575 + return 0;
1576 +
1577 + return ilog2((size) - 1) - PAGE_SHIFT + 1;
1578 + }
1579 +
1580 + size--;
1581 + size >>= PAGE_SHIFT;
1582 +#if BITS_PER_LONG == 32
1583 + return fls(size);
1584 +#else
1585 + return fls64(size);
1586 +#endif
1587 +}
1588
1589 #endif /* __ASSEMBLY__ */
1590
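For illustration, a standalone userspace sketch of the runtime branch of the get_order() rewrite above; PAGE_SHIFT is assumed to be 12, longs are assumed to be 64-bit, and fls64_demo() stands in for the kernel's fls64() (the kernel version additionally constant-folds via __builtin_constant_p, as the hunk shows):

#include <stdio.h>

static int fls64_demo(unsigned long x)
{
	return x ? 64 - __builtin_clzl(x) : 0;	/* position of highest set bit, 1-based */
}

static int get_order_demo(unsigned long size)	/* undefined for size == 0 */
{
	size--;
	size >>= 12;				/* PAGE_SHIFT assumed to be 12 */
	return fls64_demo(size);
}

int main(void)
{
	printf("%d %d %d\n", get_order_demo(1), get_order_demo(4096),
	       get_order_demo(8192));		/* prints: 0 0 1 */
	return 0;
}
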
1591 diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
1592 index fbf5cfc9b352..fd965ffbb92e 100644
1593 --- a/include/drm/i915_pciids.h
1594 +++ b/include/drm/i915_pciids.h
1595 @@ -386,6 +386,7 @@
1596 INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
1597 INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
1598 INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \
1599 + INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \
1600 INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */
1601
1602 /* CFL H */
1603 diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
1604 index 90ac450745f1..561fefc2a980 100644
1605 --- a/include/kvm/arm_vgic.h
1606 +++ b/include/kvm/arm_vgic.h
1607 @@ -361,6 +361,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
1608
1609 void kvm_vgic_load(struct kvm_vcpu *vcpu);
1610 void kvm_vgic_put(struct kvm_vcpu *vcpu);
1611 +void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
1612
1613 #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
1614 #define vgic_initialized(k) ((k)->arch.vgic.initialized)
1615 diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
1616 index 4e3625109b28..64d54acc9928 100644
1617 --- a/kernel/sched/cpufreq_schedutil.c
1618 +++ b/kernel/sched/cpufreq_schedutil.c
1619 @@ -40,6 +40,7 @@ struct sugov_policy {
1620 struct task_struct *thread;
1621 bool work_in_progress;
1622
1623 + bool limits_changed;
1624 bool need_freq_update;
1625 };
1626
1627 @@ -90,8 +91,11 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
1628 !cpufreq_this_cpu_can_update(sg_policy->policy))
1629 return false;
1630
1631 - if (unlikely(sg_policy->need_freq_update))
1632 + if (unlikely(sg_policy->limits_changed)) {
1633 + sg_policy->limits_changed = false;
1634 + sg_policy->need_freq_update = true;
1635 return true;
1636 + }
1637
1638 delta_ns = time - sg_policy->last_freq_update_time;
1639
1640 @@ -405,7 +409,7 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
1641 static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
1642 {
1643 if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
1644 - sg_policy->need_freq_update = true;
1645 + sg_policy->limits_changed = true;
1646 }
1647
1648 static void sugov_update_single(struct update_util_data *hook, u64 time,
1649 @@ -425,7 +429,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
1650 if (!sugov_should_update_freq(sg_policy, time))
1651 return;
1652
1653 - busy = sugov_cpu_is_busy(sg_cpu);
1654 + /* Limits may have changed, don't skip frequency update */
1655 + busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);
1656
1657 util = sugov_get_util(sg_cpu);
1658 max = sg_cpu->max;
1659 @@ -798,6 +803,7 @@ static int sugov_start(struct cpufreq_policy *policy)
1660 sg_policy->last_freq_update_time = 0;
1661 sg_policy->next_freq = 0;
1662 sg_policy->work_in_progress = false;
1663 + sg_policy->limits_changed = false;
1664 sg_policy->need_freq_update = false;
1665 sg_policy->cached_raw_freq = 0;
1666
1667 @@ -849,7 +855,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
1668 mutex_unlock(&sg_policy->work_lock);
1669 }
1670
1671 - sg_policy->need_freq_update = true;
1672 + sg_policy->limits_changed = true;
1673 }
1674
1675 static struct cpufreq_governor schedutil_gov = {
1676 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
1677 index 6c94b6865ac2..5eeabece0c17 100644
1678 --- a/mm/kmemleak.c
1679 +++ b/mm/kmemleak.c
1680 @@ -126,7 +126,7 @@
1681 /* GFP bitmask for kmemleak internal allocations */
1682 #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
1683 __GFP_NORETRY | __GFP_NOMEMALLOC | \
1684 - __GFP_NOWARN | __GFP_NOFAIL)
1685 + __GFP_NOWARN)
1686
1687 /* scanning area inside a memory block */
1688 struct kmemleak_scan_area {
1689 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
1690 index 7e7cc0cd89fe..ecde75f2189b 100644
1691 --- a/mm/memcontrol.c
1692 +++ b/mm/memcontrol.c
1693 @@ -1037,26 +1037,45 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
1694 css_put(&prev->css);
1695 }
1696
1697 -static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1698 +static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1699 + struct mem_cgroup *dead_memcg)
1700 {
1701 - struct mem_cgroup *memcg = dead_memcg;
1702 struct mem_cgroup_reclaim_iter *iter;
1703 struct mem_cgroup_per_node *mz;
1704 int nid;
1705 int i;
1706
1707 - for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1708 - for_each_node(nid) {
1709 - mz = mem_cgroup_nodeinfo(memcg, nid);
1710 - for (i = 0; i <= DEF_PRIORITY; i++) {
1711 - iter = &mz->iter[i];
1712 - cmpxchg(&iter->position,
1713 - dead_memcg, NULL);
1714 - }
1715 + for_each_node(nid) {
1716 + mz = mem_cgroup_nodeinfo(from, nid);
1717 + for (i = 0; i <= DEF_PRIORITY; i++) {
1718 + iter = &mz->iter[i];
1719 + cmpxchg(&iter->position,
1720 + dead_memcg, NULL);
1721 }
1722 }
1723 }
1724
1725 +static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1726 +{
1727 + struct mem_cgroup *memcg = dead_memcg;
1728 + struct mem_cgroup *last;
1729 +
1730 + do {
1731 + __invalidate_reclaim_iterators(memcg, dead_memcg);
1732 + last = memcg;
1733 + } while ((memcg = parent_mem_cgroup(memcg)));
1734 +
1735 + /*
1736 + * When cgroup1 non-hierarchy mode is used,
1737 + * parent_mem_cgroup() does not walk all the way up to the
1738 + * cgroup root (root_mem_cgroup). So we have to handle
1739 + * dead_memcg from cgroup root separately.
1740 + */
1741 + if (last != root_mem_cgroup)
1742 + __invalidate_reclaim_iterators(root_mem_cgroup,
1743 + dead_memcg);
1744 +}
1745 +
1746 /**
1747 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1748 * @memcg: hierarchy root
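A rough userspace analogue of the walk performed by invalidate_reclaim_iterators() above: visit the starting node and each ancestor, then visit the root once more if the parent walk stopped short of it. struct node and the names below are inventions of this example, not the kernel API:

#include <stdio.h>

struct node { const char *name; struct node *parent; };

static void visit(struct node *n) { printf("visit %s\n", n->name); }

static void invalidate_all(struct node *start, struct node *root)
{
	struct node *n = start, *last = NULL;

	do {
		visit(n);
		last = n;
	} while ((n = n->parent));

	if (last != root)
		visit(root);	/* the parent walk did not reach the root */
}

int main(void)
{
	struct node root = { "root", NULL };
	struct node orphan = { "orphan", NULL };	/* not linked under root */

	invalidate_all(&orphan, &root);	/* prints: visit orphan, visit root */
	return 0;
}
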
1749 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
1750 index 62f945ea3e36..70298b635b59 100644
1751 --- a/mm/mempolicy.c
1752 +++ b/mm/mempolicy.c
1753 @@ -403,7 +403,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
1754 },
1755 };
1756
1757 -static void migrate_page_add(struct page *page, struct list_head *pagelist,
1758 +static int migrate_page_add(struct page *page, struct list_head *pagelist,
1759 unsigned long flags);
1760
1761 struct queue_pages {
1762 @@ -429,11 +429,14 @@ static inline bool queue_pages_required(struct page *page,
1763 }
1764
1765 /*
1766 - * queue_pages_pmd() has three possible return values:
1767 - * 1 - pages are placed on the right node or queued successfully.
1768 - * 0 - THP was split.
1769 - * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing
1770 - * page was already on a node that does not follow the policy.
1771 + * queue_pages_pmd() has four possible return values:
1772 + * 0 - pages are placed on the right node or queued successfully.
1773 + * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
1774 + * specified.
1775 + * 2 - THP was split.
1776 + * -EIO - is a migration entry or only MPOL_MF_STRICT was specified and an
1777 + * existing page was already on a node that does not follow the
1778 + * policy.
1779 */
1780 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
1781 unsigned long end, struct mm_walk *walk)
1782 @@ -451,23 +454,20 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
1783 if (is_huge_zero_page(page)) {
1784 spin_unlock(ptl);
1785 __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
1786 + ret = 2;
1787 goto out;
1788 }
1789 - if (!queue_pages_required(page, qp)) {
1790 - ret = 1;
1791 + if (!queue_pages_required(page, qp))
1792 goto unlock;
1793 - }
1794
1795 - ret = 1;
1796 flags = qp->flags;
1797 /* go to thp migration */
1798 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1799 - if (!vma_migratable(walk->vma)) {
1800 - ret = -EIO;
1801 + if (!vma_migratable(walk->vma) ||
1802 + migrate_page_add(page, qp->pagelist, flags)) {
1803 + ret = 1;
1804 goto unlock;
1805 }
1806 -
1807 - migrate_page_add(page, qp->pagelist, flags);
1808 } else
1809 ret = -EIO;
1810 unlock:
1811 @@ -479,6 +479,13 @@ out:
1812 /*
1813 * Scan through pages checking if pages follow certain conditions,
1814 * and move them to the pagelist if they do.
1815 + *
1816 + * queue_pages_pte_range() has three possible return values:
1817 + * 0 - pages are placed on the right node or queued successfully.
1818 + * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
1819 + * specified.
1820 + * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
1821 + * on a node that does not follow the policy.
1822 */
1823 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
1824 unsigned long end, struct mm_walk *walk)
1825 @@ -488,17 +495,17 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
1826 struct queue_pages *qp = walk->private;
1827 unsigned long flags = qp->flags;
1828 int ret;
1829 + bool has_unmovable = false;
1830 pte_t *pte;
1831 spinlock_t *ptl;
1832
1833 ptl = pmd_trans_huge_lock(pmd, vma);
1834 if (ptl) {
1835 ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
1836 - if (ret > 0)
1837 - return 0;
1838 - else if (ret < 0)
1839 + if (ret != 2)
1840 return ret;
1841 }
1842 + /* THP was split, fall through to pte walk */
1843
1844 if (pmd_trans_unstable(pmd))
1845 return 0;
1846 @@ -519,14 +526,28 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
1847 if (!queue_pages_required(page, qp))
1848 continue;
1849 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1850 - if (!vma_migratable(vma))
1851 + /* MPOL_MF_STRICT must be specified if we get here */
1852 + if (!vma_migratable(vma)) {
1853 + has_unmovable = true;
1854 break;
1855 - migrate_page_add(page, qp->pagelist, flags);
1856 + }
1857 +
1858 + /*
1859 + * Do not abort immediately since there may be
1860 + * temporarily off-LRU pages in the range. Still
1861 + * need to migrate other LRU pages.
1862 + */
1863 + if (migrate_page_add(page, qp->pagelist, flags))
1864 + has_unmovable = true;
1865 } else
1866 break;
1867 }
1868 pte_unmap_unlock(pte - 1, ptl);
1869 cond_resched();
1870 +
1871 + if (has_unmovable)
1872 + return 1;
1873 +
1874 return addr != end ? -EIO : 0;
1875 }
1876
1877 @@ -639,7 +660,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
1878 *
1879 * If pages found in a given range are on a set of nodes (determined by
1880 * @nodes and @flags,) it's isolated and queued to the pagelist which is
1881 - * passed via @private.)
1882 + * passed via @private.
1883 + *
1884 + * queue_pages_range() has three possible return values:
1885 + * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
1886 + * specified.
1887 + * 0 - queue pages successfully or no misplaced page.
1888 + * -EIO - there is a misplaced page and only MPOL_MF_STRICT was specified.
1889 */
1890 static int
1891 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
1892 @@ -926,7 +953,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
1893 /*
1894 * page migration, thp tail pages can be passed.
1895 */
1896 -static void migrate_page_add(struct page *page, struct list_head *pagelist,
1897 +static int migrate_page_add(struct page *page, struct list_head *pagelist,
1898 unsigned long flags)
1899 {
1900 struct page *head = compound_head(page);
1901 @@ -939,8 +966,19 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
1902 mod_node_page_state(page_pgdat(head),
1903 NR_ISOLATED_ANON + page_is_file_cache(head),
1904 hpage_nr_pages(head));
1905 + } else if (flags & MPOL_MF_STRICT) {
1906 + /*
1907 + * A non-movable page may reach here. Also, there may be
1908 + * temporarily off-LRU pages or non-LRU movable pages.
1909 + * Treat them as unmovable pages since they can't be
1910 + * isolated, so they can't be moved at the moment. It
1911 + * should return -EIO for this case too.
1912 + */
1913 + return -EIO;
1914 }
1915 }
1916 +
1917 + return 0;
1918 }
1919
1920 /* page allocation callback for NUMA node migration */
1921 @@ -1143,9 +1181,10 @@ static struct page *new_page(struct page *page, unsigned long start)
1922 }
1923 #else
1924
1925 -static void migrate_page_add(struct page *page, struct list_head *pagelist,
1926 +static int migrate_page_add(struct page *page, struct list_head *pagelist,
1927 unsigned long flags)
1928 {
1929 + return -EIO;
1930 }
1931
1932 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1933 @@ -1168,6 +1207,7 @@ static long do_mbind(unsigned long start, unsigned long len,
1934 struct mempolicy *new;
1935 unsigned long end;
1936 int err;
1937 + int ret;
1938 LIST_HEAD(pagelist);
1939
1940 if (flags & ~(unsigned long)MPOL_MF_VALID)
1941 @@ -1229,10 +1269,15 @@ static long do_mbind(unsigned long start, unsigned long len,
1942 if (err)
1943 goto mpol_out;
1944
1945 - err = queue_pages_range(mm, start, end, nmask,
1946 + ret = queue_pages_range(mm, start, end, nmask,
1947 flags | MPOL_MF_INVERT, &pagelist);
1948 - if (!err)
1949 - err = mbind_range(mm, start, end, new);
1950 +
1951 + if (ret < 0) {
1952 + err = -EIO;
1953 + goto up_out;
1954 + }
1955 +
1956 + err = mbind_range(mm, start, end, new);
1957
1958 if (!err) {
1959 int nr_failed = 0;
1960 @@ -1245,13 +1290,14 @@ static long do_mbind(unsigned long start, unsigned long len,
1961 putback_movable_pages(&pagelist);
1962 }
1963
1964 - if (nr_failed && (flags & MPOL_MF_STRICT))
1965 + if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
1966 err = -EIO;
1967 } else
1968 putback_movable_pages(&pagelist);
1969
1970 +up_out:
1971 up_write(&mm->mmap_sem);
1972 - mpol_out:
1973 +mpol_out:
1974 mpol_put(new);
1975 return err;
1976 }
1977 diff --git a/mm/rmap.c b/mm/rmap.c
1978 index f048c2651954..1bd94ea62f7f 100644
1979 --- a/mm/rmap.c
1980 +++ b/mm/rmap.c
1981 @@ -1467,7 +1467,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1982 /*
1983 * No need to invalidate here it will synchronize on
1984 * against the special swap migration pte.
1985 + *
1986 + * The assignment to subpage above was computed from a
1987 + * swap PTE which results in an invalid pointer.
1988 + * Since only PAGE_SIZE pages can currently be
1989 + * migrated, just set it to page. This will need to be
1990 + * changed when hugepage migrations to device private
1991 + * memory are supported.
1992 */
1993 + subpage = page;
1994 goto discard;
1995 }
1996
1997 diff --git a/mm/usercopy.c b/mm/usercopy.c
1998 index 14faadcedd06..51411f9c4068 100644
1999 --- a/mm/usercopy.c
2000 +++ b/mm/usercopy.c
2001 @@ -151,7 +151,7 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
2002 bool to_user)
2003 {
2004 /* Reject if object wraps past end of memory. */
2005 - if (ptr + n < ptr)
2006 + if (ptr + (n - 1) < ptr)
2007 usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);
2008
2009 /* Reject if NULL or ZERO-allocation. */
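A small userspace demonstration of why the check above uses ptr + (n - 1): with ptr + n, an object that ends exactly at the top of the address space is falsely reported as wrapping. The n == 0 case is assumed to be rejected separately (see the zero-allocation check that follows in the kernel code):

#include <stdio.h>

static int wraps_old(unsigned long ptr, unsigned long n) { return ptr + n < ptr; }
static int wraps_new(unsigned long ptr, unsigned long n) { return ptr + (n - 1) < ptr; }

int main(void)
{
	unsigned long top = ~0UL;		/* highest addressable byte */
	unsigned long ptr = top - 15;		/* 16-byte object ending exactly at top */

	printf("%d %d\n", wraps_old(ptr, 16), wraps_new(ptr, 16));	/* 1 0: false positive fixed */
	printf("%d %d\n", wraps_old(ptr, 17), wraps_new(ptr, 17));	/* 1 1: real wrap still caught */
	return 0;
}
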
2010 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
2011 index 0bb4d712b80c..995b3842ba7c 100644
2012 --- a/net/bridge/netfilter/ebtables.c
2013 +++ b/net/bridge/netfilter/ebtables.c
2014 @@ -1779,20 +1779,28 @@ static int compat_calc_entry(const struct ebt_entry *e,
2015 return 0;
2016 }
2017
2018 +static int ebt_compat_init_offsets(unsigned int number)
2019 +{
2020 + if (number > INT_MAX)
2021 + return -EINVAL;
2022 +
2023 + /* also count the base chain policies */
2024 + number += NF_BR_NUMHOOKS;
2025 +
2026 + return xt_compat_init_offsets(NFPROTO_BRIDGE, number);
2027 +}
2028
2029 static int compat_table_info(const struct ebt_table_info *info,
2030 struct compat_ebt_replace *newinfo)
2031 {
2032 unsigned int size = info->entries_size;
2033 const void *entries = info->entries;
2034 + int ret;
2035
2036 newinfo->entries_size = size;
2037 - if (info->nentries) {
2038 - int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
2039 - info->nentries);
2040 - if (ret)
2041 - return ret;
2042 - }
2043 + ret = ebt_compat_init_offsets(info->nentries);
2044 + if (ret)
2045 + return ret;
2046
2047 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
2048 entries, newinfo);
2049 @@ -2241,11 +2249,9 @@ static int compat_do_replace(struct net *net, void __user *user,
2050
2051 xt_compat_lock(NFPROTO_BRIDGE);
2052
2053 - if (tmp.nentries) {
2054 - ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
2055 - if (ret < 0)
2056 - goto out_unlock;
2057 - }
2058 + ret = ebt_compat_init_offsets(tmp.nentries);
2059 + if (ret < 0)
2060 + goto out_unlock;
2061
2062 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2063 if (ret < 0)
2064 diff --git a/net/dsa/switch.c b/net/dsa/switch.c
2065 index 142b294d3446..b0b9413fa5bf 100644
2066 --- a/net/dsa/switch.c
2067 +++ b/net/dsa/switch.c
2068 @@ -127,6 +127,9 @@ static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds,
2069 {
2070 int port;
2071
2072 + if (!ds->ops->port_mdb_add)
2073 + return;
2074 +
2075 for_each_set_bit(port, bitmap, ds->num_ports)
2076 ds->ops->port_mdb_add(ds, port, mdb);
2077 }
2078 diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
2079 index 27eff89fad01..c6073d17c324 100644
2080 --- a/net/netfilter/nf_conntrack_core.c
2081 +++ b/net/netfilter/nf_conntrack_core.c
2082 @@ -431,13 +431,12 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
2083 * table location, we assume id gets exposed to userspace.
2084 *
2085 * Following nf_conn items do not change throughout lifetime
2086 - * of the nf_conn after it has been committed to main hash table:
2087 + * of the nf_conn:
2088 *
2089 * 1. nf_conn address
2090 - * 2. nf_conn->ext address
2091 - * 3. nf_conn->master address (normally NULL)
2092 - * 4. tuple
2093 - * 5. the associated net namespace
2094 + * 2. nf_conn->master address (normally NULL)
2095 + * 3. the associated net namespace
2096 + * 4. the original direction tuple
2097 */
2098 u32 nf_ct_get_id(const struct nf_conn *ct)
2099 {
2100 @@ -447,9 +446,10 @@ u32 nf_ct_get_id(const struct nf_conn *ct)
2101 net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
2102
2103 a = (unsigned long)ct;
2104 - b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
2105 - c = (unsigned long)ct->ext;
2106 - d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
2107 + b = (unsigned long)ct->master;
2108 + c = (unsigned long)nf_ct_net(ct);
2109 + d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
2110 + sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
2111 &ct_id_seed);
2112 #ifdef CONFIG_64BIT
2113 return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
2114 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2115 index 93b5a4200585..7204e7bbebb0 100644
2116 --- a/net/packet/af_packet.c
2117 +++ b/net/packet/af_packet.c
2118 @@ -2616,6 +2616,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2119
2120 mutex_lock(&po->pg_vec_lock);
2121
2122 + /* packet_sendmsg() check on tx_ring.pg_vec was lockless,
2123 + * we need to confirm it under protection of pg_vec_lock.
2124 + */
2125 + if (unlikely(!po->tx_ring.pg_vec)) {
2126 + err = -EBUSY;
2127 + goto out;
2128 + }
2129 if (likely(saddr == NULL)) {
2130 dev = packet_cached_dev_get(po);
2131 proto = po->num;
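The hunk above follows the usual check-then-recheck pattern: the lockless test in packet_sendmsg() is only a hint, so it is repeated once pg_vec_lock is held. A hedged pthread sketch of the same idea; struct ring and use_ring() are inventions of this example:

#include <pthread.h>
#include <stddef.h>

struct ring {
	pthread_mutex_t lock;
	void *pg_vec;			/* NULL means "no ring mapped" */
};

static int use_ring(struct ring *r)
{
	if (!r->pg_vec)			/* lockless fast-path check, may be stale */
		return -1;

	pthread_mutex_lock(&r->lock);
	if (!r->pg_vec) {		/* authoritative re-check under the lock */
		pthread_mutex_unlock(&r->lock);
		return -1;
	}
	/* ... safe to use r->pg_vec here ... */
	pthread_mutex_unlock(&r->lock);
	return 0;
}

int main(void)
{
	struct ring r = { .pg_vec = NULL };

	pthread_mutex_init(&r.lock, NULL);
	return use_ring(&r) == -1 ? 0 : 1;	/* an unmapped ring is refused */
}
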
2132 diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
2133 index 3131b4154c74..28adac31f0ff 100644
2134 --- a/net/sctp/sm_sideeffect.c
2135 +++ b/net/sctp/sm_sideeffect.c
2136 @@ -561,7 +561,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
2137 */
2138 if (net->sctp.pf_enable &&
2139 (transport->state == SCTP_ACTIVE) &&
2140 - (asoc->pf_retrans < transport->pathmaxrxt) &&
2141 + (transport->error_count < transport->pathmaxrxt) &&
2142 (transport->error_count > asoc->pf_retrans)) {
2143
2144 sctp_assoc_control_transport(asoc, transport,
2145 diff --git a/net/sctp/stream.c b/net/sctp/stream.c
2146 index 0da57938a6c5..87061a4bb44b 100644
2147 --- a/net/sctp/stream.c
2148 +++ b/net/sctp/stream.c
2149 @@ -416,6 +416,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
2150 nstr_list[i] = htons(str_list[i]);
2151
2152 if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
2153 + kfree(nstr_list);
2154 retval = -EAGAIN;
2155 goto out;
2156 }
2157 diff --git a/net/tipc/addr.c b/net/tipc/addr.c
2158 index b88d48d00913..0f1eaed1bd1b 100644
2159 --- a/net/tipc/addr.c
2160 +++ b/net/tipc/addr.c
2161 @@ -75,6 +75,7 @@ void tipc_set_node_addr(struct net *net, u32 addr)
2162 tipc_set_node_id(net, node_id);
2163 }
2164 tn->trial_addr = addr;
2165 + tn->addr_trial_end = jiffies;
2166 pr_info("32-bit node address hash set to %x\n", addr);
2167 }
2168
2169 diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
2170 index dad5583451af..3b2861f47709 100644
2171 --- a/scripts/Kconfig.include
2172 +++ b/scripts/Kconfig.include
2173 @@ -20,7 +20,7 @@ success = $(if-success,$(1),y,n)
2174
2175 # $(cc-option,<flag>)
2176 # Return y if the compiler supports <flag>, n otherwise
2177 -cc-option = $(success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null)
2178 +cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -E -x c /dev/null -o /dev/null)
2179
2180 # $(ld-option,<flag>)
2181 # Return y if the linker supports <flag>, n otherwise
2182 diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
2183 index 7d4af0d0accb..51884c7b8069 100644
2184 --- a/scripts/Makefile.modpost
2185 +++ b/scripts/Makefile.modpost
2186 @@ -75,7 +75,7 @@ modpost = scripts/mod/modpost \
2187 $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,) \
2188 $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \
2189 $(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \
2190 - $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \
2191 + $(if $(KBUILD_EXTMOD),$(addprefix -e ,$(KBUILD_EXTRA_SYMBOLS))) \
2192 $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
2193 $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S) \
2194 $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \
2195 diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
2196 index 579984ecdec3..bb2bd33b00ec 100644
2197 --- a/sound/pci/hda/hda_generic.c
2198 +++ b/sound/pci/hda/hda_generic.c
2199 @@ -6033,6 +6033,24 @@ void snd_hda_gen_free(struct hda_codec *codec)
2200 }
2201 EXPORT_SYMBOL_GPL(snd_hda_gen_free);
2202
2203 +/**
2204 + * snd_hda_gen_reboot_notify - Make codec enter D3 before rebooting
2205 + * @codec: the HDA codec
2206 + *
2207 + * This can be put as patch_ops reboot_notify function.
2208 + */
2209 +void snd_hda_gen_reboot_notify(struct hda_codec *codec)
2210 +{
2211 + /* Make the codec enter D3 to avoid spurious noises from the internal
2212 + * speaker during (and after) reboot
2213 + */
2214 + snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
2215 + snd_hda_codec_write(codec, codec->core.afg, 0,
2216 + AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
2217 + msleep(10);
2218 +}
2219 +EXPORT_SYMBOL_GPL(snd_hda_gen_reboot_notify);
2220 +
2221 #ifdef CONFIG_PM
2222 /**
2223 * snd_hda_gen_check_power_status - check the loopback power save state
2224 @@ -6060,6 +6078,7 @@ static const struct hda_codec_ops generic_patch_ops = {
2225 .init = snd_hda_gen_init,
2226 .free = snd_hda_gen_free,
2227 .unsol_event = snd_hda_jack_unsol_event,
2228 + .reboot_notify = snd_hda_gen_reboot_notify,
2229 #ifdef CONFIG_PM
2230 .check_power_status = snd_hda_gen_check_power_status,
2231 #endif
2232 @@ -6082,7 +6101,7 @@ static int snd_hda_parse_generic_codec(struct hda_codec *codec)
2233
2234 err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0);
2235 if (err < 0)
2236 - return err;
2237 + goto error;
2238
2239 err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg);
2240 if (err < 0)
2241 diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
2242 index 10123664fa61..ce9c293717b9 100644
2243 --- a/sound/pci/hda/hda_generic.h
2244 +++ b/sound/pci/hda/hda_generic.h
2245 @@ -336,6 +336,7 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
2246 struct auto_pin_cfg *cfg);
2247 int snd_hda_gen_build_controls(struct hda_codec *codec);
2248 int snd_hda_gen_build_pcms(struct hda_codec *codec);
2249 +void snd_hda_gen_reboot_notify(struct hda_codec *codec);
2250
2251 /* standard jack event callbacks */
2252 void snd_hda_gen_hp_automute(struct hda_codec *codec,
2253 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
2254 index 81cea34aff1c..7a3e34b120b3 100644
2255 --- a/sound/pci/hda/hda_intel.c
2256 +++ b/sound/pci/hda/hda_intel.c
2257 @@ -2655,6 +2655,9 @@ static const struct pci_device_id azx_ids[] = {
2258 /* AMD, X370 & co */
2259 { PCI_DEVICE(0x1022, 0x1457),
2260 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
2261 + /* AMD, X570 & co */
2262 + { PCI_DEVICE(0x1022, 0x1487),
2263 + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
2264 /* AMD Stoney */
2265 { PCI_DEVICE(0x1022, 0x157a),
2266 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
2267 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
2268 index b70fbfa80546..6f17b256fcd0 100644
2269 --- a/sound/pci/hda/patch_conexant.c
2270 +++ b/sound/pci/hda/patch_conexant.c
2271 @@ -176,23 +176,10 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
2272 {
2273 struct conexant_spec *spec = codec->spec;
2274
2275 - switch (codec->core.vendor_id) {
2276 - case 0x14f12008: /* CX8200 */
2277 - case 0x14f150f2: /* CX20722 */
2278 - case 0x14f150f4: /* CX20724 */
2279 - break;
2280 - default:
2281 - return;
2282 - }
2283 -
2284 /* Turn the problematic codec into D3 to avoid spurious noises
2285 from the internal speaker during (and after) reboot */
2286 cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
2287 -
2288 - snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
2289 - snd_hda_codec_write(codec, codec->core.afg, 0,
2290 - AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
2291 - msleep(10);
2292 + snd_hda_gen_reboot_notify(codec);
2293 }
2294
2295 static void cx_auto_free(struct hda_codec *codec)
2296 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2297 index dc1989686f09..9b5caf099bfb 100644
2298 --- a/sound/pci/hda/patch_realtek.c
2299 +++ b/sound/pci/hda/patch_realtek.c
2300 @@ -868,15 +868,6 @@ static void alc_reboot_notify(struct hda_codec *codec)
2301 alc_shutup(codec);
2302 }
2303
2304 -/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
2305 -static void alc_d3_at_reboot(struct hda_codec *codec)
2306 -{
2307 - snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
2308 - snd_hda_codec_write(codec, codec->core.afg, 0,
2309 - AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
2310 - msleep(10);
2311 -}
2312 -
2313 #define alc_free snd_hda_gen_free
2314
2315 #ifdef CONFIG_PM
2316 @@ -5111,7 +5102,7 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
2317 struct alc_spec *spec = codec->spec;
2318
2319 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
2320 - spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
2321 + spec->reboot_notify = snd_hda_gen_reboot_notify; /* reduce noise */
2322 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
2323 codec->power_save_node = 0; /* avoid click noises */
2324 snd_hda_apply_pincfgs(codec, pincfgs);
2325 @@ -6851,6 +6842,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2326 SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
2327 SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
2328 SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
2329 + SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
2330 SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
2331 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
2332 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
2333 diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
2334 index 7e1c6c2dc99e..4b3e1c48ca2f 100644
2335 --- a/sound/usb/mixer.c
2336 +++ b/sound/usb/mixer.c
2337 @@ -83,6 +83,7 @@ struct mixer_build {
2338 unsigned char *buffer;
2339 unsigned int buflen;
2340 DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
2341 + DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
2342 struct usb_audio_term oterm;
2343 const struct usbmix_name_map *map;
2344 const struct usbmix_selector_map *selector_map;
2345 @@ -759,6 +760,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
2346 return -EINVAL;
2347 if (!desc->bNrInPins)
2348 return -EINVAL;
2349 + if (desc->bLength < sizeof(*desc) + desc->bNrInPins)
2350 + return -EINVAL;
2351
2352 switch (state->mixer->protocol) {
2353 case UAC_VERSION_1:
2354 @@ -788,16 +791,25 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
2355 * parse the source unit recursively until it reaches to a terminal
2356 * or a branched unit.
2357 */
2358 -static int check_input_term(struct mixer_build *state, int id,
2359 +static int __check_input_term(struct mixer_build *state, int id,
2360 struct usb_audio_term *term)
2361 {
2362 int protocol = state->mixer->protocol;
2363 int err;
2364 void *p1;
2365 + unsigned char *hdr;
2366
2367 memset(term, 0, sizeof(*term));
2368 - while ((p1 = find_audio_control_unit(state, id)) != NULL) {
2369 - unsigned char *hdr = p1;
2370 + for (;;) {
2371 + /* a loop in the terminal chain? */
2372 + if (test_and_set_bit(id, state->termbitmap))
2373 + return -EINVAL;
2374 +
2375 + p1 = find_audio_control_unit(state, id);
2376 + if (!p1)
2377 + break;
2378 +
2379 + hdr = p1;
2380 term->id = id;
2381
2382 if (protocol == UAC_VERSION_1 || protocol == UAC_VERSION_2) {
2383 @@ -815,7 +827,7 @@ static int check_input_term(struct mixer_build *state, int id,
2384
2385 /* call recursively to verify that the
2386 * referenced clock entity is valid */
2387 - err = check_input_term(state, d->bCSourceID, term);
2388 + err = __check_input_term(state, d->bCSourceID, term);
2389 if (err < 0)
2390 return err;
2391
2392 @@ -849,7 +861,7 @@ static int check_input_term(struct mixer_build *state, int id,
2393 case UAC2_CLOCK_SELECTOR: {
2394 struct uac_selector_unit_descriptor *d = p1;
2395 /* call recursively to retrieve the channel info */
2396 - err = check_input_term(state, d->baSourceID[0], term);
2397 + err = __check_input_term(state, d->baSourceID[0], term);
2398 if (err < 0)
2399 return err;
2400 term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
2401 @@ -912,7 +924,7 @@ static int check_input_term(struct mixer_build *state, int id,
2402
2403 /* call recursively to verify that the
2404 * referenced clock entity is valid */
2405 - err = check_input_term(state, d->bCSourceID, term);
2406 + err = __check_input_term(state, d->bCSourceID, term);
2407 if (err < 0)
2408 return err;
2409
2410 @@ -963,7 +975,7 @@ static int check_input_term(struct mixer_build *state, int id,
2411 case UAC3_CLOCK_SELECTOR: {
2412 struct uac_selector_unit_descriptor *d = p1;
2413 /* call recursively to retrieve the channel info */
2414 - err = check_input_term(state, d->baSourceID[0], term);
2415 + err = __check_input_term(state, d->baSourceID[0], term);
2416 if (err < 0)
2417 return err;
2418 term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
2419 @@ -979,7 +991,7 @@ static int check_input_term(struct mixer_build *state, int id,
2420 return -EINVAL;
2421
2422 /* call recursively to retrieve the channel info */
2423 - err = check_input_term(state, d->baSourceID[0], term);
2424 + err = __check_input_term(state, d->baSourceID[0], term);
2425 if (err < 0)
2426 return err;
2427
2428 @@ -997,6 +1009,15 @@ static int check_input_term(struct mixer_build *state, int id,
2429 return -ENODEV;
2430 }
2431
2432 +
2433 +static int check_input_term(struct mixer_build *state, int id,
2434 + struct usb_audio_term *term)
2435 +{
2436 + memset(term, 0, sizeof(*term));
2437 + memset(state->termbitmap, 0, sizeof(state->termbitmap));
2438 + return __check_input_term(state, id, term);
2439 +}
2440 +
2441 /*
2442 * Feature Unit
2443 */
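The termbitmap added above guards against descriptor cycles. A self-contained sketch of the same visited-bitmap technique on a toy "next unit" array; the array and names are assumptions of this example, not the USB-audio descriptor layout:

#include <stdio.h>
#include <string.h>

#define MAX_IDS 256

static int follow_chain(const int *next, int id)
{
	unsigned char visited[MAX_IDS / 8] = { 0 };

	while (id >= 0 && id < MAX_IDS) {
		if (visited[id / 8] & (1 << (id % 8)))
			return -1;			/* loop in the chain */
		visited[id / 8] |= 1 << (id % 8);
		if (next[id] < 0)
			return id;			/* reached a terminal */
		id = next[id];
	}
	return -1;
}

int main(void)
{
	int next[MAX_IDS];

	memset(next, -1, sizeof(next));
	next[1] = 2; next[2] = 3;			/* 1 -> 2 -> 3 (terminal) */
	printf("%d\n", follow_chain(next, 1));		/* prints: 3 */
	next[3] = 1;					/* introduce a cycle */
	printf("%d\n", follow_chain(next, 1));		/* prints: -1 */
	return 0;
}
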
2444 diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
2445 index a94bd6850a0b..54c34c107cab 100644
2446 --- a/tools/perf/util/header.c
2447 +++ b/tools/perf/util/header.c
2448 @@ -3285,6 +3285,13 @@ int perf_session__read_header(struct perf_session *session)
2449 data->file.path);
2450 }
2451
2452 + if (f_header.attr_size == 0) {
2453 + pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
2454 + "Was the 'perf record' command properly terminated?\n",
2455 + data->file.path);
2456 + return -EINVAL;
2457 + }
2458 +
2459 nr_attrs = f_header.attrs.size / f_header.attr_size;
2460 lseek(fd, f_header.attrs.offset, SEEK_SET);
2461
2462 @@ -3365,7 +3372,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
2463 size += sizeof(struct perf_event_header);
2464 size += ids * sizeof(u64);
2465
2466 - ev = malloc(size);
2467 + ev = zalloc(size);
2468
2469 if (ev == NULL)
2470 return -ENOMEM;
2471 diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
2472 index 02bac8abd206..d982650deb33 100644
2473 --- a/virt/kvm/arm/arm.c
2474 +++ b/virt/kvm/arm/arm.c
2475 @@ -338,6 +338,17 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
2476 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
2477 {
2478 kvm_timer_schedule(vcpu);
2479 + /*
2480 + * If we're about to block (most likely because we've just hit a
2481 + * WFI), we need to sync back the state of the GIC CPU interface
2482 + * so that we have the latest PMR and group enables. This ensures
2483 + * that kvm_arch_vcpu_runnable has up-to-date data to decide
2484 + * whether we have pending interrupts.
2485 + */
2486 + preempt_disable();
2487 + kvm_vgic_vmcr_sync(vcpu);
2488 + preempt_enable();
2489 +
2490 kvm_vgic_v4_enable_doorbell(vcpu);
2491 }
2492
2493 diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
2494 index 69b892abd7dc..57281c1594d0 100644
2495 --- a/virt/kvm/arm/vgic/vgic-v2.c
2496 +++ b/virt/kvm/arm/vgic/vgic-v2.c
2497 @@ -495,10 +495,17 @@ void vgic_v2_load(struct kvm_vcpu *vcpu)
2498 kvm_vgic_global_state.vctrl_base + GICH_APR);
2499 }
2500
2501 -void vgic_v2_put(struct kvm_vcpu *vcpu)
2502 +void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
2503 {
2504 struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
2505
2506 cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
2507 +}
2508 +
2509 +void vgic_v2_put(struct kvm_vcpu *vcpu)
2510 +{
2511 + struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
2512 +
2513 + vgic_v2_vmcr_sync(vcpu);
2514 cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
2515 }
2516 diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
2517 index 3f2350a4d4ab..5c55995a1a16 100644
2518 --- a/virt/kvm/arm/vgic/vgic-v3.c
2519 +++ b/virt/kvm/arm/vgic/vgic-v3.c
2520 @@ -674,12 +674,17 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
2521 __vgic_v3_activate_traps(vcpu);
2522 }
2523
2524 -void vgic_v3_put(struct kvm_vcpu *vcpu)
2525 +void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
2526 {
2527 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
2528
2529 if (likely(cpu_if->vgic_sre))
2530 cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr);
2531 +}
2532 +
2533 +void vgic_v3_put(struct kvm_vcpu *vcpu)
2534 +{
2535 + vgic_v3_vmcr_sync(vcpu);
2536
2537 kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
2538
2539 diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
2540 index c5165e3b80cb..250cd72c95a5 100644
2541 --- a/virt/kvm/arm/vgic/vgic.c
2542 +++ b/virt/kvm/arm/vgic/vgic.c
2543 @@ -902,6 +902,17 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
2544 vgic_v3_put(vcpu);
2545 }
2546
2547 +void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
2548 +{
2549 + if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
2550 + return;
2551 +
2552 + if (kvm_vgic_global_state.type == VGIC_V2)
2553 + vgic_v2_vmcr_sync(vcpu);
2554 + else
2555 + vgic_v3_vmcr_sync(vcpu);
2556 +}
2557 +
2558 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
2559 {
2560 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
2561 diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
2562 index a90024718ca4..d5e454279925 100644
2563 --- a/virt/kvm/arm/vgic/vgic.h
2564 +++ b/virt/kvm/arm/vgic/vgic.h
2565 @@ -204,6 +204,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
2566 void vgic_v2_init_lrs(void);
2567 void vgic_v2_load(struct kvm_vcpu *vcpu);
2568 void vgic_v2_put(struct kvm_vcpu *vcpu);
2569 +void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
2570
2571 void vgic_v2_save_state(struct kvm_vcpu *vcpu);
2572 void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
2573 @@ -234,6 +235,7 @@ bool vgic_v3_check_base(struct kvm *kvm);
2574
2575 void vgic_v3_load(struct kvm_vcpu *vcpu);
2576 void vgic_v3_put(struct kvm_vcpu *vcpu);
2577 +void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);
2578
2579 bool vgic_has_its(struct kvm *kvm);
2580 int kvm_vgic_register_its_device(void);