Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.8/0110-4.8.11-all-fixes.patch

Parent Directory | Revision Log


Revision 2850 - (hide annotations) (download)
Fri Jan 6 09:45:08 2017 UTC (7 years, 4 months ago) by niro
File size: 79029 byte(s)
-linux-4.8.11
1 niro 2850 diff --git a/Makefile b/Makefile
2     index 7cf2b4985703..2b1bcbacebcd 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 8
8     -SUBLEVEL = 10
9     +SUBLEVEL = 11
10     EXTRAVERSION =
11     NAME = Psychotic Stoned Sheep
12    
13     @@ -399,11 +399,12 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
14     -fno-strict-aliasing -fno-common \
15     -Werror-implicit-function-declaration \
16     -Wno-format-security \
17     - -std=gnu89
18     + -std=gnu89 $(call cc-option,-fno-PIE)
19     +
20    
21     KBUILD_AFLAGS_KERNEL :=
22     KBUILD_CFLAGS_KERNEL :=
23     -KBUILD_AFLAGS := -D__ASSEMBLY__
24     +KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
25     KBUILD_AFLAGS_MODULE := -DMODULE
26     KBUILD_CFLAGS_MODULE := -DMODULE
27     KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
28     @@ -621,6 +622,7 @@ include arch/$(SRCARCH)/Makefile
29    
30     KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
31     KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
32     +KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
33    
34     ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
35     KBUILD_CFLAGS += -Os
36     diff --git a/arch/arm/boot/dts/imx53-qsb.dts b/arch/arm/boot/dts/imx53-qsb.dts
37     index dec4b073ceb1..379939699164 100644
38     --- a/arch/arm/boot/dts/imx53-qsb.dts
39     +++ b/arch/arm/boot/dts/imx53-qsb.dts
40     @@ -64,8 +64,8 @@
41     };
42    
43     ldo3_reg: ldo3 {
44     - regulator-min-microvolt = <600000>;
45     - regulator-max-microvolt = <1800000>;
46     + regulator-min-microvolt = <1725000>;
47     + regulator-max-microvolt = <3300000>;
48     regulator-always-on;
49     };
50    
51     @@ -76,8 +76,8 @@
52     };
53    
54     ldo5_reg: ldo5 {
55     - regulator-min-microvolt = <1725000>;
56     - regulator-max-microvolt = <3300000>;
57     + regulator-min-microvolt = <1200000>;
58     + regulator-max-microvolt = <3600000>;
59     regulator-always-on;
60     };
61    
62     @@ -100,14 +100,14 @@
63     };
64    
65     ldo9_reg: ldo9 {
66     - regulator-min-microvolt = <1200000>;
67     + regulator-min-microvolt = <1250000>;
68     regulator-max-microvolt = <3600000>;
69     regulator-always-on;
70     };
71    
72     ldo10_reg: ldo10 {
73     - regulator-min-microvolt = <1250000>;
74     - regulator-max-microvolt = <3650000>;
75     + regulator-min-microvolt = <1200000>;
76     + regulator-max-microvolt = <3600000>;
77     regulator-always-on;
78     };
79     };
80     diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
81     index 2065f46fa740..38b6a2b49d68 100644
82     --- a/arch/arm64/include/asm/perf_event.h
83     +++ b/arch/arm64/include/asm/perf_event.h
84     @@ -46,7 +46,15 @@
85     #define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
86     #define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
87    
88     -#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */
89     +/*
90     + * PMUv3 event types: required events
91     + */
92     +#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00
93     +#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03
94     +#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04
95     +#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10
96     +#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11
97     +#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12
98    
99     /*
100     * Event filters for PMUv3
101     diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
102     index 838ccf123307..2c4df1520c45 100644
103     --- a/arch/arm64/kernel/perf_event.c
104     +++ b/arch/arm64/kernel/perf_event.c
105     @@ -30,17 +30,9 @@
106    
107     /*
108     * ARMv8 PMUv3 Performance Events handling code.
109     - * Common event types.
110     + * Common event types (some are defined in asm/perf_event.h).
111     */
112    
113     -/* Required events. */
114     -#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00
115     -#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03
116     -#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04
117     -#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10
118     -#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11
119     -#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12
120     -
121     /* At least one of the following is required. */
122     #define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08
123     #define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B
124     diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
125     index e51367d159d0..31c144f7339a 100644
126     --- a/arch/arm64/kvm/sys_regs.c
127     +++ b/arch/arm64/kvm/sys_regs.c
128     @@ -602,8 +602,14 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
129    
130     idx = ARMV8_PMU_CYCLE_IDX;
131     } else {
132     - BUG();
133     + return false;
134     }
135     + } else if (r->CRn == 0 && r->CRm == 9) {
136     + /* PMCCNTR */
137     + if (pmu_access_event_counter_el0_disabled(vcpu))
138     + return false;
139     +
140     + idx = ARMV8_PMU_CYCLE_IDX;
141     } else if (r->CRn == 14 && (r->CRm & 12) == 8) {
142     /* PMEVCNTRn_EL0 */
143     if (pmu_access_event_counter_el0_disabled(vcpu))
144     @@ -611,7 +617,7 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
145    
146     idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
147     } else {
148     - BUG();
149     + return false;
150     }
151    
152     if (!pmu_counter_idx_valid(vcpu, idx))
153     diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
154     index 7ac8e6eaab5b..8d586cff8a41 100644
155     --- a/arch/powerpc/kernel/setup_64.c
156     +++ b/arch/powerpc/kernel/setup_64.c
157     @@ -226,17 +226,25 @@ static void __init configure_exceptions(void)
158     if (firmware_has_feature(FW_FEATURE_OPAL))
159     opal_configure_cores();
160    
161     - /* Enable AIL if supported, and we are in hypervisor mode */
162     - if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
163     - early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
164     - unsigned long lpcr = mfspr(SPRN_LPCR);
165     - mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
166     - }
167     + /* AIL on native is done in cpu_ready_for_interrupts() */
168     }
169     }
170    
171     static void cpu_ready_for_interrupts(void)
172     {
173     + /*
174     + * Enable AIL if supported, and we are in hypervisor mode. This
175     + * is called once for every processor.
176     + *
177     + * If we are not in hypervisor mode the job is done once for
178     + * the whole partition in configure_exceptions().
179     + */
180     + if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
181     + early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
182     + unsigned long lpcr = mfspr(SPRN_LPCR);
183     + mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
184     + }
185     +
186     /* Set IR and DR in PACA MSR */
187     get_paca()->kernel_msr = MSR_KERNEL;
188     }
189     diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
190     index b81fe2d63e15..1e81a37c034e 100644
191     --- a/arch/x86/kernel/cpu/amd.c
192     +++ b/arch/x86/kernel/cpu/amd.c
193     @@ -347,7 +347,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
194     #ifdef CONFIG_SMP
195     unsigned bits;
196     int cpu = smp_processor_id();
197     - unsigned int socket_id, core_complex_id;
198    
199     bits = c->x86_coreid_bits;
200     /* Low order bits define the core id (index of core in socket) */
201     @@ -365,10 +364,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
202     if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
203     return;
204    
205     - socket_id = (c->apicid >> bits) - 1;
206     - core_complex_id = (c->apicid & ((1 << bits) - 1)) >> 3;
207     -
208     - per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id;
209     + per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
210     #endif
211     }
212    
213     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
214     index 46f74d461f3f..2fff65794f46 100644
215     --- a/arch/x86/kvm/x86.c
216     +++ b/arch/x86/kvm/x86.c
217     @@ -210,7 +210,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
218     struct kvm_shared_msrs *locals
219     = container_of(urn, struct kvm_shared_msrs, urn);
220     struct kvm_shared_msr_values *values;
221     + unsigned long flags;
222    
223     + /*
224     + * Disabling irqs at this point since the following code could be
225     + * interrupted and executed through kvm_arch_hardware_disable()
226     + */
227     + local_irq_save(flags);
228     + if (locals->registered) {
229     + locals->registered = false;
230     + user_return_notifier_unregister(urn);
231     + }
232     + local_irq_restore(flags);
233     for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
234     values = &locals->values[slot];
235     if (values->host != values->curr) {
236     @@ -218,8 +229,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
237     values->curr = values->host;
238     }
239     }
240     - locals->registered = false;
241     - user_return_notifier_unregister(urn);
242     }
243    
244     static void shared_msr_update(unsigned slot, u32 msr)
245     @@ -3372,6 +3381,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
246     };
247     case KVM_SET_VAPIC_ADDR: {
248     struct kvm_vapic_addr va;
249     + int idx;
250    
251     r = -EINVAL;
252     if (!lapic_in_kernel(vcpu))
253     @@ -3379,7 +3389,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
254     r = -EFAULT;
255     if (copy_from_user(&va, argp, sizeof va))
256     goto out;
257     + idx = srcu_read_lock(&vcpu->kvm->srcu);
258     r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
259     + srcu_read_unlock(&vcpu->kvm->srcu, idx);
260     break;
261     }
262     case KVM_X86_SETUP_MCE: {
263     diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
264     index ac58c1616408..555b9fa0ad43 100644
265     --- a/arch/x86/purgatory/Makefile
266     +++ b/arch/x86/purgatory/Makefile
267     @@ -16,6 +16,7 @@ KCOV_INSTRUMENT := n
268    
269     KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large
270     KBUILD_CFLAGS += -m$(BITS)
271     +KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
272    
273     $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
274     $(call if_changed,ld)
275     diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
276     index e44944f4be77..2932a5bd892f 100644
277     --- a/drivers/base/power/main.c
278     +++ b/drivers/base/power/main.c
279     @@ -1027,6 +1027,8 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
280     TRACE_DEVICE(dev);
281     TRACE_SUSPEND(0);
282    
283     + dpm_wait_for_children(dev, async);
284     +
285     if (async_error)
286     goto Complete;
287    
288     @@ -1038,8 +1040,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
289     if (dev->power.syscore || dev->power.direct_complete)
290     goto Complete;
291    
292     - dpm_wait_for_children(dev, async);
293     -
294     if (dev->pm_domain) {
295     info = "noirq power domain ";
296     callback = pm_noirq_op(&dev->pm_domain->ops, state);
297     @@ -1174,6 +1174,8 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
298    
299     __pm_runtime_disable(dev, false);
300    
301     + dpm_wait_for_children(dev, async);
302     +
303     if (async_error)
304     goto Complete;
305    
306     @@ -1185,8 +1187,6 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
307     if (dev->power.syscore || dev->power.direct_complete)
308     goto Complete;
309    
310     - dpm_wait_for_children(dev, async);
311     -
312     if (dev->pm_domain) {
313     info = "late power domain ";
314     callback = pm_late_early_op(&dev->pm_domain->ops, state);
315     diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
316     index 19f9b622981a..7a6acc3e4a92 100644
317     --- a/drivers/clk/imx/clk-pllv3.c
318     +++ b/drivers/clk/imx/clk-pllv3.c
319     @@ -223,7 +223,7 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
320     temp64 *= mfn;
321     do_div(temp64, mfd);
322    
323     - return (parent_rate * div) + (u32)temp64;
324     + return parent_rate * div + (unsigned long)temp64;
325     }
326    
327     static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
328     @@ -247,7 +247,11 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
329     do_div(temp64, parent_rate);
330     mfn = temp64;
331    
332     - return parent_rate * div + parent_rate * mfn / mfd;
333     + temp64 = (u64)parent_rate;
334     + temp64 *= mfn;
335     + do_div(temp64, mfd);
336     +
337     + return parent_rate * div + (unsigned long)temp64;
338     }
339    
340     static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
341     diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
342     index 3a51fff1b0e7..9adaf48aea23 100644
343     --- a/drivers/clk/mmp/clk-of-mmp2.c
344     +++ b/drivers/clk/mmp/clk-of-mmp2.c
345     @@ -313,7 +313,7 @@ static void __init mmp2_clk_init(struct device_node *np)
346     }
347    
348     pxa_unit->apmu_base = of_iomap(np, 1);
349     - if (!pxa_unit->mpmu_base) {
350     + if (!pxa_unit->apmu_base) {
351     pr_err("failed to map apmu registers\n");
352     return;
353     }
354     diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
355     index 87f2317b2a00..f110c02e83cb 100644
356     --- a/drivers/clk/mmp/clk-of-pxa168.c
357     +++ b/drivers/clk/mmp/clk-of-pxa168.c
358     @@ -262,7 +262,7 @@ static void __init pxa168_clk_init(struct device_node *np)
359     }
360    
361     pxa_unit->apmu_base = of_iomap(np, 1);
362     - if (!pxa_unit->mpmu_base) {
363     + if (!pxa_unit->apmu_base) {
364     pr_err("failed to map apmu registers\n");
365     return;
366     }
367     diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c
368     index e22a67f76d93..64d1ef49caeb 100644
369     --- a/drivers/clk/mmp/clk-of-pxa910.c
370     +++ b/drivers/clk/mmp/clk-of-pxa910.c
371     @@ -282,7 +282,7 @@ static void __init pxa910_clk_init(struct device_node *np)
372     }
373    
374     pxa_unit->apmu_base = of_iomap(np, 1);
375     - if (!pxa_unit->mpmu_base) {
376     + if (!pxa_unit->apmu_base) {
377     pr_err("failed to map apmu registers\n");
378     return;
379     }
380     @@ -294,7 +294,7 @@ static void __init pxa910_clk_init(struct device_node *np)
381     }
382    
383     pxa_unit->apbcp_base = of_iomap(np, 3);
384     - if (!pxa_unit->mpmu_base) {
385     + if (!pxa_unit->apbcp_base) {
386     pr_err("failed to map apbcp registers\n");
387     return;
388     }
389     diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
390     index b3044219772c..2cde3796cb82 100644
391     --- a/drivers/crypto/caam/caamalg.c
392     +++ b/drivers/crypto/caam/caamalg.c
393     @@ -4542,6 +4542,15 @@ static int __init caam_algapi_init(void)
394     if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
395     continue;
396    
397     + /*
398     + * Check support for AES modes not available
399     + * on LP devices.
400     + */
401     + if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
402     + if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
403     + OP_ALG_AAI_XTS)
404     + continue;
405     +
406     t_alg = caam_alg_alloc(alg);
407     if (IS_ERR(t_alg)) {
408     err = PTR_ERR(t_alg);
409     diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
410     index 02f2a5621bb0..47d08b9da60d 100644
411     --- a/drivers/gpio/gpio-pca953x.c
412     +++ b/drivers/gpio/gpio-pca953x.c
413     @@ -372,14 +372,15 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
414     break;
415     }
416    
417     - memcpy(reg_val, chip->reg_output, NBANK(chip));
418     mutex_lock(&chip->i2c_lock);
419     + memcpy(reg_val, chip->reg_output, NBANK(chip));
420     for(bank=0; bank<NBANK(chip); bank++) {
421     unsigned bankmask = mask[bank / sizeof(*mask)] >>
422     ((bank % sizeof(*mask)) * 8);
423     if(bankmask) {
424     unsigned bankval = bits[bank / sizeof(*bits)] >>
425     ((bank % sizeof(*bits)) * 8);
426     + bankval &= bankmask;
427     reg_val[bank] = (reg_val[bank] & ~bankmask) | bankval;
428     }
429     }
430     diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
431     index b2dee1024166..15704aaf9e4e 100644
432     --- a/drivers/gpio/gpiolib.c
433     +++ b/drivers/gpio/gpiolib.c
434     @@ -2667,8 +2667,11 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
435     if (IS_ERR(desc))
436     return PTR_ERR(desc);
437    
438     - /* Flush direction if something changed behind our back */
439     - if (chip->get_direction) {
440     + /*
441     + * If it's fast: flush the direction setting if something changed
442     + * behind our back
443     + */
444     + if (!chip->can_sleep && chip->get_direction) {
445     int dir = chip->get_direction(chip, offset);
446    
447     if (dir)
448     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
449     index 700c56baf2de..e443073f6ece 100644
450     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
451     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
452     @@ -492,6 +492,7 @@ struct amdgpu_bo {
453     u64 metadata_flags;
454     void *metadata;
455     u32 metadata_size;
456     + unsigned prime_shared_count;
457     /* list of all virtual address to which this bo
458     * is associated to
459     */
460     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
461     index 651115dcce12..c02db01f6583 100644
462     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
463     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
464     @@ -132,7 +132,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
465     entry->priority = min(info[i].bo_priority,
466     AMDGPU_BO_LIST_MAX_PRIORITY);
467     entry->tv.bo = &entry->robj->tbo;
468     - entry->tv.shared = true;
469     + entry->tv.shared = !entry->robj->prime_shared_count;
470    
471     if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
472     gds_obj = entry->robj;
473     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
474     index 7700dc22f243..3826d5aea0a6 100644
475     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
476     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
477     @@ -74,20 +74,36 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
478     if (ret)
479     return ERR_PTR(ret);
480    
481     + bo->prime_shared_count = 1;
482     return &bo->gem_base;
483     }
484    
485     int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
486     {
487     struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
488     - int ret = 0;
489     + long ret = 0;
490    
491     ret = amdgpu_bo_reserve(bo, false);
492     if (unlikely(ret != 0))
493     return ret;
494    
495     + /*
496     + * Wait for all shared fences to complete before we switch to future
497     + * use of exclusive fence on this prime shared bo.
498     + */
499     + ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
500     + MAX_SCHEDULE_TIMEOUT);
501     + if (unlikely(ret < 0)) {
502     + DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
503     + amdgpu_bo_unreserve(bo);
504     + return ret;
505     + }
506     +
507     /* pin buffer into GTT */
508     ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
509     + if (likely(ret == 0))
510     + bo->prime_shared_count++;
511     +
512     amdgpu_bo_unreserve(bo);
513     return ret;
514     }
515     @@ -102,6 +118,8 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
516     return;
517    
518     amdgpu_bo_unpin(bo);
519     + if (bo->prime_shared_count)
520     + bo->prime_shared_count--;
521     amdgpu_bo_unreserve(bo);
522     }
523    
524     diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
525     index 1f8af87c6294..cf2560708e03 100644
526     --- a/drivers/gpu/drm/i915/intel_bios.c
527     +++ b/drivers/gpu/drm/i915/intel_bios.c
528     @@ -1143,7 +1143,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
529     if (!child)
530     return;
531    
532     - aux_channel = child->raw[25];
533     + aux_channel = child->common.aux_channel;
534     ddc_pin = child->common.ddc_pin;
535    
536     is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
537     @@ -1673,7 +1673,8 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
538     return false;
539     }
540    
541     -bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port)
542     +static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
543     + enum port port)
544     {
545     static const struct {
546     u16 dp, hdmi;
547     @@ -1687,22 +1688,35 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por
548     [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
549     [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
550     };
551     - int i;
552    
553     if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
554     return false;
555    
556     - if (!dev_priv->vbt.child_dev_num)
557     + if ((p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
558     + (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
559     return false;
560    
561     + if (p_child->common.dvo_port == port_mapping[port].dp)
562     + return true;
563     +
564     + /* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */
565     + if (p_child->common.dvo_port == port_mapping[port].hdmi &&
566     + p_child->common.aux_channel != 0)
567     + return true;
568     +
569     + return false;
570     +}
571     +
572     +bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
573     + enum port port)
574     +{
575     + int i;
576     +
577     for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
578     const union child_device_config *p_child =
579     &dev_priv->vbt.child_dev[i];
580    
581     - if ((p_child->common.dvo_port == port_mapping[port].dp ||
582     - p_child->common.dvo_port == port_mapping[port].hdmi) &&
583     - (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) ==
584     - (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
585     + if (child_dev_is_dp_dual_mode(p_child, port))
586     return true;
587     }
588    
589     diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
590     index 3051182cf483..b8aeb28e14d7 100644
591     --- a/drivers/gpu/drm/i915/intel_dp.c
592     +++ b/drivers/gpu/drm/i915/intel_dp.c
593     @@ -4323,21 +4323,11 @@ static enum drm_connector_status
594     intel_dp_detect(struct drm_connector *connector, bool force)
595     {
596     struct intel_dp *intel_dp = intel_attached_dp(connector);
597     - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
598     - struct intel_encoder *intel_encoder = &intel_dig_port->base;
599     enum drm_connector_status status = connector->status;
600    
601     DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
602     connector->base.id, connector->name);
603    
604     - if (intel_dp->is_mst) {
605     - /* MST devices are disconnected from a monitor POV */
606     - intel_dp_unset_edid(intel_dp);
607     - if (intel_encoder->type != INTEL_OUTPUT_EDP)
608     - intel_encoder->type = INTEL_OUTPUT_DP;
609     - return connector_status_disconnected;
610     - }
611     -
612     /* If full detect is not performed yet, do a full detect */
613     if (!intel_dp->detect_done)
614     status = intel_dp_long_pulse(intel_dp->attached_connector);
615     diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
616     index 68db9621f1f0..8886cab19f98 100644
617     --- a/drivers/gpu/drm/i915/intel_vbt_defs.h
618     +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
619     @@ -280,7 +280,8 @@ struct common_child_dev_config {
620     u8 dp_support:1;
621     u8 tmds_support:1;
622     u8 support_reserved:5;
623     - u8 not_common3[12];
624     + u8 aux_channel;
625     + u8 not_common3[11];
626     u8 iboost_level;
627     } __packed;
628    
629     diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
630     index d223650a97e4..11edabf425ae 100644
631     --- a/drivers/i2c/Kconfig
632     +++ b/drivers/i2c/Kconfig
633     @@ -59,7 +59,6 @@ config I2C_CHARDEV
634    
635     config I2C_MUX
636     tristate "I2C bus multiplexing support"
637     - depends on HAS_IOMEM
638     help
639     Say Y here if you want the I2C core to support the ability to
640     handle multiplexed I2C bus topologies, by presenting each
641     diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
642     index e280c8ecc0b5..96de9ce5669b 100644
643     --- a/drivers/i2c/muxes/Kconfig
644     +++ b/drivers/i2c/muxes/Kconfig
645     @@ -63,6 +63,7 @@ config I2C_MUX_PINCTRL
646    
647     config I2C_MUX_REG
648     tristate "Register-based I2C multiplexer"
649     + depends on HAS_IOMEM
650     help
651     If you say yes to this option, support will be included for a
652     register based I2C multiplexer. This driver provides access to
653     diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
654     index 3278ebf1cc5c..7e6f300009c5 100644
655     --- a/drivers/i2c/muxes/i2c-mux-pca954x.c
656     +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
657     @@ -247,9 +247,9 @@ static int pca954x_probe(struct i2c_client *client,
658     /* discard unconfigured channels */
659     break;
660     idle_disconnect_pd = pdata->modes[num].deselect_on_exit;
661     - data->deselect |= (idle_disconnect_pd
662     - || idle_disconnect_dt) << num;
663     }
664     + data->deselect |= (idle_disconnect_pd ||
665     + idle_disconnect_dt) << num;
666    
667     ret = i2c_mux_add_adapter(muxc, force, num, class);
668    
669     diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
670     index c99525512b34..71c7c4c328ef 100644
671     --- a/drivers/infiniband/core/cm.c
672     +++ b/drivers/infiniband/core/cm.c
673     @@ -80,6 +80,8 @@ static struct ib_cm {
674     __be32 random_id_operand;
675     struct list_head timewait_list;
676     struct workqueue_struct *wq;
677     + /* Sync on cm change port state */
678     + spinlock_t state_lock;
679     } cm;
680    
681     /* Counter indexes ordered by attribute ID */
682     @@ -161,6 +163,8 @@ struct cm_port {
683     struct ib_mad_agent *mad_agent;
684     struct kobject port_obj;
685     u8 port_num;
686     + struct list_head cm_priv_prim_list;
687     + struct list_head cm_priv_altr_list;
688     struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
689     };
690    
691     @@ -241,6 +245,12 @@ struct cm_id_private {
692     u8 service_timeout;
693     u8 target_ack_delay;
694    
695     + struct list_head prim_list;
696     + struct list_head altr_list;
697     + /* Indicates that the send port mad is registered and av is set */
698     + int prim_send_port_not_ready;
699     + int altr_send_port_not_ready;
700     +
701     struct list_head work_list;
702     atomic_t work_count;
703     };
704     @@ -259,20 +269,47 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
705     struct ib_mad_agent *mad_agent;
706     struct ib_mad_send_buf *m;
707     struct ib_ah *ah;
708     + struct cm_av *av;
709     + unsigned long flags, flags2;
710     + int ret = 0;
711    
712     + /* don't let the port to be released till the agent is down */
713     + spin_lock_irqsave(&cm.state_lock, flags2);
714     + spin_lock_irqsave(&cm.lock, flags);
715     + if (!cm_id_priv->prim_send_port_not_ready)
716     + av = &cm_id_priv->av;
717     + else if (!cm_id_priv->altr_send_port_not_ready &&
718     + (cm_id_priv->alt_av.port))
719     + av = &cm_id_priv->alt_av;
720     + else {
721     + pr_info("%s: not valid CM id\n", __func__);
722     + ret = -ENODEV;
723     + spin_unlock_irqrestore(&cm.lock, flags);
724     + goto out;
725     + }
726     + spin_unlock_irqrestore(&cm.lock, flags);
727     + /* Make sure the port haven't released the mad yet */
728     mad_agent = cm_id_priv->av.port->mad_agent;
729     - ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
730     - if (IS_ERR(ah))
731     - return PTR_ERR(ah);
732     + if (!mad_agent) {
733     + pr_info("%s: not a valid MAD agent\n", __func__);
734     + ret = -ENODEV;
735     + goto out;
736     + }
737     + ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr);
738     + if (IS_ERR(ah)) {
739     + ret = PTR_ERR(ah);
740     + goto out;
741     + }
742    
743     m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
744     - cm_id_priv->av.pkey_index,
745     + av->pkey_index,
746     0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
747     GFP_ATOMIC,
748     IB_MGMT_BASE_VERSION);
749     if (IS_ERR(m)) {
750     ib_destroy_ah(ah);
751     - return PTR_ERR(m);
752     + ret = PTR_ERR(m);
753     + goto out;
754     }
755    
756     /* Timeout set by caller if response is expected. */
757     @@ -282,7 +319,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
758     atomic_inc(&cm_id_priv->refcount);
759     m->context[0] = cm_id_priv;
760     *msg = m;
761     - return 0;
762     +
763     +out:
764     + spin_unlock_irqrestore(&cm.state_lock, flags2);
765     + return ret;
766     }
767    
768     static int cm_alloc_response_msg(struct cm_port *port,
769     @@ -352,7 +392,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
770     grh, &av->ah_attr);
771     }
772    
773     -static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
774     +static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av,
775     + struct cm_id_private *cm_id_priv)
776     {
777     struct cm_device *cm_dev;
778     struct cm_port *port = NULL;
779     @@ -387,7 +428,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
780     &av->ah_attr);
781     av->timeout = path->packet_life_time + 1;
782    
783     - return 0;
784     + spin_lock_irqsave(&cm.lock, flags);
785     + if (&cm_id_priv->av == av)
786     + list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
787     + else if (&cm_id_priv->alt_av == av)
788     + list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
789     + else
790     + ret = -EINVAL;
791     +
792     + spin_unlock_irqrestore(&cm.lock, flags);
793     +
794     + return ret;
795     }
796    
797     static int cm_alloc_id(struct cm_id_private *cm_id_priv)
798     @@ -677,6 +728,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
799     spin_lock_init(&cm_id_priv->lock);
800     init_completion(&cm_id_priv->comp);
801     INIT_LIST_HEAD(&cm_id_priv->work_list);
802     + INIT_LIST_HEAD(&cm_id_priv->prim_list);
803     + INIT_LIST_HEAD(&cm_id_priv->altr_list);
804     atomic_set(&cm_id_priv->work_count, -1);
805     atomic_set(&cm_id_priv->refcount, 1);
806     return &cm_id_priv->id;
807     @@ -892,6 +945,15 @@ retest:
808     break;
809     }
810    
811     + spin_lock_irq(&cm.lock);
812     + if (!list_empty(&cm_id_priv->altr_list) &&
813     + (!cm_id_priv->altr_send_port_not_ready))
814     + list_del(&cm_id_priv->altr_list);
815     + if (!list_empty(&cm_id_priv->prim_list) &&
816     + (!cm_id_priv->prim_send_port_not_ready))
817     + list_del(&cm_id_priv->prim_list);
818     + spin_unlock_irq(&cm.lock);
819     +
820     cm_free_id(cm_id->local_id);
821     cm_deref_id(cm_id_priv);
822     wait_for_completion(&cm_id_priv->comp);
823     @@ -1192,12 +1254,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
824     goto out;
825     }
826    
827     - ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
828     + ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
829     + cm_id_priv);
830     if (ret)
831     goto error1;
832     if (param->alternate_path) {
833     ret = cm_init_av_by_path(param->alternate_path,
834     - &cm_id_priv->alt_av);
835     + &cm_id_priv->alt_av, cm_id_priv);
836     if (ret)
837     goto error1;
838     }
839     @@ -1653,7 +1716,8 @@ static int cm_req_handler(struct cm_work *work)
840     dev_put(gid_attr.ndev);
841     }
842     work->path[0].gid_type = gid_attr.gid_type;
843     - ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
844     + ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
845     + cm_id_priv);
846     }
847     if (ret) {
848     int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
849     @@ -1672,7 +1736,8 @@ static int cm_req_handler(struct cm_work *work)
850     goto rejected;
851     }
852     if (req_msg->alt_local_lid) {
853     - ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
854     + ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
855     + cm_id_priv);
856     if (ret) {
857     ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
858     &work->path[0].sgid,
859     @@ -2727,7 +2792,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
860     goto out;
861     }
862    
863     - ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
864     + ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
865     + cm_id_priv);
866     if (ret)
867     goto out;
868     cm_id_priv->alt_av.timeout =
869     @@ -2839,7 +2905,8 @@ static int cm_lap_handler(struct cm_work *work)
870     cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
871     work->mad_recv_wc->recv_buf.grh,
872     &cm_id_priv->av);
873     - cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
874     + cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
875     + cm_id_priv);
876     ret = atomic_inc_and_test(&cm_id_priv->work_count);
877     if (!ret)
878     list_add_tail(&work->list, &cm_id_priv->work_list);
879     @@ -3031,7 +3098,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
880     return -EINVAL;
881    
882     cm_id_priv = container_of(cm_id, struct cm_id_private, id);
883     - ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
884     + ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
885     if (ret)
886     goto out;
887    
888     @@ -3468,7 +3535,9 @@ out:
889     static int cm_migrate(struct ib_cm_id *cm_id)
890     {
891     struct cm_id_private *cm_id_priv;
892     + struct cm_av tmp_av;
893     unsigned long flags;
894     + int tmp_send_port_not_ready;
895     int ret = 0;
896    
897     cm_id_priv = container_of(cm_id, struct cm_id_private, id);
898     @@ -3477,7 +3546,14 @@ static int cm_migrate(struct ib_cm_id *cm_id)
899     (cm_id->lap_state == IB_CM_LAP_UNINIT ||
900     cm_id->lap_state == IB_CM_LAP_IDLE)) {
901     cm_id->lap_state = IB_CM_LAP_IDLE;
902     + /* Swap address vector */
903     + tmp_av = cm_id_priv->av;
904     cm_id_priv->av = cm_id_priv->alt_av;
905     + cm_id_priv->alt_av = tmp_av;
906     + /* Swap port send ready state */
907     + tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
908     + cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
909     + cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
910     } else
911     ret = -EINVAL;
912     spin_unlock_irqrestore(&cm_id_priv->lock, flags);
913     @@ -3888,6 +3964,9 @@ static void cm_add_one(struct ib_device *ib_device)
914     port->cm_dev = cm_dev;
915     port->port_num = i;
916    
917     + INIT_LIST_HEAD(&port->cm_priv_prim_list);
918     + INIT_LIST_HEAD(&port->cm_priv_altr_list);
919     +
920     ret = cm_create_port_fs(port);
921     if (ret)
922     goto error1;
923     @@ -3945,6 +4024,8 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
924     {
925     struct cm_device *cm_dev = client_data;
926     struct cm_port *port;
927     + struct cm_id_private *cm_id_priv;
928     + struct ib_mad_agent *cur_mad_agent;
929     struct ib_port_modify port_modify = {
930     .clr_port_cap_mask = IB_PORT_CM_SUP
931     };
932     @@ -3968,15 +4049,27 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
933    
934     port = cm_dev->port[i-1];
935     ib_modify_port(ib_device, port->port_num, 0, &port_modify);
936     + /* Mark all the cm_id's as not valid */
937     + spin_lock_irq(&cm.lock);
938     + list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
939     + cm_id_priv->altr_send_port_not_ready = 1;
940     + list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
941     + cm_id_priv->prim_send_port_not_ready = 1;
942     + spin_unlock_irq(&cm.lock);
943     /*
944     * We flush the queue here after the going_down set, this
945     * verify that no new works will be queued in the recv handler,
946     * after that we can call the unregister_mad_agent
947     */
948     flush_workqueue(cm.wq);
949     - ib_unregister_mad_agent(port->mad_agent);
950     + spin_lock_irq(&cm.state_lock);
951     + cur_mad_agent = port->mad_agent;
952     + port->mad_agent = NULL;
953     + spin_unlock_irq(&cm.state_lock);
954     + ib_unregister_mad_agent(cur_mad_agent);
955     cm_remove_port_fs(port);
956     }
957     +
958     device_unregister(cm_dev->device);
959     kfree(cm_dev);
960     }
961     @@ -3989,6 +4082,7 @@ static int __init ib_cm_init(void)
962     INIT_LIST_HEAD(&cm.device_list);
963     rwlock_init(&cm.device_lock);
964     spin_lock_init(&cm.lock);
965     + spin_lock_init(&cm.state_lock);
966     cm.listen_service_table = RB_ROOT;
967     cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
968     cm.remote_id_table = RB_ROOT;
969     diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
970     index c68746ce6624..bdab61d9103c 100644
971     --- a/drivers/infiniband/core/umem.c
972     +++ b/drivers/infiniband/core/umem.c
973     @@ -174,7 +174,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
974    
975     cur_base = addr & PAGE_MASK;
976    
977     - if (npages == 0) {
978     + if (npages == 0 || npages > UINT_MAX) {
979     ret = -EINVAL;
980     goto out;
981     }
982     diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
983     index 0012fa58c105..44b1104eb168 100644
984     --- a/drivers/infiniband/core/uverbs_main.c
985     +++ b/drivers/infiniband/core/uverbs_main.c
986     @@ -262,12 +262,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
987     container_of(uobj, struct ib_uqp_object, uevent.uobject);
988    
989     idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
990     - if (qp != qp->real_qp) {
991     - ib_close_qp(qp);
992     - } else {
993     + if (qp == qp->real_qp)
994     ib_uverbs_detach_umcast(qp, uqp);
995     - ib_destroy_qp(qp);
996     - }
997     + ib_destroy_qp(qp);
998     ib_uverbs_release_uevent(file, &uqp->uevent);
999     kfree(uqp);
1000     }
1001     diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
1002     index bcf76c33726b..e3629983f1bf 100644
1003     --- a/drivers/infiniband/hw/hfi1/rc.c
1004     +++ b/drivers/infiniband/hw/hfi1/rc.c
1005     @@ -87,7 +87,7 @@ void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
1006     struct hfi1_qp_priv *priv = qp->priv;
1007    
1008     qp->s_flags |= RVT_S_WAIT_RNR;
1009     - qp->s_timer.expires = jiffies + usecs_to_jiffies(to);
1010     + priv->s_rnr_timer.expires = jiffies + usecs_to_jiffies(to);
1011     add_timer(&priv->s_rnr_timer);
1012     }
1013    
1014     diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
1015     index 1694037d1eee..8f59a4fded93 100644
1016     --- a/drivers/infiniband/hw/hfi1/user_sdma.c
1017     +++ b/drivers/infiniband/hw/hfi1/user_sdma.c
1018     @@ -1152,7 +1152,7 @@ static int pin_vector_pages(struct user_sdma_request *req,
1019     rb_node = hfi1_mmu_rb_extract(pq->handler,
1020     (unsigned long)iovec->iov.iov_base,
1021     iovec->iov.iov_len);
1022     - if (rb_node && !IS_ERR(rb_node))
1023     + if (rb_node)
1024     node = container_of(rb_node, struct sdma_mmu_node, rb);
1025     else
1026     rb_node = NULL;
1027     diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
1028     index 5fc623362731..b9bf0759f10a 100644
1029     --- a/drivers/infiniband/hw/mlx4/ah.c
1030     +++ b/drivers/infiniband/hw/mlx4/ah.c
1031     @@ -102,7 +102,10 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
1032     if (vlan_tag < 0x1000)
1033     vlan_tag |= (ah_attr->sl & 7) << 13;
1034     ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
1035     - ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
1036     + ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
1037     + if (ret < 0)
1038     + return ERR_PTR(ret);
1039     + ah->av.eth.gid_index = ret;
1040     ah->av.eth.vlan = cpu_to_be16(vlan_tag);
1041     ah->av.eth.hop_limit = ah_attr->grh.hop_limit;
1042     if (ah_attr->static_rate) {
1043     diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
1044     index 5df63dacaaa3..efb6414cc5e4 100644
1045     --- a/drivers/infiniband/hw/mlx4/cq.c
1046     +++ b/drivers/infiniband/hw/mlx4/cq.c
1047     @@ -253,11 +253,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
1048     if (context)
1049     if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
1050     err = -EFAULT;
1051     - goto err_dbmap;
1052     + goto err_cq_free;
1053     }
1054    
1055     return &cq->ibcq;
1056    
1057     +err_cq_free:
1058     + mlx4_cq_free(dev->dev, &cq->mcq);
1059     +
1060     err_dbmap:
1061     if (context)
1062     mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
1063     diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
1064     index e4fac9292e4a..ebe43cb7d06b 100644
1065     --- a/drivers/infiniband/hw/mlx5/cq.c
1066     +++ b/drivers/infiniband/hw/mlx5/cq.c
1067     @@ -917,8 +917,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
1068     if (err)
1069     goto err_create;
1070     } else {
1071     - /* for now choose 64 bytes till we have a proper interface */
1072     - cqe_size = 64;
1073     + cqe_size = cache_line_size() == 128 ? 128 : 64;
1074     err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
1075     &index, &inlen);
1076     if (err)
1077     diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
1078     index bff8707a2f1f..19f8820b4e92 100644
1079     --- a/drivers/infiniband/hw/mlx5/main.c
1080     +++ b/drivers/infiniband/hw/mlx5/main.c
1081     @@ -2100,14 +2100,14 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
1082     {
1083     struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
1084     struct ib_event ibev;
1085     -
1086     + bool fatal = false;
1087     u8 port = 0;
1088    
1089     switch (event) {
1090     case MLX5_DEV_EVENT_SYS_ERROR:
1091     - ibdev->ib_active = false;
1092     ibev.event = IB_EVENT_DEVICE_FATAL;
1093     mlx5_ib_handle_internal_error(ibdev);
1094     + fatal = true;
1095     break;
1096    
1097     case MLX5_DEV_EVENT_PORT_UP:
1098     @@ -2154,6 +2154,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
1099    
1100     if (ibdev->ib_active)
1101     ib_dispatch_event(&ibev);
1102     +
1103     + if (fatal)
1104     + ibdev->ib_active = false;
1105     }
1106    
1107     static void get_ext_port_caps(struct mlx5_ib_dev *dev)
1108     @@ -2835,7 +2838,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
1109     }
1110     err = init_node_data(dev);
1111     if (err)
1112     - goto err_dealloc;
1113     + goto err_free_port;
1114    
1115     mutex_init(&dev->flow_db.lock);
1116     mutex_init(&dev->cap_mask_mutex);
1117     @@ -2845,7 +2848,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
1118     if (ll == IB_LINK_LAYER_ETHERNET) {
1119     err = mlx5_enable_roce(dev);
1120     if (err)
1121     - goto err_dealloc;
1122     + goto err_free_port;
1123     }
1124    
1125     err = create_dev_resources(&dev->devr);
1126     diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
1127     index affc3f6598ca..19d590d39484 100644
1128     --- a/drivers/infiniband/hw/mlx5/qp.c
1129     +++ b/drivers/infiniband/hw/mlx5/qp.c
1130     @@ -2037,8 +2037,8 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
1131    
1132     mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
1133     qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
1134     - to_mcq(init_attr->recv_cq)->mcq.cqn,
1135     - to_mcq(init_attr->send_cq)->mcq.cqn);
1136     + init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
1137     + init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);
1138    
1139     qp->trans_qp.xrcdn = xrcdn;
1140    
1141     @@ -4702,6 +4702,14 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
1142     udata->inlen))
1143     return ERR_PTR(-EOPNOTSUPP);
1144    
1145     + if (init_attr->log_ind_tbl_size >
1146     + MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
1147     + mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
1148     + init_attr->log_ind_tbl_size,
1149     + MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
1150     + return ERR_PTR(-EINVAL);
1151     + }
1152     +
1153     min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
1154     if (udata->outlen && udata->outlen < min_resp_len)
1155     return ERR_PTR(-EINVAL);
1156     diff --git a/drivers/infiniband/sw/rdmavt/dma.c b/drivers/infiniband/sw/rdmavt/dma.c
1157     index 33076a5eee2f..04ebbb576385 100644
1158     --- a/drivers/infiniband/sw/rdmavt/dma.c
1159     +++ b/drivers/infiniband/sw/rdmavt/dma.c
1160     @@ -90,9 +90,6 @@ static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page,
1161     if (WARN_ON(!valid_dma_direction(direction)))
1162     return BAD_DMA_ADDRESS;
1163    
1164     - if (offset + size > PAGE_SIZE)
1165     - return BAD_DMA_ADDRESS;
1166     -
1167     addr = (u64)page_address(page);
1168     if (addr)
1169     addr += offset;
1170     diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
1171     index eedf2f1cafdf..7f5d7358c99e 100644
1172     --- a/drivers/infiniband/sw/rxe/rxe_net.c
1173     +++ b/drivers/infiniband/sw/rxe/rxe_net.c
1174     @@ -243,10 +243,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
1175     {
1176     int err;
1177     struct socket *sock;
1178     - struct udp_port_cfg udp_cfg;
1179     - struct udp_tunnel_sock_cfg tnl_cfg;
1180     -
1181     - memset(&udp_cfg, 0, sizeof(udp_cfg));
1182     + struct udp_port_cfg udp_cfg = {0};
1183     + struct udp_tunnel_sock_cfg tnl_cfg = {0};
1184    
1185     if (ipv6) {
1186     udp_cfg.family = AF_INET6;
1187     @@ -264,10 +262,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
1188     return ERR_PTR(err);
1189     }
1190    
1191     - tnl_cfg.sk_user_data = NULL;
1192     tnl_cfg.encap_type = 1;
1193     tnl_cfg.encap_rcv = rxe_udp_encap_recv;
1194     - tnl_cfg.encap_destroy = NULL;
1195    
1196     /* Setup UDP tunnel */
1197     setup_udp_tunnel_sock(net, sock, &tnl_cfg);
1198     diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
1199     index 22ba24f2a2c1..f724a7ef9b67 100644
1200     --- a/drivers/infiniband/sw/rxe/rxe_qp.c
1201     +++ b/drivers/infiniband/sw/rxe/rxe_qp.c
1202     @@ -522,6 +522,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
1203     if (qp->sq.queue) {
1204     __rxe_do_task(&qp->comp.task);
1205     __rxe_do_task(&qp->req.task);
1206     + rxe_queue_reset(qp->sq.queue);
1207     }
1208    
1209     /* cleanup attributes */
1210     @@ -573,6 +574,7 @@ void rxe_qp_error(struct rxe_qp *qp)
1211     {
1212     qp->req.state = QP_STATE_ERROR;
1213     qp->resp.state = QP_STATE_ERROR;
1214     + qp->attr.qp_state = IB_QPS_ERR;
1215    
1216     /* drain work and packet queues */
1217     rxe_run_task(&qp->resp.task, 1);
1218     diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
1219     index 08274254eb88..d14bf496d62d 100644
1220     --- a/drivers/infiniband/sw/rxe/rxe_queue.c
1221     +++ b/drivers/infiniband/sw/rxe/rxe_queue.c
1222     @@ -84,6 +84,15 @@ err1:
1223     return -EINVAL;
1224     }
1225    
1226     +inline void rxe_queue_reset(struct rxe_queue *q)
1227     +{
1228     + /* queue is comprised from header and the memory
1229     + * of the actual queue. See "struct rxe_queue_buf" in rxe_queue.h
1230     + * reset only the queue itself and not the management header
1231     + */
1232     + memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
1233     +}
1234     +
1235     struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
1236     int *num_elem,
1237     unsigned int elem_size)
1238     diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
1239     index 239fd609c31e..8c8641c87817 100644
1240     --- a/drivers/infiniband/sw/rxe/rxe_queue.h
1241     +++ b/drivers/infiniband/sw/rxe/rxe_queue.h
1242     @@ -84,6 +84,8 @@ int do_mmap_info(struct rxe_dev *rxe,
1243     size_t buf_size,
1244     struct rxe_mmap_info **ip_p);
1245    
1246     +void rxe_queue_reset(struct rxe_queue *q);
1247     +
1248     struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
1249     int *num_elem,
1250     unsigned int elem_size);
1251     diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
1252     index 13a848a518e8..43bb16649e44 100644
1253     --- a/drivers/infiniband/sw/rxe/rxe_req.c
1254     +++ b/drivers/infiniband/sw/rxe/rxe_req.c
1255     @@ -695,7 +695,8 @@ next_wqe:
1256     qp->req.wqe_index);
1257     wqe->state = wqe_state_done;
1258     wqe->status = IB_WC_SUCCESS;
1259     - goto complete;
1260     + __rxe_do_task(&qp->comp.task);
1261     + return 0;
1262     }
1263     payload = mtu;
1264     }
1265     @@ -744,13 +745,17 @@ err:
1266     wqe->status = IB_WC_LOC_PROT_ERR;
1267     wqe->state = wqe_state_error;
1268    
1269     -complete:
1270     - if (qp_type(qp) != IB_QPT_RC) {
1271     - while (rxe_completer(qp) == 0)
1272     - ;
1273     - }
1274     -
1275     - return 0;
1276     + /*
1277     + * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
1278     + * ---------8<---------8<-------------
1279     + * ...Note that if a completion error occurs, a Work Completion
1280     + * will always be generated, even if the signaling
1281     + * indicator requests an Unsignaled Completion.
1282     + * ---------8<---------8<-------------
1283     + */
1284     + wqe->wr.send_flags |= IB_SEND_SIGNALED;
1285     + __rxe_do_task(&qp->comp.task);
1286     + return -EAGAIN;
1287    
1288     exit:
1289     return -EAGAIN;
1290     diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
1291     index 41b113875d64..70c646b0097d 100644
1292     --- a/drivers/mfd/intel-lpss.c
1293     +++ b/drivers/mfd/intel-lpss.c
1294     @@ -502,9 +502,6 @@ int intel_lpss_suspend(struct device *dev)
1295     for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
1296     lpss->priv_ctx[i] = readl(lpss->priv + i * 4);
1297    
1298     - /* Put the device into reset state */
1299     - writel(0, lpss->priv + LPSS_PRIV_RESETS);
1300     -
1301     return 0;
1302     }
1303     EXPORT_SYMBOL_GPL(intel_lpss_suspend);
1304     diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
1305     index 3ac486a597f3..c57e407020f1 100644
1306     --- a/drivers/mfd/mfd-core.c
1307     +++ b/drivers/mfd/mfd-core.c
1308     @@ -399,6 +399,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
1309     clones[i]);
1310     }
1311    
1312     + put_device(dev);
1313     +
1314     return 0;
1315     }
1316     EXPORT_SYMBOL(mfd_clone_cell);
1317     diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
1318     index 94c7cc02fdab..00dd7ff70e01 100644
1319     --- a/drivers/mfd/stmpe.c
1320     +++ b/drivers/mfd/stmpe.c
1321     @@ -761,6 +761,8 @@ static int stmpe1801_reset(struct stmpe *stmpe)
1322     if (ret < 0)
1323     return ret;
1324    
1325     + msleep(10);
1326     +
1327     timeout = jiffies + msecs_to_jiffies(100);
1328     while (time_before(jiffies, timeout)) {
1329     ret = __stmpe_reg_read(stmpe, STMPE1801_REG_SYS_CTRL);
1330     diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
1331     index 1b5f531eeb25..bf3fd34924bd 100644
1332     --- a/drivers/net/virtio_net.c
1333     +++ b/drivers/net/virtio_net.c
1334     @@ -2010,23 +2010,33 @@ static struct virtio_device_id id_table[] = {
1335     { 0 },
1336     };
1337    
1338     +#define VIRTNET_FEATURES \
1339     + VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
1340     + VIRTIO_NET_F_MAC, \
1341     + VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
1342     + VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
1343     + VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
1344     + VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
1345     + VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
1346     + VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
1347     + VIRTIO_NET_F_CTRL_MAC_ADDR, \
1348     + VIRTIO_NET_F_MTU
1349     +
1350     static unsigned int features[] = {
1351     - VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
1352     - VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
1353     - VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
1354     - VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
1355     - VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
1356     - VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
1357     - VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
1358     - VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
1359     - VIRTIO_NET_F_CTRL_MAC_ADDR,
1360     + VIRTNET_FEATURES,
1361     +};
1362     +
1363     +static unsigned int features_legacy[] = {
1364     + VIRTNET_FEATURES,
1365     + VIRTIO_NET_F_GSO,
1366     VIRTIO_F_ANY_LAYOUT,
1367     - VIRTIO_NET_F_MTU,
1368     };
1369    
1370     static struct virtio_driver virtio_net_driver = {
1371     .feature_table = features,
1372     .feature_table_size = ARRAY_SIZE(features),
1373     + .feature_table_legacy = features_legacy,
1374     + .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
1375     .driver.name = KBUILD_MODNAME,
1376     .driver.owner = THIS_MODULE,
1377     .id_table = id_table,
1378     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
1379     index 4fdc3dad3e85..ea67ae9c87a0 100644
1380     --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
1381     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
1382     @@ -1087,6 +1087,15 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
1383     ret = iwl_mvm_switch_to_d3(mvm);
1384     if (ret)
1385     return ret;
1386     + } else {
1387     + /* In theory, we wouldn't have to stop a running sched
1388     + * scan in order to start another one (for
1389     + * net-detect). But in practice this doesn't seem to
1390     + * work properly, so stop any running sched_scan now.
1391     + */
1392     + ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
1393     + if (ret)
1394     + return ret;
1395     }
1396    
1397     /* rfkill release can be either for wowlan or netdetect */
1398     @@ -2088,6 +2097,16 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
1399     iwl_mvm_update_changed_regdom(mvm);
1400    
1401     if (mvm->net_detect) {
1402     + /* If this is a non-unified image, we restart the FW,
1403     + * so no need to stop the netdetect scan. If that
1404     + * fails, continue and try to get the wake-up reasons,
1405     + * but trigger a HW restart by keeping a failure code
1406     + * in ret.
1407     + */
1408     + if (unified_image)
1409     + ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
1410     + false);
1411     +
1412     iwl_mvm_query_netdetect_reasons(mvm, vif);
1413     /* has unlocked the mutex, so skip that */
1414     goto out;
1415     @@ -2271,7 +2290,8 @@ static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
1416     static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
1417     {
1418     struct iwl_mvm *mvm = inode->i_private;
1419     - int remaining_time = 10;
1420     + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
1421     + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
1422    
1423     mvm->d3_test_active = false;
1424    
1425     @@ -2282,17 +2302,21 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
1426     mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
1427    
1428     iwl_abort_notification_waits(&mvm->notif_wait);
1429     - ieee80211_restart_hw(mvm->hw);
1430     + if (!unified_image) {
1431     + int remaining_time = 10;
1432    
1433     - /* wait for restart and disconnect all interfaces */
1434     - while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
1435     - remaining_time > 0) {
1436     - remaining_time--;
1437     - msleep(1000);
1438     - }
1439     + ieee80211_restart_hw(mvm->hw);
1440    
1441     - if (remaining_time == 0)
1442     - IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n");
1443     + /* wait for restart and disconnect all interfaces */
1444     + while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
1445     + remaining_time > 0) {
1446     + remaining_time--;
1447     + msleep(1000);
1448     + }
1449     +
1450     + if (remaining_time == 0)
1451     + IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
1452     + }
1453    
1454     ieee80211_iterate_active_interfaces_atomic(
1455     mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1456     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
1457     index 5dd77e336617..90a1f4a06ba1 100644
1458     --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
1459     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
1460     @@ -4097,7 +4097,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
1461     struct iwl_mvm_internal_rxq_notif *notif,
1462     u32 size)
1463     {
1464     - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq);
1465     u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
1466     int ret;
1467    
1468     @@ -4119,7 +4118,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
1469     }
1470    
1471     if (notif->sync)
1472     - ret = wait_event_timeout(notif_waitq,
1473     + ret = wait_event_timeout(mvm->rx_sync_waitq,
1474     atomic_read(&mvm->queue_sync_counter) == 0,
1475     HZ);
1476     WARN_ON_ONCE(!ret);
1477     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
1478     index 6a615bb73042..e9cb970139c7 100644
1479     --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
1480     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
1481     @@ -932,6 +932,7 @@ struct iwl_mvm {
1482     /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
1483     spinlock_t d0i3_tx_lock;
1484     wait_queue_head_t d0i3_exit_waitq;
1485     + wait_queue_head_t rx_sync_waitq;
1486    
1487     /* BT-Coex */
1488     struct iwl_bt_coex_profile_notif last_bt_notif;
1489     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
1490     index 55d9096da68c..30bbdec97d03 100644
1491     --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
1492     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
1493     @@ -618,6 +618,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
1494     spin_lock_init(&mvm->refs_lock);
1495     skb_queue_head_init(&mvm->d0i3_tx);
1496     init_waitqueue_head(&mvm->d0i3_exit_waitq);
1497     + init_waitqueue_head(&mvm->rx_sync_waitq);
1498    
1499     atomic_set(&mvm->queue_sync_counter, 0);
1500    
1501     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
1502     index afb7eb60e454..2b994be10b42 100644
1503     --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
1504     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
1505     @@ -545,7 +545,8 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
1506     "Received expired RX queue sync message\n");
1507     return;
1508     }
1509     - atomic_dec(&mvm->queue_sync_counter);
1510     + if (!atomic_dec_return(&mvm->queue_sync_counter))
1511     + wake_up(&mvm->rx_sync_waitq);
1512     }
1513    
1514     switch (internal_notif->type) {
1515     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
1516     index dac120f8861b..3707ec60b575 100644
1517     --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
1518     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
1519     @@ -1185,6 +1185,9 @@ static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
1520    
1521     static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
1522     {
1523     + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
1524     + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
1525     +
1526     /* This looks a bit arbitrary, but the idea is that if we run
1527     * out of possible simultaneous scans and the userspace is
1528     * trying to run a scan type that is already running, we
1529     @@ -1211,12 +1214,30 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
1530     return -EBUSY;
1531     return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
1532     case IWL_MVM_SCAN_NETDETECT:
1533     - /* No need to stop anything for net-detect since the
1534     - * firmware is restarted anyway. This way, any sched
1535     - * scans that were running will be restarted when we
1536     - * resume.
1537     - */
1538     - return 0;
1539     + /* For non-unified images, there's no need to stop
1540     + * anything for net-detect since the firmware is
1541     + * restarted anyway. This way, any sched scans that
1542     + * were running will be restarted when we resume.
1543     + */
1544     + if (!unified_image)
1545     + return 0;
1546     +
1547     + /* If this is a unified image and we ran out of scans,
1548     + * we need to stop something. Prefer stopping regular
1549     + * scans, because the results are useless at this
1550     + * point, and we should be able to keep running
1551     + * another scheduled scan while suspended.
1552     + */
1553     + if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
1554     + return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR,
1555     + true);
1556     + if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
1557     + return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED,
1558     + true);
1559     +
1560     + /* fall through, something is wrong if no scan was
1561     + * running but we ran out of scans.
1562     + */
1563     default:
1564     WARN_ON(1);
1565     break;
1566     diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
1567     index 78cf9a7f3eac..13842ca124ab 100644
1568     --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
1569     +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
1570     @@ -526,48 +526,64 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
1571     MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
1572    
1573     #ifdef CONFIG_ACPI
1574     -#define SPL_METHOD "SPLC"
1575     -#define SPL_DOMAINTYPE_MODULE BIT(0)
1576     -#define SPL_DOMAINTYPE_WIFI BIT(1)
1577     -#define SPL_DOMAINTYPE_WIGIG BIT(2)
1578     -#define SPL_DOMAINTYPE_RFEM BIT(3)
1579     +#define ACPI_SPLC_METHOD "SPLC"
1580     +#define ACPI_SPLC_DOMAIN_WIFI (0x07)
1581    
1582     -static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
1583     +static u64 splc_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splc)
1584     {
1585     - union acpi_object *limits, *domain_type, *power_limit;
1586     -
1587     - if (splx->type != ACPI_TYPE_PACKAGE ||
1588     - splx->package.count != 2 ||
1589     - splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
1590     - splx->package.elements[0].integer.value != 0) {
1591     - IWL_ERR(trans, "Unsupported splx structure\n");
1592     + union acpi_object *data_pkg, *dflt_pwr_limit;
1593     + int i;
1594     +
1595     + /* We need at least two elements, one for the revision and one
1596     + * for the data itself. Also check that the revision is
1597     + * supported (currently only revision 0).
1598     + */
1599     + if (splc->type != ACPI_TYPE_PACKAGE ||
1600     + splc->package.count < 2 ||
1601     + splc->package.elements[0].type != ACPI_TYPE_INTEGER ||
1602     + splc->package.elements[0].integer.value != 0) {
1603     + IWL_DEBUG_INFO(trans,
1604     + "Unsupported structure returned by the SPLC method. Ignoring.\n");
1605     return 0;
1606     }
1607    
1608     - limits = &splx->package.elements[1];
1609     - if (limits->type != ACPI_TYPE_PACKAGE ||
1610     - limits->package.count < 2 ||
1611     - limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
1612     - limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
1613     - IWL_ERR(trans, "Invalid limits element\n");
1614     - return 0;
1615     + /* loop through all the packages to find the one for WiFi */
1616     + for (i = 1; i < splc->package.count; i++) {
1617     + union acpi_object *domain;
1618     +
1619     + data_pkg = &splc->package.elements[i];
1620     +
1621     + /* Skip anything that is not a package with the right
1622     + * amount of elements (i.e. at least 2 integers).
1623     + */
1624     + if (data_pkg->type != ACPI_TYPE_PACKAGE ||
1625     + data_pkg->package.count < 2 ||
1626     + data_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
1627     + data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
1628     + continue;
1629     +
1630     + domain = &data_pkg->package.elements[0];
1631     + if (domain->integer.value == ACPI_SPLC_DOMAIN_WIFI)
1632     + break;
1633     +
1634     + data_pkg = NULL;
1635     }
1636    
1637     - domain_type = &limits->package.elements[0];
1638     - power_limit = &limits->package.elements[1];
1639     - if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
1640     - IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
1641     + if (!data_pkg) {
1642     + IWL_DEBUG_INFO(trans,
1643     + "No element for the WiFi domain returned by the SPLC method.\n");
1644     return 0;
1645     }
1646    
1647     - return power_limit->integer.value;
1648     + dflt_pwr_limit = &data_pkg->package.elements[1];
1649     + return dflt_pwr_limit->integer.value;
1650     }
1651    
1652     static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
1653     {
1654     acpi_handle pxsx_handle;
1655     acpi_handle handle;
1656     - struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
1657     + struct acpi_buffer splc = {ACPI_ALLOCATE_BUFFER, NULL};
1658     acpi_status status;
1659    
1660     pxsx_handle = ACPI_HANDLE(&pdev->dev);
1661     @@ -578,23 +594,24 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
1662     }
1663    
1664     /* Get the method's handle */
1665     - status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
1666     + status = acpi_get_handle(pxsx_handle, (acpi_string)ACPI_SPLC_METHOD,
1667     + &handle);
1668     if (ACPI_FAILURE(status)) {
1669     - IWL_DEBUG_INFO(trans, "SPL method not found\n");
1670     + IWL_DEBUG_INFO(trans, "SPLC method not found\n");
1671     return;
1672     }
1673    
1674     /* Call SPLC with no arguments */
1675     - status = acpi_evaluate_object(handle, NULL, NULL, &splx);
1676     + status = acpi_evaluate_object(handle, NULL, NULL, &splc);
1677     if (ACPI_FAILURE(status)) {
1678     IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
1679     return;
1680     }
1681    
1682     - trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
1683     + trans->dflt_pwr_limit = splc_get_pwr_limit(trans, splc.pointer);
1684     IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
1685     trans->dflt_pwr_limit);
1686     - kfree(splx.pointer);
1687     + kfree(splc.pointer);
1688     }
1689    
1690     #else /* CONFIG_ACPI */
1691     diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
1692     index 18650dccdb58..478bba527977 100644
1693     --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
1694     +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
1695     @@ -522,6 +522,7 @@ error:
1696     static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
1697     int slots_num, u32 txq_id)
1698     {
1699     + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1700     int ret;
1701    
1702     txq->need_update = false;
1703     @@ -536,6 +537,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
1704     return ret;
1705    
1706     spin_lock_init(&txq->lock);
1707     +
1708     + if (txq_id == trans_pcie->cmd_queue) {
1709     + static struct lock_class_key iwl_pcie_cmd_queue_lock_class;
1710     +
1711     + lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
1712     + }
1713     +
1714     __skb_queue_head_init(&txq->overflow_q);
1715    
1716     /*
1717     diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
1718     index ec2e9c5fb993..22394fe30579 100644
1719     --- a/drivers/rtc/rtc-omap.c
1720     +++ b/drivers/rtc/rtc-omap.c
1721     @@ -109,6 +109,7 @@
1722     /* OMAP_RTC_OSC_REG bit fields: */
1723     #define OMAP_RTC_OSC_32KCLK_EN BIT(6)
1724     #define OMAP_RTC_OSC_SEL_32KCLK_SRC BIT(3)
1725     +#define OMAP_RTC_OSC_OSC32K_GZ_DISABLE BIT(4)
1726    
1727     /* OMAP_RTC_IRQWAKEEN bit fields: */
1728     #define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1)
1729     @@ -646,8 +647,9 @@ static int omap_rtc_probe(struct platform_device *pdev)
1730     */
1731     if (rtc->has_ext_clk) {
1732     reg = rtc_read(rtc, OMAP_RTC_OSC_REG);
1733     - rtc_write(rtc, OMAP_RTC_OSC_REG,
1734     - reg | OMAP_RTC_OSC_SEL_32KCLK_SRC);
1735     + reg &= ~OMAP_RTC_OSC_OSC32K_GZ_DISABLE;
1736     + reg |= OMAP_RTC_OSC_32KCLK_EN | OMAP_RTC_OSC_SEL_32KCLK_SRC;
1737     + rtc_writel(rtc, OMAP_RTC_OSC_REG, reg);
1738     }
1739    
1740     rtc->type->lock(rtc);
1741     diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c
1742     index d059ad4d0dbd..97ee1b46db69 100644
1743     --- a/drivers/uwb/lc-rc.c
1744     +++ b/drivers/uwb/lc-rc.c
1745     @@ -56,8 +56,11 @@ static struct uwb_rc *uwb_rc_find_by_index(int index)
1746     struct uwb_rc *rc = NULL;
1747    
1748     dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
1749     - if (dev)
1750     + if (dev) {
1751     rc = dev_get_drvdata(dev);
1752     + put_device(dev);
1753     + }
1754     +
1755     return rc;
1756     }
1757    
1758     @@ -467,7 +470,9 @@ struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
1759     if (dev) {
1760     rc = dev_get_drvdata(dev);
1761     __uwb_rc_get(rc);
1762     + put_device(dev);
1763     }
1764     +
1765     return rc;
1766     }
1767     EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
1768     @@ -520,8 +525,11 @@ struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
1769    
1770     dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev,
1771     find_rc_grandpa);
1772     - if (dev)
1773     + if (dev) {
1774     rc = dev_get_drvdata(dev);
1775     + put_device(dev);
1776     + }
1777     +
1778     return rc;
1779     }
1780     EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
1781     @@ -553,8 +561,10 @@ struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)
1782     struct uwb_rc *rc = NULL;
1783    
1784     dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev);
1785     - if (dev)
1786     + if (dev) {
1787     rc = dev_get_drvdata(dev);
1788     + put_device(dev);
1789     + }
1790    
1791     return rc;
1792     }
1793     diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c
1794     index c1304b8d4985..678e93741ae1 100644
1795     --- a/drivers/uwb/pal.c
1796     +++ b/drivers/uwb/pal.c
1797     @@ -97,6 +97,8 @@ static bool uwb_rc_class_device_exists(struct uwb_rc *target_rc)
1798    
1799     dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc);
1800    
1801     + put_device(dev);
1802     +
1803     return (dev != NULL);
1804     }
1805    
1806     diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
1807     index ea31931386ec..7bd21aaedaaf 100644
1808     --- a/fs/ext4/ext4.h
1809     +++ b/fs/ext4/ext4.h
1810     @@ -235,6 +235,7 @@ struct ext4_io_submit {
1811     #define EXT4_MAX_BLOCK_SIZE 65536
1812     #define EXT4_MIN_BLOCK_LOG_SIZE 10
1813     #define EXT4_MAX_BLOCK_LOG_SIZE 16
1814     +#define EXT4_MAX_CLUSTER_LOG_SIZE 30
1815     #ifdef __KERNEL__
1816     # define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize)
1817     #else
1818     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1819     index 3ec8708989ca..ec89f5005c2d 100644
1820     --- a/fs/ext4/super.c
1821     +++ b/fs/ext4/super.c
1822     @@ -3518,7 +3518,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1823     if (blocksize < EXT4_MIN_BLOCK_SIZE ||
1824     blocksize > EXT4_MAX_BLOCK_SIZE) {
1825     ext4_msg(sb, KERN_ERR,
1826     - "Unsupported filesystem blocksize %d", blocksize);
1827     + "Unsupported filesystem blocksize %d (%d log_block_size)",
1828     + blocksize, le32_to_cpu(es->s_log_block_size));
1829     + goto failed_mount;
1830     + }
1831     + if (le32_to_cpu(es->s_log_block_size) >
1832     + (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
1833     + ext4_msg(sb, KERN_ERR,
1834     + "Invalid log block size: %u",
1835     + le32_to_cpu(es->s_log_block_size));
1836     goto failed_mount;
1837     }
1838    
1839     @@ -3650,6 +3658,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1840     "block size (%d)", clustersize, blocksize);
1841     goto failed_mount;
1842     }
1843     + if (le32_to_cpu(es->s_log_cluster_size) >
1844     + (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
1845     + ext4_msg(sb, KERN_ERR,
1846     + "Invalid log cluster size: %u",
1847     + le32_to_cpu(es->s_log_cluster_size));
1848     + goto failed_mount;
1849     + }
1850     sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
1851     le32_to_cpu(es->s_log_block_size);
1852     sbi->s_clusters_per_group =
1853     diff --git a/fs/fuse/file.c b/fs/fuse/file.c
1854     index 3988b43c2f5a..a621dd98a865 100644
1855     --- a/fs/fuse/file.c
1856     +++ b/fs/fuse/file.c
1857     @@ -1985,6 +1985,10 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
1858     {
1859     struct inode *inode = page->mapping->host;
1860    
1861     + /* Haven't copied anything? Skip zeroing, size extending, dirtying. */
1862     + if (!copied)
1863     + goto unlock;
1864     +
1865     if (!PageUptodate(page)) {
1866     /* Zero any unwritten bytes at the end of the page */
1867     size_t endoff = (pos + copied) & ~PAGE_MASK;
1868     @@ -1995,6 +1999,8 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
1869    
1870     fuse_write_update_size(inode, pos + copied);
1871     set_page_dirty(page);
1872     +
1873     +unlock:
1874     unlock_page(page);
1875     put_page(page);
1876    
1877     diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
1878     index ab02a457da1f..e5d193440374 100644
1879     --- a/include/linux/sunrpc/svc_xprt.h
1880     +++ b/include/linux/sunrpc/svc_xprt.h
1881     @@ -25,6 +25,7 @@ struct svc_xprt_ops {
1882     void (*xpo_detach)(struct svc_xprt *);
1883     void (*xpo_free)(struct svc_xprt *);
1884     int (*xpo_secure_port)(struct svc_rqst *);
1885     + void (*xpo_kill_temp_xprt)(struct svc_xprt *);
1886     };
1887    
1888     struct svc_xprt_class {
1889     diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
1890     index 9530fcd27704..9d592c66f754 100644
1891     --- a/kernel/irq/manage.c
1892     +++ b/kernel/irq/manage.c
1893     @@ -1341,12 +1341,12 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1894    
1895     } else if (new->flags & IRQF_TRIGGER_MASK) {
1896     unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1897     - unsigned int omsk = irq_settings_get_trigger_mask(desc);
1898     + unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1899    
1900     if (nmsk != omsk)
1901     /* hope the handler works with current trigger mode */
1902     pr_warn("irq %d uses trigger mode %u; requested %u\n",
1903     - irq, nmsk, omsk);
1904     + irq, omsk, nmsk);
1905     }
1906    
1907     *old_ptr = new;
1908     diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
1909     index 084452e34a12..bdff5ed57f10 100644
1910     --- a/kernel/power/suspend_test.c
1911     +++ b/kernel/power/suspend_test.c
1912     @@ -203,8 +203,10 @@ static int __init test_suspend(void)
1913    
1914     /* RTCs have initialized by now too ... can we use one? */
1915     dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm);
1916     - if (dev)
1917     + if (dev) {
1918     rtc = rtc_class_open(dev_name(dev));
1919     + put_device(dev);
1920     + }
1921     if (!rtc) {
1922     printk(warn_no_rtc);
1923     return 0;
1924     diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
1925     index d0a1617b52b4..979e7bfbde7a 100644
1926     --- a/kernel/trace/Makefile
1927     +++ b/kernel/trace/Makefile
1928     @@ -1,8 +1,4 @@
1929    
1930     -# We are fully aware of the dangers of __builtin_return_address()
1931     -FRAME_CFLAGS := $(call cc-disable-warning,frame-address)
1932     -KBUILD_CFLAGS += $(FRAME_CFLAGS)
1933     -
1934     # Do not instrument the tracer itself:
1935    
1936     ifdef CONFIG_FUNCTION_TRACER
1937     diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
1938     index 84752c8e28b5..b1d7f1b5e791 100644
1939     --- a/kernel/trace/ftrace.c
1940     +++ b/kernel/trace/ftrace.c
1941     @@ -1856,6 +1856,10 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1942    
1943     /* Update rec->flags */
1944     do_for_each_ftrace_rec(pg, rec) {
1945     +
1946     + if (rec->flags & FTRACE_FL_DISABLED)
1947     + continue;
1948     +
1949     /* We need to update only differences of filter_hash */
1950     in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1951     in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1952     @@ -1878,6 +1882,10 @@ rollback:
1953    
1954     /* Roll back what we did above */
1955     do_for_each_ftrace_rec(pg, rec) {
1956     +
1957     + if (rec->flags & FTRACE_FL_DISABLED)
1958     + continue;
1959     +
1960     if (rec == end)
1961     goto err_out;
1962    
1963     @@ -2391,6 +2399,10 @@ void __weak ftrace_replace_code(int enable)
1964     return;
1965    
1966     do_for_each_ftrace_rec(pg, rec) {
1967     +
1968     + if (rec->flags & FTRACE_FL_DISABLED)
1969     + continue;
1970     +
1971     failed = __ftrace_replace_code(rec, enable);
1972     if (failed) {
1973     ftrace_bug(failed, rec);
1974     @@ -2757,7 +2769,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
1975     struct dyn_ftrace *rec;
1976    
1977     do_for_each_ftrace_rec(pg, rec) {
1978     - if (FTRACE_WARN_ON_ONCE(rec->flags))
1979     + if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
1980     pr_warn(" %pS flags:%lx\n",
1981     (void *)rec->ip, rec->flags);
1982     } while_for_each_ftrace_rec();
1983     @@ -3592,6 +3604,10 @@ match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
1984     goto out_unlock;
1985    
1986     do_for_each_ftrace_rec(pg, rec) {
1987     +
1988     + if (rec->flags & FTRACE_FL_DISABLED)
1989     + continue;
1990     +
1991     if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
1992     ret = enter_record(hash, rec, clear_filter);
1993     if (ret < 0) {
1994     @@ -3787,6 +3803,9 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
1995    
1996     do_for_each_ftrace_rec(pg, rec) {
1997    
1998     + if (rec->flags & FTRACE_FL_DISABLED)
1999     + continue;
2000     +
2001     if (!ftrace_match_record(rec, &func_g, NULL, 0))
2002     continue;
2003    
2004     @@ -4679,6 +4698,9 @@ ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
2005    
2006     do_for_each_ftrace_rec(pg, rec) {
2007    
2008     + if (rec->flags & FTRACE_FL_DISABLED)
2009     + continue;
2010     +
2011     if (ftrace_match_record(rec, &func_g, NULL, 0)) {
2012     /* if it is in the array */
2013     exists = false;
2014     diff --git a/mm/Makefile b/mm/Makefile
2015     index 2ca1faf3fa09..295bd7a9f76b 100644
2016     --- a/mm/Makefile
2017     +++ b/mm/Makefile
2018     @@ -21,9 +21,6 @@ KCOV_INSTRUMENT_memcontrol.o := n
2019     KCOV_INSTRUMENT_mmzone.o := n
2020     KCOV_INSTRUMENT_vmstat.o := n
2021    
2022     -# Since __builtin_frame_address does work as used, disable the warning.
2023     -CFLAGS_usercopy.o += $(call cc-disable-warning, frame-address)
2024     -
2025     mmu-y := nommu.o
2026     mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \
2027     mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
2028     diff --git a/net/can/bcm.c b/net/can/bcm.c
2029     index 8e999ffdf28b..8af9d25ff988 100644
2030     --- a/net/can/bcm.c
2031     +++ b/net/can/bcm.c
2032     @@ -1549,24 +1549,31 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
2033     struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
2034     struct sock *sk = sock->sk;
2035     struct bcm_sock *bo = bcm_sk(sk);
2036     + int ret = 0;
2037    
2038     if (len < sizeof(*addr))
2039     return -EINVAL;
2040    
2041     - if (bo->bound)
2042     - return -EISCONN;
2043     + lock_sock(sk);
2044     +
2045     + if (bo->bound) {
2046     + ret = -EISCONN;
2047     + goto fail;
2048     + }
2049    
2050     /* bind a device to this socket */
2051     if (addr->can_ifindex) {
2052     struct net_device *dev;
2053    
2054     dev = dev_get_by_index(&init_net, addr->can_ifindex);
2055     - if (!dev)
2056     - return -ENODEV;
2057     -
2058     + if (!dev) {
2059     + ret = -ENODEV;
2060     + goto fail;
2061     + }
2062     if (dev->type != ARPHRD_CAN) {
2063     dev_put(dev);
2064     - return -ENODEV;
2065     + ret = -ENODEV;
2066     + goto fail;
2067     }
2068    
2069     bo->ifindex = dev->ifindex;
2070     @@ -1577,17 +1584,24 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
2071     bo->ifindex = 0;
2072     }
2073    
2074     - bo->bound = 1;
2075     -
2076     if (proc_dir) {
2077     /* unique socket address as filename */
2078     sprintf(bo->procname, "%lu", sock_i_ino(sk));
2079     bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
2080     proc_dir,
2081     &bcm_proc_fops, sk);
2082     + if (!bo->bcm_proc_read) {
2083     + ret = -ENOMEM;
2084     + goto fail;
2085     + }
2086     }
2087    
2088     - return 0;
2089     + bo->bound = 1;
2090     +
2091     +fail:
2092     + release_sock(sk);
2093     +
2094     + return ret;
2095     }
2096    
2097     static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2098     diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
2099     index 0af26699bf04..584ac76ec555 100644
2100     --- a/net/netfilter/nft_dynset.c
2101     +++ b/net/netfilter/nft_dynset.c
2102     @@ -143,7 +143,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
2103     if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
2104     if (!(set->flags & NFT_SET_TIMEOUT))
2105     return -EINVAL;
2106     - timeout = be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT]));
2107     + timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
2108     + tb[NFTA_DYNSET_TIMEOUT])));
2109     }
2110    
2111     priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
2112     @@ -230,7 +231,8 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
2113     goto nla_put_failure;
2114     if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
2115     goto nla_put_failure;
2116     - if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout),
2117     + if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT,
2118     + cpu_to_be64(jiffies_to_msecs(priv->timeout)),
2119     NFTA_DYNSET_PAD))
2120     goto nla_put_failure;
2121     if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
2122     diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
2123     index c3f652395a80..3bc1d61694cb 100644
2124     --- a/net/sunrpc/svc_xprt.c
2125     +++ b/net/sunrpc/svc_xprt.c
2126     @@ -1002,14 +1002,8 @@ static void svc_age_temp_xprts(unsigned long closure)
2127     void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
2128     {
2129     struct svc_xprt *xprt;
2130     - struct svc_sock *svsk;
2131     - struct socket *sock;
2132     struct list_head *le, *next;
2133     LIST_HEAD(to_be_closed);
2134     - struct linger no_linger = {
2135     - .l_onoff = 1,
2136     - .l_linger = 0,
2137     - };
2138    
2139     spin_lock_bh(&serv->sv_lock);
2140     list_for_each_safe(le, next, &serv->sv_tempsocks) {
2141     @@ -1027,10 +1021,7 @@ void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
2142     list_del_init(le);
2143     xprt = list_entry(le, struct svc_xprt, xpt_list);
2144     dprintk("svc_age_temp_xprts_now: closing %p\n", xprt);
2145     - svsk = container_of(xprt, struct svc_sock, sk_xprt);
2146     - sock = svsk->sk_sock;
2147     - kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
2148     - (char *)&no_linger, sizeof(no_linger));
2149     + xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
2150     svc_close_xprt(xprt);
2151     }
2152     }
2153     diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
2154     index 57625f64efd5..a4bc98265d88 100644
2155     --- a/net/sunrpc/svcsock.c
2156     +++ b/net/sunrpc/svcsock.c
2157     @@ -438,6 +438,21 @@ static int svc_tcp_has_wspace(struct svc_xprt *xprt)
2158     return !test_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
2159     }
2160    
2161     +static void svc_tcp_kill_temp_xprt(struct svc_xprt *xprt)
2162     +{
2163     + struct svc_sock *svsk;
2164     + struct socket *sock;
2165     + struct linger no_linger = {
2166     + .l_onoff = 1,
2167     + .l_linger = 0,
2168     + };
2169     +
2170     + svsk = container_of(xprt, struct svc_sock, sk_xprt);
2171     + sock = svsk->sk_sock;
2172     + kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
2173     + (char *)&no_linger, sizeof(no_linger));
2174     +}
2175     +
2176     /*
2177     * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo
2178     */
2179     @@ -648,6 +663,10 @@ static struct svc_xprt *svc_udp_accept(struct svc_xprt *xprt)
2180     return NULL;
2181     }
2182    
2183     +static void svc_udp_kill_temp_xprt(struct svc_xprt *xprt)
2184     +{
2185     +}
2186     +
2187     static struct svc_xprt *svc_udp_create(struct svc_serv *serv,
2188     struct net *net,
2189     struct sockaddr *sa, int salen,
2190     @@ -667,6 +686,7 @@ static struct svc_xprt_ops svc_udp_ops = {
2191     .xpo_has_wspace = svc_udp_has_wspace,
2192     .xpo_accept = svc_udp_accept,
2193     .xpo_secure_port = svc_sock_secure_port,
2194     + .xpo_kill_temp_xprt = svc_udp_kill_temp_xprt,
2195     };
2196    
2197     static struct svc_xprt_class svc_udp_class = {
2198     @@ -1242,6 +1262,7 @@ static struct svc_xprt_ops svc_tcp_ops = {
2199     .xpo_has_wspace = svc_tcp_has_wspace,
2200     .xpo_accept = svc_tcp_accept,
2201     .xpo_secure_port = svc_sock_secure_port,
2202     + .xpo_kill_temp_xprt = svc_tcp_kill_temp_xprt,
2203     };
2204    
2205     static struct svc_xprt_class svc_tcp_class = {
2206     diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
2207     index 924271c9ef3e..a55b8093a7f9 100644
2208     --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
2209     +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
2210     @@ -67,6 +67,7 @@ static void svc_rdma_detach(struct svc_xprt *xprt);
2211     static void svc_rdma_free(struct svc_xprt *xprt);
2212     static int svc_rdma_has_wspace(struct svc_xprt *xprt);
2213     static int svc_rdma_secure_port(struct svc_rqst *);
2214     +static void svc_rdma_kill_temp_xprt(struct svc_xprt *);
2215    
2216     static struct svc_xprt_ops svc_rdma_ops = {
2217     .xpo_create = svc_rdma_create,
2218     @@ -79,6 +80,7 @@ static struct svc_xprt_ops svc_rdma_ops = {
2219     .xpo_has_wspace = svc_rdma_has_wspace,
2220     .xpo_accept = svc_rdma_accept,
2221     .xpo_secure_port = svc_rdma_secure_port,
2222     + .xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt,
2223     };
2224    
2225     struct svc_xprt_class svc_rdma_class = {
2226     @@ -1285,6 +1287,10 @@ static int svc_rdma_secure_port(struct svc_rqst *rqstp)
2227     return 1;
2228     }
2229    
2230     +static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
2231     +{
2232     +}
2233     +
2234     int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
2235     {
2236     struct ib_send_wr *bad_wr, *n_wr;
2237     diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
2238     index 973e8c141567..17867e723a51 100755
2239     --- a/scripts/gcc-x86_64-has-stack-protector.sh
2240     +++ b/scripts/gcc-x86_64-has-stack-protector.sh
2241     @@ -1,6 +1,6 @@
2242     #!/bin/sh
2243    
2244     -echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
2245     +echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
2246     if [ "$?" -eq "0" ] ; then
2247     echo y
2248     else
2249     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2250     index 26e866f65314..162818058a5d 100644
2251     --- a/sound/pci/hda/patch_realtek.c
2252     +++ b/sound/pci/hda/patch_realtek.c
2253     @@ -6905,8 +6905,6 @@ static const struct hda_fixup alc662_fixups[] = {
2254     .v.pins = (const struct hda_pintbl[]) {
2255     { 0x15, 0x40f000f0 }, /* disabled */
2256     { 0x16, 0x40f000f0 }, /* disabled */
2257     - { 0x18, 0x01014011 }, /* LO */
2258     - { 0x1a, 0x01014012 }, /* LO */
2259     { }
2260     }
2261     },
2262     diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
2263     index 6a23302297c9..4d9d320a7971 100644
2264     --- a/sound/pci/hda/thinkpad_helper.c
2265     +++ b/sound/pci/hda/thinkpad_helper.c
2266     @@ -13,7 +13,8 @@ static void (*old_vmaster_hook)(void *, int);
2267     static bool is_thinkpad(struct hda_codec *codec)
2268     {
2269     return (codec->core.subsystem_id >> 16 == 0x17aa) &&
2270     - (acpi_dev_found("LEN0068") || acpi_dev_found("IBM0068"));
2271     + (acpi_dev_found("LEN0068") || acpi_dev_found("LEN0268") ||
2272     + acpi_dev_found("IBM0068"));
2273     }
2274    
2275     static void update_tpacpi_mute_led(void *private_data, int enabled)
2276     diff --git a/sound/usb/card.c b/sound/usb/card.c
2277     index 9e5276d6dda0..2ddc034673a8 100644
2278     --- a/sound/usb/card.c
2279     +++ b/sound/usb/card.c
2280     @@ -315,7 +315,8 @@ static int snd_usb_audio_free(struct snd_usb_audio *chip)
2281     snd_usb_endpoint_free(ep);
2282    
2283     mutex_destroy(&chip->mutex);
2284     - dev_set_drvdata(&chip->dev->dev, NULL);
2285     + if (!atomic_read(&chip->shutdown))
2286     + dev_set_drvdata(&chip->dev->dev, NULL);
2287     kfree(chip);
2288     return 0;
2289     }
2290     diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
2291     index de15dbcdcecf..7214913861cf 100644
2292     --- a/tools/perf/util/hist.c
2293     +++ b/tools/perf/util/hist.c
2294     @@ -1596,18 +1596,18 @@ static void hists__hierarchy_output_resort(struct hists *hists,
2295     if (prog)
2296     ui_progress__update(prog, 1);
2297    
2298     + hists->nr_entries++;
2299     + if (!he->filtered) {
2300     + hists->nr_non_filtered_entries++;
2301     + hists__calc_col_len(hists, he);
2302     + }
2303     +
2304     if (!he->leaf) {
2305     hists__hierarchy_output_resort(hists, prog,
2306     &he->hroot_in,
2307     &he->hroot_out,
2308     min_callchain_hits,
2309     use_callchain);
2310     - hists->nr_entries++;
2311     - if (!he->filtered) {
2312     - hists->nr_non_filtered_entries++;
2313     - hists__calc_col_len(hists, he);
2314     - }
2315     -
2316     continue;
2317     }
2318    
2319     diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
2320     index 6e9c40eea208..69ccce308458 100644
2321     --- a/virt/kvm/arm/pmu.c
2322     +++ b/virt/kvm/arm/pmu.c
2323     @@ -305,7 +305,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
2324     continue;
2325     type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
2326     & ARMV8_PMU_EVTYPE_EVENT;
2327     - if ((type == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
2328     + if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
2329     && (enable & BIT(i))) {
2330     reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
2331     reg = lower_32_bits(reg);
2332     @@ -379,7 +379,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
2333     eventsel = data & ARMV8_PMU_EVTYPE_EVENT;
2334    
2335     /* Software increment event does't need to be backed by a perf event */
2336     - if (eventsel == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
2337     + if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
2338     + select_idx != ARMV8_PMU_CYCLE_IDX)
2339     return;
2340    
2341     memset(&attr, 0, sizeof(struct perf_event_attr));
2342     @@ -391,7 +392,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
2343     attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
2344     attr.exclude_hv = 1; /* Don't count EL2 events */
2345     attr.exclude_host = 1; /* Don't count host events */
2346     - attr.config = eventsel;
2347     + attr.config = (select_idx == ARMV8_PMU_CYCLE_IDX) ?
2348     + ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;
2349    
2350     counter = kvm_pmu_get_counter_value(vcpu, select_idx);
2351     /* The initial sample period (overflow count) of an event. */