Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0160-4.9.61-all-fixes.patch

Revision 3045
Wed Dec 20 11:49:27 2017 UTC by niro
File size: 86385 bytes
-linux-4.9.61
1 diff --git a/Documentation/devicetree/bindings/arm/arch_timer.txt b/Documentation/devicetree/bindings/arm/arch_timer.txt
2 index ad440a2b8051..e926aea1147d 100644
3 --- a/Documentation/devicetree/bindings/arm/arch_timer.txt
4 +++ b/Documentation/devicetree/bindings/arm/arch_timer.txt
5 @@ -31,6 +31,12 @@ to deliver its interrupts via SPIs.
6 This also affects writes to the tval register, due to the implicit
7 counter read.
8
9 +- hisilicon,erratum-161010101 : A boolean property. Indicates the
10 + presence of Hisilicon erratum 161010101, which says that reading the
11 + counters is unreliable in some cases, and reads may return a value 32
12 + beyond the correct value. This also affects writes to the tval
13 + registers, due to the implicit counter read.
14 +
15 ** Optional properties:
16
17 - arm,cpu-registers-not-fw-configured : Firmware does not initialize
18 diff --git a/Makefile b/Makefile
19 index 2f7a386b1751..b56b99e20b30 100644
20 --- a/Makefile
21 +++ b/Makefile
22 @@ -1,6 +1,6 @@
23 VERSION = 4
24 PATCHLEVEL = 9
25 -SUBLEVEL = 60
26 +SUBLEVEL = 61
27 EXTRAVERSION =
28 NAME = Roaring Lionus
29
30 diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
31 index cc952cf8ec30..024f1b75b0a3 100644
32 --- a/arch/arm/boot/dts/armada-375.dtsi
33 +++ b/arch/arm/boot/dts/armada-375.dtsi
34 @@ -176,9 +176,9 @@
35 reg = <0x8000 0x1000>;
36 cache-unified;
37 cache-level = <2>;
38 - arm,double-linefill-incr = <1>;
39 + arm,double-linefill-incr = <0>;
40 arm,double-linefill-wrap = <0>;
41 - arm,double-linefill = <1>;
42 + arm,double-linefill = <0>;
43 prefetch-data = <1>;
44 };
45
46 diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
47 index 2d7668848c5a..c60cfe9fd033 100644
48 --- a/arch/arm/boot/dts/armada-38x.dtsi
49 +++ b/arch/arm/boot/dts/armada-38x.dtsi
50 @@ -143,9 +143,9 @@
51 reg = <0x8000 0x1000>;
52 cache-unified;
53 cache-level = <2>;
54 - arm,double-linefill-incr = <1>;
55 + arm,double-linefill-incr = <0>;
56 arm,double-linefill-wrap = <0>;
57 - arm,double-linefill = <1>;
58 + arm,double-linefill = <0>;
59 prefetch-data = <1>;
60 };
61
62 diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi
63 index 34cba87f9200..aeecfa7e5ea3 100644
64 --- a/arch/arm/boot/dts/armada-39x.dtsi
65 +++ b/arch/arm/boot/dts/armada-39x.dtsi
66 @@ -111,9 +111,9 @@
67 reg = <0x8000 0x1000>;
68 cache-unified;
69 cache-level = <2>;
70 - arm,double-linefill-incr = <1>;
71 + arm,double-linefill-incr = <0>;
72 arm,double-linefill-wrap = <0>;
73 - arm,double-linefill = <1>;
74 + arm,double-linefill = <0>;
75 prefetch-data = <1>;
76 };
77
78 diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
79 index 55e0e3ea9cb6..bd12b98e2589 100644
80 --- a/arch/arm/include/asm/Kbuild
81 +++ b/arch/arm/include/asm/Kbuild
82 @@ -37,4 +37,3 @@ generic-y += termbits.h
83 generic-y += termios.h
84 generic-y += timex.h
85 generic-y += trace_clock.h
86 -generic-y += unaligned.h
87 diff --git a/arch/arm/include/asm/unaligned.h b/arch/arm/include/asm/unaligned.h
88 new file mode 100644
89 index 000000000000..ab905ffcf193
90 --- /dev/null
91 +++ b/arch/arm/include/asm/unaligned.h
92 @@ -0,0 +1,27 @@
93 +#ifndef __ASM_ARM_UNALIGNED_H
94 +#define __ASM_ARM_UNALIGNED_H
95 +
96 +/*
97 + * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+,
98 + * but we don't want to use linux/unaligned/access_ok.h since that can lead
99 + * to traps on unaligned stm/ldm or strd/ldrd.
100 + */
101 +#include <asm/byteorder.h>
102 +
103 +#if defined(__LITTLE_ENDIAN)
104 +# include <linux/unaligned/le_struct.h>
105 +# include <linux/unaligned/be_byteshift.h>
106 +# include <linux/unaligned/generic.h>
107 +# define get_unaligned __get_unaligned_le
108 +# define put_unaligned __put_unaligned_le
109 +#elif defined(__BIG_ENDIAN)
110 +# include <linux/unaligned/be_struct.h>
111 +# include <linux/unaligned/le_byteshift.h>
112 +# include <linux/unaligned/generic.h>
113 +# define get_unaligned __get_unaligned_be
114 +# define put_unaligned __put_unaligned_be
115 +#else
116 +# error need to define endianess
117 +#endif
118 +
119 +#endif /* __ASM_ARM_UNALIGNED_H */
120 diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
121 index 0064b86a2c87..30a13647c54c 100644
122 --- a/arch/arm/kvm/emulate.c
123 +++ b/arch/arm/kvm/emulate.c
124 @@ -227,7 +227,7 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
125 u32 return_offset = (is_thumb) ? 2 : 4;
126
127 kvm_update_psr(vcpu, UND_MODE);
128 - *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
129 + *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
130
131 /* Branch to exception vector */
132 *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
133 @@ -239,10 +239,8 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
134 */
135 static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
136 {
137 - unsigned long cpsr = *vcpu_cpsr(vcpu);
138 - bool is_thumb = (cpsr & PSR_T_BIT);
139 u32 vect_offset;
140 - u32 return_offset = (is_thumb) ? 4 : 0;
141 + u32 return_offset = (is_pabt) ? 4 : 8;
142 bool is_lpae;
143
144 kvm_update_psr(vcpu, ABT_MODE);
145 diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
146 index 8679405b0b2b..92eab1d51785 100644
147 --- a/arch/arm/kvm/hyp/Makefile
148 +++ b/arch/arm/kvm/hyp/Makefile
149 @@ -2,7 +2,7 @@
150 # Makefile for Kernel-based Virtual Machine module, HYP part
151 #
152
153 -ccflags-y += -fno-stack-protector
154 +ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
155
156 KVM=../../../../virt/kvm
157
158 diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
159 index f22826135c73..c743d1fd8286 100644
160 --- a/arch/arm64/kernel/traps.c
161 +++ b/arch/arm64/kernel/traps.c
162 @@ -112,7 +112,7 @@ static void __dump_instr(const char *lvl, struct pt_regs *regs)
163 for (i = -4; i < 1; i++) {
164 unsigned int val, bad;
165
166 - bad = __get_user(val, &((u32 *)addr)[i]);
167 + bad = get_user(val, &((u32 *)addr)[i]);
168
169 if (!bad)
170 p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
171 diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
172 index 14c4e3b14bcb..48b03547a969 100644
173 --- a/arch/arm64/kvm/hyp/Makefile
174 +++ b/arch/arm64/kvm/hyp/Makefile
175 @@ -2,7 +2,7 @@
176 # Makefile for Kernel-based Virtual Machine module, HYP part
177 #
178
179 -ccflags-y += -fno-stack-protector
180 +ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
181
182 KVM=../../../../virt/kvm
183
184 diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
185 index da6a8cfa54a0..3556715a774e 100644
186 --- a/arch/arm64/kvm/inject_fault.c
187 +++ b/arch/arm64/kvm/inject_fault.c
188 @@ -33,12 +33,26 @@
189 #define LOWER_EL_AArch64_VECTOR 0x400
190 #define LOWER_EL_AArch32_VECTOR 0x600
191
192 +/*
193 + * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
194 + */
195 +static const u8 return_offsets[8][2] = {
196 + [0] = { 0, 0 }, /* Reset, unused */
197 + [1] = { 4, 2 }, /* Undefined */
198 + [2] = { 0, 0 }, /* SVC, unused */
199 + [3] = { 4, 4 }, /* Prefetch abort */
200 + [4] = { 8, 8 }, /* Data abort */
201 + [5] = { 0, 0 }, /* HVC, unused */
202 + [6] = { 4, 4 }, /* IRQ, unused */
203 + [7] = { 4, 4 }, /* FIQ, unused */
204 +};
205 +
206 static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
207 {
208 unsigned long cpsr;
209 unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
210 bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
211 - u32 return_offset = (is_thumb) ? 4 : 0;
212 + u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
213 u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
214
215 cpsr = mode | COMPAT_PSR_I_BIT;
216 diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
217 index d5ce34dcf4d9..1e28747d677f 100644
218 --- a/arch/powerpc/mm/init_64.c
219 +++ b/arch/powerpc/mm/init_64.c
220 @@ -42,6 +42,8 @@
221 #include <linux/memblock.h>
222 #include <linux/hugetlb.h>
223 #include <linux/slab.h>
224 +#include <linux/of_fdt.h>
225 +#include <linux/libfdt.h>
226
227 #include <asm/pgalloc.h>
228 #include <asm/page.h>
229 @@ -421,6 +423,28 @@ static int __init parse_disable_radix(char *p)
230 }
231 early_param("disable_radix", parse_disable_radix);
232
233 +/*
234 + * If we're running under a hypervisor, we currently can't do radix
235 + * since we don't have the code to do the H_REGISTER_PROC_TBL hcall.
236 + * We tell that we're running under a hypervisor by looking for the
237 + * /chosen/ibm,architecture-vec-5 property.
238 + */
239 +static void early_check_vec5(void)
240 +{
241 + unsigned long root, chosen;
242 + int size;
243 + const u8 *vec5;
244 +
245 + root = of_get_flat_dt_root();
246 + chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
247 + if (chosen == -FDT_ERR_NOTFOUND)
248 + return;
249 + vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
250 + if (!vec5)
251 + return;
252 + cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
253 +}
254 +
255 void __init mmu_early_init_devtree(void)
256 {
257 /* Disable radix mode based on kernel command line. */
258 @@ -428,6 +452,15 @@ void __init mmu_early_init_devtree(void)
259 if (disable_radix || !(mfmsr() & MSR_HV))
260 cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
261
262 + /*
263 + * Check /chosen/ibm,architecture-vec-5 if running as a guest.
264 + * When running bare-metal, we can use radix if we like
265 + * even though the ibm,architecture-vec-5 property created by
266 + * skiboot doesn't have the necessary bits set.
267 + */
268 + if (early_radix_enabled() && !(mfmsr() & MSR_HV))
269 + early_check_vec5();
270 +
271 if (early_radix_enabled())
272 radix__early_init_devtree();
273 else
274 diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
275 index 303d28eb03a2..591cbdf615af 100644
276 --- a/arch/s390/crypto/aes_s390.c
277 +++ b/arch/s390/crypto/aes_s390.c
278 @@ -28,6 +28,7 @@
279 #include <linux/cpufeature.h>
280 #include <linux/init.h>
281 #include <linux/spinlock.h>
282 +#include <linux/fips.h>
283 #include <crypto/xts.h>
284 #include <asm/cpacf.h>
285
286 @@ -501,6 +502,12 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
287 if (err)
288 return err;
289
290 + /* In fips mode only 128 bit or 256 bit keys are valid */
291 + if (fips_enabled && key_len != 32 && key_len != 64) {
292 + tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
293 + return -EINVAL;
294 + }
295 +
296 /* Pick the correct function code based on the key length */
297 fc = (key_len == 32) ? CPACF_KM_XTS_128 :
298 (key_len == 64) ? CPACF_KM_XTS_256 : 0;
299 diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
300 index 1113389d0a39..fe7368a41aa8 100644
301 --- a/arch/s390/crypto/prng.c
302 +++ b/arch/s390/crypto/prng.c
303 @@ -110,22 +110,30 @@ static const u8 initial_parm_block[32] __initconst = {
304
305 /*** helper functions ***/
306
307 +/*
308 + * generate_entropy:
309 + * This algorithm produces 64 bytes of entropy data based on 1024
310 + * individual stckf() invocations assuming that each stckf() value
311 + * contributes 0.25 bits of entropy. So the caller gets 256 bit
312 + * entropy per 64 byte or 4 bits entropy per byte.
313 + */
314 static int generate_entropy(u8 *ebuf, size_t nbytes)
315 {
316 int n, ret = 0;
317 - u8 *pg, *h, hash[32];
318 + u8 *pg, *h, hash[64];
319
320 - pg = (u8 *) __get_free_page(GFP_KERNEL);
321 + /* allocate 2 pages */
322 + pg = (u8 *) __get_free_pages(GFP_KERNEL, 1);
323 if (!pg) {
324 prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
325 return -ENOMEM;
326 }
327
328 while (nbytes) {
329 - /* fill page with urandom bytes */
330 - get_random_bytes(pg, PAGE_SIZE);
331 - /* exor page with stckf values */
332 - for (n = 0; n < PAGE_SIZE / sizeof(u64); n++) {
333 + /* fill pages with urandom bytes */
334 + get_random_bytes(pg, 2*PAGE_SIZE);
335 + /* exor pages with 1024 stckf values */
336 + for (n = 0; n < 2 * PAGE_SIZE / sizeof(u64); n++) {
337 u64 *p = ((u64 *)pg) + n;
338 *p ^= get_tod_clock_fast();
339 }
340 @@ -134,8 +142,8 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
341 h = hash;
342 else
343 h = ebuf;
344 - /* generate sha256 from this page */
345 - cpacf_kimd(CPACF_KIMD_SHA_256, h, pg, PAGE_SIZE);
346 + /* hash over the filled pages */
347 + cpacf_kimd(CPACF_KIMD_SHA_512, h, pg, 2*PAGE_SIZE);
348 if (n < sizeof(hash))
349 memcpy(ebuf, hash, n);
350 ret += n;
351 @@ -143,7 +151,7 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
352 nbytes -= n;
353 }
354
355 - free_page((unsigned long)pg);
356 + free_pages((unsigned long)pg, 1);
357 return ret;
358 }
359
360 @@ -334,7 +342,7 @@ static int __init prng_sha512_selftest(void)
361 static int __init prng_sha512_instantiate(void)
362 {
363 int ret, datalen;
364 - u8 seed[64];
365 + u8 seed[64 + 32 + 16];
366
367 pr_debug("prng runs in SHA-512 mode "
368 "with chunksize=%d and reseed_limit=%u\n",
369 @@ -357,12 +365,12 @@ static int __init prng_sha512_instantiate(void)
370 if (ret)
371 goto outfree;
372
373 - /* generate initial seed bytestring, first 48 bytes of entropy */
374 - ret = generate_entropy(seed, 48);
375 - if (ret != 48)
376 + /* generate initial seed bytestring, with 256 + 128 bits entropy */
377 + ret = generate_entropy(seed, 64 + 32);
378 + if (ret != 64 + 32)
379 goto outfree;
380 /* followed by 16 bytes of unique nonce */
381 - get_tod_clock_ext(seed + 48);
382 + get_tod_clock_ext(seed + 64 + 32);
383
384 /* initial seed of the ppno drng */
385 cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
386 @@ -395,9 +403,9 @@ static void prng_sha512_deinstantiate(void)
387 static int prng_sha512_reseed(void)
388 {
389 int ret;
390 - u8 seed[32];
391 + u8 seed[64];
392
393 - /* generate 32 bytes of fresh entropy */
394 + /* fetch 256 bits of fresh entropy */
395 ret = generate_entropy(seed, sizeof(seed));
396 if (ret != sizeof(seed))
397 return ret;
398 diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
399 index 404d94c6c8bc..feba1b211898 100644
400 --- a/drivers/base/power/wakeirq.c
401 +++ b/drivers/base/power/wakeirq.c
402 @@ -141,6 +141,13 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
403 struct wake_irq *wirq = _wirq;
404 int res;
405
406 + /* Maybe abort suspend? */
407 + if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
408 + pm_wakeup_event(wirq->dev, 0);
409 +
410 + return IRQ_HANDLED;
411 + }
412 +
413 /* We don't want RPM_ASYNC or RPM_NOWAIT here */
414 res = pm_runtime_resume(wirq->dev);
415 if (res < 0)
416 diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
417 index 51d4bac97ab3..01d0594c9716 100644
418 --- a/drivers/clk/sunxi-ng/ccu_common.c
419 +++ b/drivers/clk/sunxi-ng/ccu_common.c
420 @@ -70,6 +70,11 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
421 goto err_clk_unreg;
422
423 reset = kzalloc(sizeof(*reset), GFP_KERNEL);
424 + if (!reset) {
425 + ret = -ENOMEM;
426 + goto err_alloc_reset;
427 + }
428 +
429 reset->rcdev.of_node = node;
430 reset->rcdev.ops = &ccu_reset_ops;
431 reset->rcdev.owner = THIS_MODULE;
432 @@ -85,6 +90,16 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
433 return 0;
434
435 err_of_clk_unreg:
436 + kfree(reset);
437 +err_alloc_reset:
438 + of_clk_del_provider(node);
439 err_clk_unreg:
440 + while (--i >= 0) {
441 + struct clk_hw *hw = desc->hw_clks->hws[i];
442 +
443 + if (!hw)
444 + continue;
445 + clk_hw_unregister(hw);
446 + }
447 return ret;
448 }
449 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
450 index 286d4d61bd0b..530f255a898b 100644
451 --- a/drivers/cpufreq/cpufreq.c
452 +++ b/drivers/cpufreq/cpufreq.c
453 @@ -1172,8 +1172,6 @@ static int cpufreq_online(unsigned int cpu)
454 if (new_policy) {
455 /* related_cpus should at least include policy->cpus. */
456 cpumask_copy(policy->related_cpus, policy->cpus);
457 - /* Clear mask of registered CPUs */
458 - cpumask_clear(policy->real_cpus);
459 }
460
461 /*
462 diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
463 index 17b19a68e269..71980c41283b 100644
464 --- a/drivers/crypto/ccp/ccp-dev-v5.c
465 +++ b/drivers/crypto/ccp/ccp-dev-v5.c
466 @@ -278,8 +278,7 @@ static int ccp5_perform_aes(struct ccp_op *op)
467 CCP_AES_ENCRYPT(&function) = op->u.aes.action;
468 CCP_AES_MODE(&function) = op->u.aes.mode;
469 CCP_AES_TYPE(&function) = op->u.aes.type;
470 - if (op->u.aes.mode == CCP_AES_MODE_CFB)
471 - CCP_AES_SIZE(&function) = 0x7f;
472 + CCP_AES_SIZE(&function) = op->u.aes.size;
473
474 CCP5_CMD_FUNCTION(&desc) = function.raw;
475
476 diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
477 index e23c36c7691c..347b77108baa 100644
478 --- a/drivers/crypto/ccp/ccp-dev.h
479 +++ b/drivers/crypto/ccp/ccp-dev.h
480 @@ -470,6 +470,7 @@ struct ccp_aes_op {
481 enum ccp_aes_type type;
482 enum ccp_aes_mode mode;
483 enum ccp_aes_action action;
484 + unsigned int size;
485 };
486
487 struct ccp_xts_aes_op {
488 diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
489 index 64deb006c3be..7d4cd518e602 100644
490 --- a/drivers/crypto/ccp/ccp-ops.c
491 +++ b/drivers/crypto/ccp/ccp-ops.c
492 @@ -692,6 +692,14 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
493 goto e_ctx;
494 }
495 }
496 + switch (aes->mode) {
497 + case CCP_AES_MODE_CFB: /* CFB128 only */
498 + case CCP_AES_MODE_CTR:
499 + op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
500 + break;
501 + default:
502 + op.u.aes.size = 0;
503 + }
504
505 /* Prepare the input and output data workareas. For in-place
506 * operations we need to set the dma direction to BIDIRECTIONAL
507 diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
508 index ed37e5908b91..12d417a4d4a8 100644
509 --- a/drivers/gpio/Kconfig
510 +++ b/drivers/gpio/Kconfig
511 @@ -1187,6 +1187,8 @@ config GPIO_MCP23S08
512 tristate "Microchip MCP23xxx I/O expander"
513 depends on OF_GPIO
514 select GPIOLIB_IRQCHIP
515 + select REGMAP_I2C if I2C
516 + select REGMAP if SPI_MASTER
517 help
518 SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
519 I/O expanders.
520 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
521 index 7fe8fd884f06..743a12df6971 100644
522 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
523 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
524 @@ -315,6 +315,10 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
525 amdgpu_dpm_enable_vce(adev, false);
526 } else {
527 amdgpu_asic_set_vce_clocks(adev, 0, 0);
528 + amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
529 + AMD_PG_STATE_GATE);
530 + amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
531 + AMD_CG_STATE_GATE);
532 }
533 } else {
534 schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
535 @@ -340,6 +344,11 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
536 amdgpu_dpm_enable_vce(adev, true);
537 } else {
538 amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
539 + amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
540 + AMD_CG_STATE_UNGATE);
541 + amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
542 + AMD_PG_STATE_UNGATE);
543 +
544 }
545 }
546 mutex_unlock(&adev->vce.idle_mutex);
547 diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
548 index ab3df6d75656..3f445df9124d 100644
549 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
550 +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
551 @@ -89,6 +89,10 @@ static int uvd_v6_0_early_init(void *handle)
552 {
553 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
554
555 + if (!(adev->flags & AMD_IS_APU) &&
556 + (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
557 + return -ENOENT;
558 +
559 uvd_v6_0_set_ring_funcs(adev);
560 uvd_v6_0_set_irq_funcs(adev);
561
562 diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
563 index fbd13fabdf2d..603d8425cca6 100644
564 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
565 +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
566 @@ -1193,6 +1193,17 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
567 if (!node)
568 return -ENOMEM;
569
570 + /*
571 + * To avoid an integer overflow for the later size computations, we
572 + * enforce a maximum number of submitted commands here. This limit is
573 + * sufficient for all conceivable usage cases of the G2D.
574 + */
575 + if (req->cmd_nr > G2D_CMDLIST_DATA_NUM ||
576 + req->cmd_buf_nr > G2D_CMDLIST_DATA_NUM) {
577 + dev_err(dev, "number of submitted G2D commands exceeds limit\n");
578 + return -EINVAL;
579 + }
580 +
581 node->event = NULL;
582
583 if (req->event_type != G2D_EVENT_NOT) {
584 @@ -1250,7 +1261,11 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
585 cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF;
586 }
587
588 - /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
589 + /*
590 + * Check the size of cmdlist. The 2 that is added last comes from
591 + * the implicit G2D_BITBLT_START that is appended once we have
592 + * checked all the submitted commands.
593 + */
594 size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
595 if (size > G2D_CMDLIST_DATA_NUM) {
596 dev_err(dev, "cmdlist size is too big\n");
597 diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
598 index 3194e544ee27..faacc813254c 100644
599 --- a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
600 +++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
601 @@ -89,9 +89,13 @@ struct fsl_tcon *fsl_tcon_init(struct device *dev)
602 goto err_node_put;
603 }
604
605 - of_node_put(np);
606 - clk_prepare_enable(tcon->ipg_clk);
607 + ret = clk_prepare_enable(tcon->ipg_clk);
608 + if (ret) {
609 + dev_err(dev, "Couldn't enable the TCON clock\n");
610 + goto err_node_put;
611 + }
612
613 + of_node_put(np);
614 dev_info(dev, "Using TCON in bypass mode\n");
615
616 return tcon;
617 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
618 index afa3d010c650..7fdc42e5aac8 100644
619 --- a/drivers/gpu/drm/i915/intel_dp.c
620 +++ b/drivers/gpu/drm/i915/intel_dp.c
621 @@ -3558,9 +3558,16 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
622 dev_priv->psr.psr2_support ? "supported" : "not supported");
623 }
624
625 - /* Read the eDP Display control capabilities registers */
626 - if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
627 - drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
628 + /*
629 + * Read the eDP display control registers.
630 + *
631 + * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
632 + * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
633 + * set, but require eDP 1.4+ detection (e.g. for supported link rates
634 + * method). The display control registers should read zero if they're
635 + * not supported anyway.
636 + */
637 + if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
638 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
639 sizeof(intel_dp->edp_dpcd))
640 DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
641 diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
642 index 393973016b52..322c7ca188e9 100644
643 --- a/drivers/gpu/drm/msm/msm_gem_submit.c
644 +++ b/drivers/gpu/drm/msm/msm_gem_submit.c
645 @@ -31,11 +31,14 @@
646 #define BO_PINNED 0x2000
647
648 static struct msm_gem_submit *submit_create(struct drm_device *dev,
649 - struct msm_gpu *gpu, int nr_bos, int nr_cmds)
650 + struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
651 {
652 struct msm_gem_submit *submit;
653 - int sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
654 - (nr_cmds * sizeof(*submit->cmd));
655 + uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
656 + ((u64)nr_cmds * sizeof(submit->cmd[0]));
657 +
658 + if (sz > SIZE_MAX)
659 + return NULL;
660
661 submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
662 if (!submit)
663 diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
664 index 6263ea82d6ac..8f11d347b3ec 100644
665 --- a/drivers/i2c/busses/i2c-riic.c
666 +++ b/drivers/i2c/busses/i2c-riic.c
667 @@ -80,6 +80,7 @@
668 #define ICIER_TEIE 0x40
669 #define ICIER_RIE 0x20
670 #define ICIER_NAKIE 0x10
671 +#define ICIER_SPIE 0x08
672
673 #define ICSR2_NACKF 0x10
674
675 @@ -216,11 +217,10 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
676 return IRQ_NONE;
677 }
678
679 - if (riic->is_last || riic->err)
680 + if (riic->is_last || riic->err) {
681 + riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
682 writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
683 -
684 - writeb(0, riic->base + RIIC_ICIER);
685 - complete(&riic->msg_done);
686 + }
687
688 return IRQ_HANDLED;
689 }
690 @@ -240,13 +240,13 @@ static irqreturn_t riic_rdrf_isr(int irq, void *data)
691
692 if (riic->bytes_left == 1) {
693 /* STOP must come before we set ACKBT! */
694 - if (riic->is_last)
695 + if (riic->is_last) {
696 + riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
697 writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
698 + }
699
700 riic_clear_set_bit(riic, 0, ICMR3_ACKBT, RIIC_ICMR3);
701
702 - writeb(0, riic->base + RIIC_ICIER);
703 - complete(&riic->msg_done);
704 } else {
705 riic_clear_set_bit(riic, ICMR3_ACKBT, 0, RIIC_ICMR3);
706 }
707 @@ -259,6 +259,21 @@ static irqreturn_t riic_rdrf_isr(int irq, void *data)
708 return IRQ_HANDLED;
709 }
710
711 +static irqreturn_t riic_stop_isr(int irq, void *data)
712 +{
713 + struct riic_dev *riic = data;
714 +
715 + /* read back registers to confirm writes have fully propagated */
716 + writeb(0, riic->base + RIIC_ICSR2);
717 + readb(riic->base + RIIC_ICSR2);
718 + writeb(0, riic->base + RIIC_ICIER);
719 + readb(riic->base + RIIC_ICIER);
720 +
721 + complete(&riic->msg_done);
722 +
723 + return IRQ_HANDLED;
724 +}
725 +
726 static u32 riic_func(struct i2c_adapter *adap)
727 {
728 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
729 @@ -326,6 +341,7 @@ static struct riic_irq_desc riic_irqs[] = {
730 { .res_num = 0, .isr = riic_tend_isr, .name = "riic-tend" },
731 { .res_num = 1, .isr = riic_rdrf_isr, .name = "riic-rdrf" },
732 { .res_num = 2, .isr = riic_tdre_isr, .name = "riic-tdre" },
733 + { .res_num = 3, .isr = riic_stop_isr, .name = "riic-stop" },
734 { .res_num = 5, .isr = riic_tend_isr, .name = "riic-nack" },
735 };
736
737 diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
738 index 282c9fb0ba95..786f640fc462 100644
739 --- a/drivers/infiniband/hw/mlx5/main.c
740 +++ b/drivers/infiniband/hw/mlx5/main.c
741 @@ -325,6 +325,27 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
742 return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
743 }
744
745 +int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
746 + int index, enum ib_gid_type *gid_type)
747 +{
748 + struct ib_gid_attr attr;
749 + union ib_gid gid;
750 + int ret;
751 +
752 + ret = ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr);
753 + if (ret)
754 + return ret;
755 +
756 + if (!attr.ndev)
757 + return -ENODEV;
758 +
759 + dev_put(attr.ndev);
760 +
761 + *gid_type = attr.gid_type;
762 +
763 + return 0;
764 +}
765 +
766 static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
767 {
768 if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
769 diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
770 index 7d689903c87c..86e1e08125ff 100644
771 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
772 +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
773 @@ -892,6 +892,8 @@ int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
774
775 __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
776 int index);
777 +int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
778 + int index, enum ib_gid_type *gid_type);
779
780 /* GSI QP helper functions */
781 struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
782 diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
783 index aee3942ec68d..2665414b4875 100644
784 --- a/drivers/infiniband/hw/mlx5/qp.c
785 +++ b/drivers/infiniband/hw/mlx5/qp.c
786 @@ -2226,6 +2226,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
787 {
788 enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
789 int err;
790 + enum ib_gid_type gid_type;
791
792 if (attr_mask & IB_QP_PKEY_INDEX)
793 path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
794 @@ -2244,10 +2245,16 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
795 if (ll == IB_LINK_LAYER_ETHERNET) {
796 if (!(ah->ah_flags & IB_AH_GRH))
797 return -EINVAL;
798 + err = mlx5_get_roce_gid_type(dev, port, ah->grh.sgid_index,
799 + &gid_type);
800 + if (err)
801 + return err;
802 memcpy(path->rmac, ah->dmac, sizeof(ah->dmac));
803 path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
804 ah->grh.sgid_index);
805 path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
806 + if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
807 + path->ecn_dscp = (ah->grh.traffic_class >> 2) & 0x3f;
808 } else {
809 path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
810 path->fl_free_ar |=
811 diff --git a/drivers/media/pci/bt8xx/dvb-bt8xx.c b/drivers/media/pci/bt8xx/dvb-bt8xx.c
812 index e69d338ab9be..ae550a180364 100644
813 --- a/drivers/media/pci/bt8xx/dvb-bt8xx.c
814 +++ b/drivers/media/pci/bt8xx/dvb-bt8xx.c
815 @@ -680,6 +680,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
816 /* DST is not a frontend, attaching the ASIC */
817 if (dvb_attach(dst_attach, state, &card->dvb_adapter) == NULL) {
818 pr_err("%s: Could not find a Twinhan DST\n", __func__);
819 + kfree(state);
820 break;
821 }
822 /* Attach other DST peripherals if any */
823 diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
824 index 518ad34f80d7..7f92144a1de3 100644
825 --- a/drivers/media/platform/exynos4-is/fimc-is.c
826 +++ b/drivers/media/platform/exynos4-is/fimc-is.c
827 @@ -825,12 +825,13 @@ static int fimc_is_probe(struct platform_device *pdev)
828 is->irq = irq_of_parse_and_map(dev->of_node, 0);
829 if (!is->irq) {
830 dev_err(dev, "no irq found\n");
831 - return -EINVAL;
832 + ret = -EINVAL;
833 + goto err_iounmap;
834 }
835
836 ret = fimc_is_get_clocks(is);
837 if (ret < 0)
838 - return ret;
839 + goto err_iounmap;
840
841 platform_set_drvdata(pdev, is);
842
843 @@ -891,6 +892,8 @@ static int fimc_is_probe(struct platform_device *pdev)
844 free_irq(is->irq, is);
845 err_clk:
846 fimc_is_put_clocks(is);
847 +err_iounmap:
848 + iounmap(is->pmu_regs);
849 return ret;
850 }
851
852 @@ -947,6 +950,7 @@ static int fimc_is_remove(struct platform_device *pdev)
853 fimc_is_unregister_subdevs(is);
854 vb2_dma_contig_clear_max_seg_size(dev);
855 fimc_is_put_clocks(is);
856 + iounmap(is->pmu_regs);
857 fimc_is_debugfs_remove(is);
858 release_firmware(is->fw.f_w);
859 fimc_is_free_cpu_memory(is);
860 diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
861 index 8b099fe1d592..71b65ab573ac 100644
862 --- a/drivers/media/usb/cx231xx/cx231xx-core.c
863 +++ b/drivers/media/usb/cx231xx/cx231xx-core.c
864 @@ -356,7 +356,12 @@ int cx231xx_send_vendor_cmd(struct cx231xx *dev,
865 */
866 if ((ven_req->wLength > 4) && ((ven_req->bRequest == 0x4) ||
867 (ven_req->bRequest == 0x5) ||
868 - (ven_req->bRequest == 0x6))) {
869 + (ven_req->bRequest == 0x6) ||
870 +
871 + /* Internal Master 3 Bus can send
872 + * and receive only 4 bytes per time
873 + */
874 + (ven_req->bRequest == 0x2))) {
875 unsend_size = 0;
876 pdata = ven_req->pBuff;
877
878 diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
879 index 207cc497958a..8062d37b4ba4 100644
880 --- a/drivers/mfd/ab8500-sysctrl.c
881 +++ b/drivers/mfd/ab8500-sysctrl.c
882 @@ -98,7 +98,7 @@ int ab8500_sysctrl_read(u16 reg, u8 *value)
883 u8 bank;
884
885 if (sysctrl_dev == NULL)
886 - return -EINVAL;
887 + return -EPROBE_DEFER;
888
889 bank = (reg >> 8);
890 if (!valid_bank(bank))
891 @@ -114,11 +114,13 @@ int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
892 u8 bank;
893
894 if (sysctrl_dev == NULL)
895 - return -EINVAL;
896 + return -EPROBE_DEFER;
897
898 bank = (reg >> 8);
899 - if (!valid_bank(bank))
900 + if (!valid_bank(bank)) {
901 + pr_err("invalid bank\n");
902 return -EINVAL;
903 + }
904
905 return abx500_mask_and_set_register_interruptible(sysctrl_dev, bank,
906 (u8)(reg & 0xFF), mask, value);
907 @@ -145,9 +147,15 @@ static int ab8500_sysctrl_remove(struct platform_device *pdev)
908 return 0;
909 }
910
911 +static const struct of_device_id ab8500_sysctrl_match[] = {
912 + { .compatible = "stericsson,ab8500-sysctrl", },
913 + {}
914 +};
915 +
916 static struct platform_driver ab8500_sysctrl_driver = {
917 .driver = {
918 .name = "ab8500-sysctrl",
919 + .of_match_table = ab8500_sysctrl_match,
920 },
921 .probe = ab8500_sysctrl_probe,
922 .remove = ab8500_sysctrl_remove,
923 diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
924 index ba130be32e61..9617fc323e15 100644
925 --- a/drivers/mfd/axp20x.c
926 +++ b/drivers/mfd/axp20x.c
927 @@ -205,14 +205,14 @@ static struct resource axp22x_pek_resources[] = {
928 static struct resource axp288_power_button_resources[] = {
929 {
930 .name = "PEK_DBR",
931 - .start = AXP288_IRQ_POKN,
932 - .end = AXP288_IRQ_POKN,
933 + .start = AXP288_IRQ_POKP,
934 + .end = AXP288_IRQ_POKP,
935 .flags = IORESOURCE_IRQ,
936 },
937 {
938 .name = "PEK_DBF",
939 - .start = AXP288_IRQ_POKP,
940 - .end = AXP288_IRQ_POKP,
941 + .start = AXP288_IRQ_POKN,
942 + .end = AXP288_IRQ_POKN,
943 .flags = IORESOURCE_IRQ,
944 },
945 };
946 diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
947 index e2af61f7e3b6..451d417eb451 100644
948 --- a/drivers/misc/mei/client.c
949 +++ b/drivers/misc/mei/client.c
950 @@ -1320,6 +1320,9 @@ int mei_cl_notify_request(struct mei_cl *cl,
951 return -EOPNOTSUPP;
952 }
953
954 + if (!mei_cl_is_connected(cl))
955 + return -ENODEV;
956 +
957 rets = pm_runtime_get(dev->dev);
958 if (rets < 0 && rets != -EINPROGRESS) {
959 pm_runtime_put_noidle(dev->dev);
960 diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
961 index c531deef3258..8f27fe35e8af 100644
962 --- a/drivers/mmc/host/s3cmci.c
963 +++ b/drivers/mmc/host/s3cmci.c
964 @@ -21,6 +21,7 @@
965 #include <linux/debugfs.h>
966 #include <linux/seq_file.h>
967 #include <linux/gpio.h>
968 +#include <linux/interrupt.h>
969 #include <linux/irq.h>
970 #include <linux/io.h>
971
972 diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
973 index 8b8470c4e6d0..f9b2a771096b 100644
974 --- a/drivers/mtd/nand/sunxi_nand.c
975 +++ b/drivers/mtd/nand/sunxi_nand.c
976 @@ -320,6 +320,10 @@ static int sunxi_nfc_wait_events(struct sunxi_nfc *nfc, u32 events,
977
978 ret = wait_for_completion_timeout(&nfc->complete,
979 msecs_to_jiffies(timeout_ms));
980 + if (!ret)
981 + ret = -ETIMEDOUT;
982 + else
983 + ret = 0;
984
985 writel(0, nfc->regs + NFC_REG_INT);
986 } else {
987 diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
988 index 3066d9c99984..e2512ab41168 100644
989 --- a/drivers/net/ethernet/amazon/ena/ena_com.c
990 +++ b/drivers/net/ethernet/amazon/ena/ena_com.c
991 @@ -36,9 +36,9 @@
992 /*****************************************************************************/
993
994 /* Timeout in micro-sec */
995 -#define ADMIN_CMD_TIMEOUT_US (1000000)
996 +#define ADMIN_CMD_TIMEOUT_US (3000000)
997
998 -#define ENA_ASYNC_QUEUE_DEPTH 4
999 +#define ENA_ASYNC_QUEUE_DEPTH 16
1000 #define ENA_ADMIN_QUEUE_DEPTH 32
1001
1002 #define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
1003 diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
1004 index 69d7e9ed5bc8..c5eaf7616939 100644
1005 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
1006 +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
1007 @@ -100,7 +100,7 @@
1008 /* Number of queues to check for missing queues per timer service */
1009 #define ENA_MONITORED_TX_QUEUES 4
1010 /* Max timeout packets before device reset */
1011 -#define MAX_NUM_OF_TIMEOUTED_PACKETS 32
1012 +#define MAX_NUM_OF_TIMEOUTED_PACKETS 128
1013
1014 #define ENA_TX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1))
1015
1016 @@ -116,9 +116,9 @@
1017 #define ENA_IO_IRQ_IDX(q) (ENA_IO_IRQ_FIRST_IDX + (q))
1018
1019 /* ENA device should send keep alive msg every 1 sec.
1020 - * We wait for 3 sec just to be on the safe side.
1021 + * We wait for 6 sec just to be on the safe side.
1022 */
1023 -#define ENA_DEVICE_KALIVE_TIMEOUT (3 * HZ)
1024 +#define ENA_DEVICE_KALIVE_TIMEOUT (6 * HZ)
1025
1026 #define ENA_MMIO_DISABLE_REG_READ BIT(0)
1027
1028 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1029 index 20e569bd978a..333df540b375 100644
1030 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1031 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1032 @@ -97,6 +97,8 @@ enum board_idx {
1033 BCM57407_NPAR,
1034 BCM57414_NPAR,
1035 BCM57416_NPAR,
1036 + BCM57452,
1037 + BCM57454,
1038 NETXTREME_E_VF,
1039 NETXTREME_C_VF,
1040 };
1041 @@ -131,6 +133,8 @@ static const struct {
1042 { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
1043 { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
1044 { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
1045 + { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
1046 + { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
1047 { "Broadcom NetXtreme-E Ethernet Virtual Function" },
1048 { "Broadcom NetXtreme-C Ethernet Virtual Function" },
1049 };
1050 @@ -166,6 +170,8 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
1051 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
1052 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
1053 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
1054 + { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
1055 + { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
1056 #ifdef CONFIG_BNXT_SRIOV
1057 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
1058 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
1059 diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
1060 index 707bc4680b9b..6ea10a9f33e8 100644
1061 --- a/drivers/net/ethernet/marvell/mvneta.c
1062 +++ b/drivers/net/ethernet/marvell/mvneta.c
1063 @@ -28,6 +28,7 @@
1064 #include <linux/of_mdio.h>
1065 #include <linux/of_net.h>
1066 #include <linux/phy.h>
1067 +#include <linux/phy_fixed.h>
1068 #include <linux/platform_device.h>
1069 #include <linux/skbuff.h>
1070 #include <net/hwbm.h>
1071 diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
1072 index 01cf094bee18..8f8496102926 100644
1073 --- a/drivers/net/phy/dp83867.c
1074 +++ b/drivers/net/phy/dp83867.c
1075 @@ -33,6 +33,7 @@
1076
1077 /* Extended Registers */
1078 #define DP83867_RGMIICTL 0x0032
1079 +#define DP83867_STRAP_STS1 0x006E
1080 #define DP83867_RGMIIDCTL 0x0086
1081
1082 #define DP83867_SW_RESET BIT(15)
1083 @@ -56,9 +57,13 @@
1084 #define DP83867_RGMII_TX_CLK_DELAY_EN BIT(1)
1085 #define DP83867_RGMII_RX_CLK_DELAY_EN BIT(0)
1086
1087 +/* STRAP_STS1 bits */
1088 +#define DP83867_STRAP_STS1_RESERVED BIT(11)
1089 +
1090 /* PHY CTRL bits */
1091 #define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
1092 #define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14)
1093 +#define DP83867_PHYCR_RESERVED_MASK BIT(11)
1094
1095 /* RGMIIDCTL bits */
1096 #define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
1097 @@ -141,7 +146,7 @@ static int dp83867_of_init(struct phy_device *phydev)
1098 static int dp83867_config_init(struct phy_device *phydev)
1099 {
1100 struct dp83867_private *dp83867;
1101 - int ret, val;
1102 + int ret, val, bs;
1103 u16 delay;
1104
1105 if (!phydev->priv) {
1106 @@ -164,6 +169,22 @@ static int dp83867_config_init(struct phy_device *phydev)
1107 return val;
1108 val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
1109 val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
1110 +
1111 + /* The code below checks if "port mirroring" N/A MODE4 has been
1112 + * enabled during power on bootstrap.
1113 + *
1114 + * Such N/A mode enabled by mistake can put PHY IC in some
1115 + * internal testing mode and disable RGMII transmission.
1116 + *
1117 + * In this particular case one needs to check STRAP_STS1
1118 + * register's bit 11 (marked as RESERVED).
1119 + */
1120 +
1121 + bs = phy_read_mmd_indirect(phydev, DP83867_STRAP_STS1,
1122 + DP83867_DEVADDR);
1123 + if (bs & DP83867_STRAP_STS1_RESERVED)
1124 + val &= ~DP83867_PHYCR_RESERVED_MASK;
1125 +
1126 ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
1127 if (ret)
1128 return ret;
1129 diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
1130 index 766c63bf05c4..45226dbee5ce 100644
1131 --- a/drivers/net/wireless/ath/ath10k/ahb.c
1132 +++ b/drivers/net/wireless/ath/ath10k/ahb.c
1133 @@ -33,6 +33,9 @@ static const struct of_device_id ath10k_ahb_of_match[] = {
1134
1135 MODULE_DEVICE_TABLE(of, ath10k_ahb_of_match);
1136
1137 +#define QCA4019_SRAM_ADDR 0x000C0000
1138 +#define QCA4019_SRAM_LEN 0x00040000 /* 256 kb */
1139 +
1140 static inline struct ath10k_ahb *ath10k_ahb_priv(struct ath10k *ar)
1141 {
1142 return &((struct ath10k_pci *)ar->drv_priv)->ahb[0];
1143 @@ -699,6 +702,25 @@ static int ath10k_ahb_hif_power_up(struct ath10k *ar)
1144 return ret;
1145 }
1146
1147 +static u32 ath10k_ahb_qca4019_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
1148 +{
1149 + u32 val = 0, region = addr & 0xfffff;
1150 +
1151 + val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
1152 +
1153 + if (region >= QCA4019_SRAM_ADDR && region <=
1154 + (QCA4019_SRAM_ADDR + QCA4019_SRAM_LEN)) {
1155 + /* SRAM contents for QCA4019 can be directly accessed and
1156 + * no conversions are required
1157 + */
1158 + val |= region;
1159 + } else {
1160 + val |= 0x100000 | region;
1161 + }
1162 +
1163 + return val;
1164 +}
1165 +
1166 static const struct ath10k_hif_ops ath10k_ahb_hif_ops = {
1167 .tx_sg = ath10k_pci_hif_tx_sg,
1168 .diag_read = ath10k_pci_hif_diag_read,
1169 @@ -766,6 +788,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
1170 ar_pci->mem_len = ar_ahb->mem_len;
1171 ar_pci->ar = ar;
1172 ar_pci->bus_ops = &ath10k_ahb_bus_ops;
1173 + ar_pci->targ_cpu_to_ce_addr = ath10k_ahb_qca4019_targ_cpu_to_ce_addr;
1174
1175 ret = ath10k_pci_setup_resource(ar);
1176 if (ret) {
1177 diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
1178 index 410bcdaa9e87..25b8d501d437 100644
1179 --- a/drivers/net/wireless/ath/ath10k/pci.c
1180 +++ b/drivers/net/wireless/ath/ath10k/pci.c
1181 @@ -840,31 +840,35 @@ void ath10k_pci_rx_replenish_retry(unsigned long ptr)
1182 ath10k_pci_rx_post(ar);
1183 }
1184
1185 -static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
1186 +static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
1187 {
1188 - u32 val = 0;
1189 + u32 val = 0, region = addr & 0xfffff;
1190
1191 - switch (ar->hw_rev) {
1192 - case ATH10K_HW_QCA988X:
1193 - case ATH10K_HW_QCA9887:
1194 - case ATH10K_HW_QCA6174:
1195 - case ATH10K_HW_QCA9377:
1196 - val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1197 - CORE_CTRL_ADDRESS) &
1198 - 0x7ff) << 21;
1199 - break;
1200 - case ATH10K_HW_QCA9888:
1201 - case ATH10K_HW_QCA99X0:
1202 - case ATH10K_HW_QCA9984:
1203 - case ATH10K_HW_QCA4019:
1204 - val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
1205 - break;
1206 - }
1207 + val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
1208 + & 0x7ff) << 21;
1209 + val |= 0x100000 | region;
1210 + return val;
1211 +}
1212 +
1213 +static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
1214 +{
1215 + u32 val = 0, region = addr & 0xfffff;
1216
1217 - val |= 0x100000 | (addr & 0xfffff);
1218 + val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
1219 + val |= 0x100000 | region;
1220 return val;
1221 }
1222
1223 +static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
1224 +{
1225 + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1226 +
1227 + if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
1228 + return -ENOTSUPP;
1229 +
1230 + return ar_pci->targ_cpu_to_ce_addr(ar, addr);
1231 +}
1232 +
1233 /*
1234 * Diagnostic read/write access is provided for startup/config/debug usage.
1235 * Caller must guarantee proper alignment, when applicable, and single user
1236 @@ -3171,6 +3175,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
1237 bool pci_ps;
1238 int (*pci_soft_reset)(struct ath10k *ar);
1239 int (*pci_hard_reset)(struct ath10k *ar);
1240 + u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
1241
1242 switch (pci_dev->device) {
1243 case QCA988X_2_0_DEVICE_ID:
1244 @@ -3178,12 +3183,14 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
1245 pci_ps = false;
1246 pci_soft_reset = ath10k_pci_warm_reset;
1247 pci_hard_reset = ath10k_pci_qca988x_chip_reset;
1248 + targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
1249 break;
1250 case QCA9887_1_0_DEVICE_ID:
1251 hw_rev = ATH10K_HW_QCA9887;
1252 pci_ps = false;
1253 pci_soft_reset = ath10k_pci_warm_reset;
1254 pci_hard_reset = ath10k_pci_qca988x_chip_reset;
1255 + targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
1256 break;
1257 case QCA6164_2_1_DEVICE_ID:
1258 case QCA6174_2_1_DEVICE_ID:
1259 @@ -3191,30 +3198,35 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
1260 pci_ps = true;
1261 pci_soft_reset = ath10k_pci_warm_reset;
1262 pci_hard_reset = ath10k_pci_qca6174_chip_reset;
1263 + targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
1264 break;
1265 case QCA99X0_2_0_DEVICE_ID:
1266 hw_rev = ATH10K_HW_QCA99X0;
1267 pci_ps = false;
1268 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
1269 pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
1270 + targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
1271 break;
1272 case QCA9984_1_0_DEVICE_ID:
1273 hw_rev = ATH10K_HW_QCA9984;
1274 pci_ps = false;
1275 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
1276 pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
1277 + targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
1278 break;
1279 case QCA9888_2_0_DEVICE_ID:
1280 hw_rev = ATH10K_HW_QCA9888;
1281 pci_ps = false;
1282 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
1283 pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
1284 + targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
1285 break;
1286 case QCA9377_1_0_DEVICE_ID:
1287 hw_rev = ATH10K_HW_QCA9377;
1288 pci_ps = true;
1289 pci_soft_reset = NULL;
1290 pci_hard_reset = ath10k_pci_qca6174_chip_reset;
1291 + targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
1292 break;
1293 default:
1294 WARN_ON(1);
1295 @@ -3241,6 +3253,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
1296 ar_pci->bus_ops = &ath10k_pci_bus_ops;
1297 ar_pci->pci_soft_reset = pci_soft_reset;
1298 ar_pci->pci_hard_reset = pci_hard_reset;
1299 + ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
1300
1301 ar->id.vendor = pdev->vendor;
1302 ar->id.device = pdev->device;
1303 diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
1304 index 9854ad56b2de..577bb87ab2f6 100644
1305 --- a/drivers/net/wireless/ath/ath10k/pci.h
1306 +++ b/drivers/net/wireless/ath/ath10k/pci.h
1307 @@ -238,6 +238,11 @@ struct ath10k_pci {
1308 /* Chip specific pci full reset function */
1309 int (*pci_hard_reset)(struct ath10k *ar);
1310
1311 + /* chip specific methods for converting target CPU virtual address
1312 + * space to CE address space
1313 + */
1314 + u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
1315 +
1316 /* Keep this entry in the last, memory for struct ath10k_ahb is
1317 * allocated (ahb support enabled case) in the continuation of
1318 * this struct.
1319 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
1320 index e64557c35553..6f8a4b074c31 100644
1321 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
1322 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
1323 @@ -32,16 +32,25 @@ static int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
1324 {
1325 void *dump;
1326 size_t ramsize;
1327 + int err;
1328
1329 ramsize = brcmf_bus_get_ramsize(bus);
1330 - if (ramsize) {
1331 - dump = vzalloc(len + ramsize);
1332 - if (!dump)
1333 - return -ENOMEM;
1334 - memcpy(dump, data, len);
1335 - brcmf_bus_get_memdump(bus, dump + len, ramsize);
1336 - dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL);
1337 + if (!ramsize)
1338 + return -ENOTSUPP;
1339 +
1340 + dump = vzalloc(len + ramsize);
1341 + if (!dump)
1342 + return -ENOMEM;
1343 +
1344 + memcpy(dump, data, len);
1345 + err = brcmf_bus_get_memdump(bus, dump + len, ramsize);
1346 + if (err) {
1347 + vfree(dump);
1348 + return err;
1349 }
1350 +
1351 + dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL);
1352 +
1353 return 0;
1354 }
1355
1356 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
1357 index 0556d139b719..092ae0024f22 100644
1358 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
1359 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
1360 @@ -499,15 +499,17 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
1361 switch (info->control.vif->type) {
1362 case NL80211_IFTYPE_AP:
1363 /*
1364 - * handle legacy hostapd as well, where station may be added
1365 - * only after assoc.
1366 + * Handle legacy hostapd as well, where station may be added
1367 + * only after assoc. Take care of the case where we send a
1368 + * deauth to a station that we don't have.
1369 */
1370 - if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc))
1371 + if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
1372 + ieee80211_is_deauth(fc))
1373 return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
1374 if (info->hw_queue == info->control.vif->cab_queue)
1375 return info->hw_queue;
1376
1377 - WARN_ON_ONCE(1);
1378 + WARN_ONCE(1, "fc=0x%02x", le16_to_cpu(fc));
1379 return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
1380 case NL80211_IFTYPE_P2P_DEVICE:
1381 if (ieee80211_is_mgmt(fc))
1382 diff --git a/drivers/pci/access.c b/drivers/pci/access.c
1383 index d11cdbb8fba3..7b5cf6d1181a 100644
1384 --- a/drivers/pci/access.c
1385 +++ b/drivers/pci/access.c
1386 @@ -672,8 +672,9 @@ void pci_cfg_access_unlock(struct pci_dev *dev)
1387 WARN_ON(!dev->block_cfg_access);
1388
1389 dev->block_cfg_access = 0;
1390 - wake_up_all(&pci_cfg_wait);
1391 raw_spin_unlock_irqrestore(&pci_lock, flags);
1392 +
1393 + wake_up_all(&pci_cfg_wait);
1394 }
1395 EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
1396
1397 diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
1398 index 3455f752d5e4..0e9a9dbeb184 100644
1399 --- a/drivers/pci/msi.c
1400 +++ b/drivers/pci/msi.c
1401 @@ -730,7 +730,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
1402 ret = 0;
1403 out:
1404 kfree(masks);
1405 - return 0;
1406 + return ret;
1407 }
1408
1409 static void msix_program_entries(struct pci_dev *dev,
1410 diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
1411 index 9f713b832ba3..5c768c4627d3 100644
1412 --- a/drivers/platform/x86/intel_mid_thermal.c
1413 +++ b/drivers/platform/x86/intel_mid_thermal.c
1414 @@ -550,6 +550,7 @@ static const struct platform_device_id therm_id_table[] = {
1415 { "msic_thermal", 1 },
1416 { }
1417 };
1418 +MODULE_DEVICE_TABLE(platform, therm_id_table);
1419
1420 static struct platform_driver mid_thermal_driver = {
1421 .driver = {
1422 diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
1423 index 1de089019268..5ecd40884f01 100644
1424 --- a/drivers/s390/block/dasd.c
1425 +++ b/drivers/s390/block/dasd.c
1426 @@ -1704,8 +1704,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1427 /* check for for attention message */
1428 if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
1429 device = dasd_device_from_cdev_locked(cdev);
1430 - device->discipline->check_attention(device, irb->esw.esw1.lpum);
1431 - dasd_put_device(device);
1432 + if (!IS_ERR(device)) {
1433 + device->discipline->check_attention(device,
1434 + irb->esw.esw1.lpum);
1435 + dasd_put_device(device);
1436 + }
1437 }
1438
1439 if (!cqr)
1440 diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
1441 index 6678d1fd897b..065f11a1964d 100644
1442 --- a/drivers/scsi/aacraid/aachba.c
1443 +++ b/drivers/scsi/aacraid/aachba.c
1444 @@ -2954,16 +2954,11 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
1445 return;
1446
1447 BUG_ON(fibptr == NULL);
1448 - dev = fibptr->dev;
1449 -
1450 - scsi_dma_unmap(scsicmd);
1451
1452 - /* expose physical device if expose_physicald flag is on */
1453 - if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
1454 - && expose_physicals > 0)
1455 - aac_expose_phy_device(scsicmd);
1456 + dev = fibptr->dev;
1457
1458 srbreply = (struct aac_srb_reply *) fib_data(fibptr);
1459 +
1460 scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
1461
1462 if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
1463 @@ -2976,158 +2971,176 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
1464 */
1465 scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
1466 - le32_to_cpu(srbreply->data_xfer_length));
1467 - /*
1468 - * First check the fib status
1469 - */
1470 + }
1471
1472 - if (le32_to_cpu(srbreply->status) != ST_OK) {
1473 - int len;
1474
1475 - printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
1476 - len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
1477 - SCSI_SENSE_BUFFERSIZE);
1478 - scsicmd->result = DID_ERROR << 16
1479 - | COMMAND_COMPLETE << 8
1480 - | SAM_STAT_CHECK_CONDITION;
1481 - memcpy(scsicmd->sense_buffer,
1482 - srbreply->sense_data, len);
1483 - }
1484 + scsi_dma_unmap(scsicmd);
1485
1486 - /*
1487 - * Next check the srb status
1488 - */
1489 - switch ((le32_to_cpu(srbreply->srb_status))&0x3f) {
1490 - case SRB_STATUS_ERROR_RECOVERY:
1491 - case SRB_STATUS_PENDING:
1492 - case SRB_STATUS_SUCCESS:
1493 - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1494 - break;
1495 - case SRB_STATUS_DATA_OVERRUN:
1496 - switch (scsicmd->cmnd[0]) {
1497 - case READ_6:
1498 - case WRITE_6:
1499 - case READ_10:
1500 - case WRITE_10:
1501 - case READ_12:
1502 - case WRITE_12:
1503 - case READ_16:
1504 - case WRITE_16:
1505 - if (le32_to_cpu(srbreply->data_xfer_length)
1506 - < scsicmd->underflow)
1507 - printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
1508 - else
1509 - printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
1510 - scsicmd->result = DID_ERROR << 16
1511 - | COMMAND_COMPLETE << 8;
1512 - break;
1513 - case INQUIRY: {
1514 - scsicmd->result = DID_OK << 16
1515 - | COMMAND_COMPLETE << 8;
1516 - break;
1517 - }
1518 - default:
1519 - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1520 - break;
1521 - }
1522 - break;
1523 - case SRB_STATUS_ABORTED:
1524 - scsicmd->result = DID_ABORT << 16 | ABORT << 8;
1525 - break;
1526 - case SRB_STATUS_ABORT_FAILED:
1527 - /*
1528 - * Not sure about this one - but assuming the
1529 - * hba was trying to abort for some reason
1530 - */
1531 - scsicmd->result = DID_ERROR << 16 | ABORT << 8;
1532 - break;
1533 - case SRB_STATUS_PARITY_ERROR:
1534 - scsicmd->result = DID_PARITY << 16
1535 - | MSG_PARITY_ERROR << 8;
1536 - break;
1537 - case SRB_STATUS_NO_DEVICE:
1538 - case SRB_STATUS_INVALID_PATH_ID:
1539 - case SRB_STATUS_INVALID_TARGET_ID:
1540 - case SRB_STATUS_INVALID_LUN:
1541 - case SRB_STATUS_SELECTION_TIMEOUT:
1542 - scsicmd->result = DID_NO_CONNECT << 16
1543 - | COMMAND_COMPLETE << 8;
1544 - break;
1545 + /* expose physical device if expose_physicald flag is on */
1546 + if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
1547 + && expose_physicals > 0)
1548 + aac_expose_phy_device(scsicmd);
1549
1550 - case SRB_STATUS_COMMAND_TIMEOUT:
1551 - case SRB_STATUS_TIMEOUT:
1552 - scsicmd->result = DID_TIME_OUT << 16
1553 - | COMMAND_COMPLETE << 8;
1554 - break;
1555 + /*
1556 + * First check the fib status
1557 + */
1558
1559 - case SRB_STATUS_BUSY:
1560 - scsicmd->result = DID_BUS_BUSY << 16
1561 - | COMMAND_COMPLETE << 8;
1562 - break;
1563 + if (le32_to_cpu(srbreply->status) != ST_OK) {
1564 + int len;
1565
1566 - case SRB_STATUS_BUS_RESET:
1567 - scsicmd->result = DID_RESET << 16
1568 - | COMMAND_COMPLETE << 8;
1569 - break;
1570 + pr_warn("aac_srb_callback: srb failed, status = %d\n",
1571 + le32_to_cpu(srbreply->status));
1572 + len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
1573 + SCSI_SENSE_BUFFERSIZE);
1574 + scsicmd->result = DID_ERROR << 16
1575 + | COMMAND_COMPLETE << 8
1576 + | SAM_STAT_CHECK_CONDITION;
1577 + memcpy(scsicmd->sense_buffer,
1578 + srbreply->sense_data, len);
1579 + }
1580
1581 - case SRB_STATUS_MESSAGE_REJECTED:
1582 + /*
1583 + * Next check the srb status
1584 + */
1585 + switch ((le32_to_cpu(srbreply->srb_status))&0x3f) {
1586 + case SRB_STATUS_ERROR_RECOVERY:
1587 + case SRB_STATUS_PENDING:
1588 + case SRB_STATUS_SUCCESS:
1589 + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1590 + break;
1591 + case SRB_STATUS_DATA_OVERRUN:
1592 + switch (scsicmd->cmnd[0]) {
1593 + case READ_6:
1594 + case WRITE_6:
1595 + case READ_10:
1596 + case WRITE_10:
1597 + case READ_12:
1598 + case WRITE_12:
1599 + case READ_16:
1600 + case WRITE_16:
1601 + if (le32_to_cpu(srbreply->data_xfer_length)
1602 + < scsicmd->underflow)
1603 + pr_warn("aacraid: SCSI CMD underflow\n");
1604 + else
1605 + pr_warn("aacraid: SCSI CMD Data Overrun\n");
1606 scsicmd->result = DID_ERROR << 16
1607 - | MESSAGE_REJECT << 8;
1608 + | COMMAND_COMPLETE << 8;
1609 + break;
1610 + case INQUIRY:
1611 + scsicmd->result = DID_OK << 16
1612 + | COMMAND_COMPLETE << 8;
1613 break;
1614 - case SRB_STATUS_REQUEST_FLUSHED:
1615 - case SRB_STATUS_ERROR:
1616 - case SRB_STATUS_INVALID_REQUEST:
1617 - case SRB_STATUS_REQUEST_SENSE_FAILED:
1618 - case SRB_STATUS_NO_HBA:
1619 - case SRB_STATUS_UNEXPECTED_BUS_FREE:
1620 - case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
1621 - case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
1622 - case SRB_STATUS_DELAYED_RETRY:
1623 - case SRB_STATUS_BAD_FUNCTION:
1624 - case SRB_STATUS_NOT_STARTED:
1625 - case SRB_STATUS_NOT_IN_USE:
1626 - case SRB_STATUS_FORCE_ABORT:
1627 - case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
1628 default:
1629 + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1630 + break;
1631 + }
1632 + break;
1633 + case SRB_STATUS_ABORTED:
1634 + scsicmd->result = DID_ABORT << 16 | ABORT << 8;
1635 + break;
1636 + case SRB_STATUS_ABORT_FAILED:
1637 + /*
1638 + * Not sure about this one - but assuming the
1639 + * hba was trying to abort for some reason
1640 + */
1641 + scsicmd->result = DID_ERROR << 16 | ABORT << 8;
1642 + break;
1643 + case SRB_STATUS_PARITY_ERROR:
1644 + scsicmd->result = DID_PARITY << 16
1645 + | MSG_PARITY_ERROR << 8;
1646 + break;
1647 + case SRB_STATUS_NO_DEVICE:
1648 + case SRB_STATUS_INVALID_PATH_ID:
1649 + case SRB_STATUS_INVALID_TARGET_ID:
1650 + case SRB_STATUS_INVALID_LUN:
1651 + case SRB_STATUS_SELECTION_TIMEOUT:
1652 + scsicmd->result = DID_NO_CONNECT << 16
1653 + | COMMAND_COMPLETE << 8;
1654 + break;
1655 +
1656 + case SRB_STATUS_COMMAND_TIMEOUT:
1657 + case SRB_STATUS_TIMEOUT:
1658 + scsicmd->result = DID_TIME_OUT << 16
1659 + | COMMAND_COMPLETE << 8;
1660 + break;
1661 +
1662 + case SRB_STATUS_BUSY:
1663 + scsicmd->result = DID_BUS_BUSY << 16
1664 + | COMMAND_COMPLETE << 8;
1665 + break;
1666 +
1667 + case SRB_STATUS_BUS_RESET:
1668 + scsicmd->result = DID_RESET << 16
1669 + | COMMAND_COMPLETE << 8;
1670 + break;
1671 +
1672 + case SRB_STATUS_MESSAGE_REJECTED:
1673 + scsicmd->result = DID_ERROR << 16
1674 + | MESSAGE_REJECT << 8;
1675 + break;
1676 + case SRB_STATUS_REQUEST_FLUSHED:
1677 + case SRB_STATUS_ERROR:
1678 + case SRB_STATUS_INVALID_REQUEST:
1679 + case SRB_STATUS_REQUEST_SENSE_FAILED:
1680 + case SRB_STATUS_NO_HBA:
1681 + case SRB_STATUS_UNEXPECTED_BUS_FREE:
1682 + case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
1683 + case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
1684 + case SRB_STATUS_DELAYED_RETRY:
1685 + case SRB_STATUS_BAD_FUNCTION:
1686 + case SRB_STATUS_NOT_STARTED:
1687 + case SRB_STATUS_NOT_IN_USE:
1688 + case SRB_STATUS_FORCE_ABORT:
1689 + case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
1690 + default:
1691 #ifdef AAC_DETAILED_STATUS_INFO
1692 - printk(KERN_INFO "aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
1693 - le32_to_cpu(srbreply->srb_status) & 0x3F,
1694 - aac_get_status_string(
1695 - le32_to_cpu(srbreply->srb_status) & 0x3F),
1696 - scsicmd->cmnd[0],
1697 - le32_to_cpu(srbreply->scsi_status));
1698 + pr_info("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x -scsi status 0x%x\n",
1699 + le32_to_cpu(srbreply->srb_status) & 0x3F,
1700 + aac_get_status_string(
1701 + le32_to_cpu(srbreply->srb_status) & 0x3F),
1702 + scsicmd->cmnd[0],
1703 + le32_to_cpu(srbreply->scsi_status));
1704 #endif
1705 - if ((scsicmd->cmnd[0] == ATA_12)
1706 - || (scsicmd->cmnd[0] == ATA_16)) {
1707 - if (scsicmd->cmnd[2] & (0x01 << 5)) {
1708 - scsicmd->result = DID_OK << 16
1709 - | COMMAND_COMPLETE << 8;
1710 - break;
1711 - } else {
1712 - scsicmd->result = DID_ERROR << 16
1713 - | COMMAND_COMPLETE << 8;
1714 - break;
1715 - }
1716 + /*
1717 + * When the CC bit is SET by the host in ATA pass thru CDB,
1718 + * driver is supposed to return DID_OK
1719 + *
1720 + * When the CC bit is RESET by the host, driver should
1721 + * return DID_ERROR
1722 + */
1723 + if ((scsicmd->cmnd[0] == ATA_12)
1724 + || (scsicmd->cmnd[0] == ATA_16)) {
1725 +
1726 + if (scsicmd->cmnd[2] & (0x01 << 5)) {
1727 + scsicmd->result = DID_OK << 16
1728 + | COMMAND_COMPLETE << 8;
1729 + break;
1730 } else {
1731 scsicmd->result = DID_ERROR << 16
1732 | COMMAND_COMPLETE << 8;
1733 - break;
1734 + break;
1735 }
1736 + } else {
1737 + scsicmd->result = DID_ERROR << 16
1738 + | COMMAND_COMPLETE << 8;
1739 + break;
1740 }
1741 - if (le32_to_cpu(srbreply->scsi_status)
1742 - == SAM_STAT_CHECK_CONDITION) {
1743 - int len;
1744 + }
1745 + if (le32_to_cpu(srbreply->scsi_status)
1746 + == SAM_STAT_CHECK_CONDITION) {
1747 + int len;
1748
1749 - scsicmd->result |= SAM_STAT_CHECK_CONDITION;
1750 - len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
1751 - SCSI_SENSE_BUFFERSIZE);
1752 + scsicmd->result |= SAM_STAT_CHECK_CONDITION;
1753 + len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
1754 + SCSI_SENSE_BUFFERSIZE);
1755 #ifdef AAC_DETAILED_STATUS_INFO
1756 - printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
1757 - le32_to_cpu(srbreply->status), len);
1758 + pr_warn("aac_srb_callback: check condition, status = %d len=%d\n",
1759 + le32_to_cpu(srbreply->status), len);
1760 #endif
1761 - memcpy(scsicmd->sense_buffer,
1762 - srbreply->sense_data, len);
1763 - }
1764 + memcpy(scsicmd->sense_buffer,
1765 + srbreply->sense_data, len);
1766 }
1767 +
1768 /*
1769 * OR in the scsi status (already shifted up a bit)
1770 */
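
The new comment in the aacraid hunk above spells out the ATA pass-through rule: for an ATA_12/ATA_16 CDB the driver reports DID_OK when the host set the CC bit (bit 5 of byte 2) and DID_ERROR otherwise. As a rough illustration only -- not part of the patch -- here is a stand-alone user-space sketch of that bit test, with the SCSI opcode and host-byte values written out as plain constants:

/* Illustrative sketch only -- simplified stand-ins for the kernel's constants. */
#include <stdio.h>

#define ATA_12      0xa1
#define ATA_16      0x85
#define DID_OK      0x00
#define DID_ERROR   0x07

static int ata_passthru_host_byte(const unsigned char *cdb)
{
        if (cdb[0] != ATA_12 && cdb[0] != ATA_16)
                return DID_ERROR;               /* not an ATA pass-through CDB */
        /* CC bit = bit 5 of byte 2: set means "return DID_OK" */
        return (cdb[2] & (0x01 << 5)) ? DID_OK : DID_ERROR;
}

int main(void)
{
        unsigned char cc_set[16]   = { ATA_16, 0x00, 0x20 };
        unsigned char cc_clear[16] = { ATA_16, 0x00, 0x00 };

        printf("CC set   -> host byte %d\n", ata_passthru_host_byte(cc_set));
        printf("CC clear -> host byte %d\n", ata_passthru_host_byte(cc_clear));
        return 0;
}

Built with gcc, the two test CDBs print host byte 0 (DID_OK) and 7 (DID_ERROR) respectively.
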
1771 diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
1772 index bd04bd01d34a..a156451553a7 100644
1773 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
1774 +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
1775 @@ -1960,7 +1960,8 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
1776 */
1777 static void
1778 megasas_build_syspd_fusion(struct megasas_instance *instance,
1779 - struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, u8 fp_possible)
1780 + struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd,
1781 + bool fp_possible)
1782 {
1783 u32 device_id;
1784 struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1785 @@ -2064,6 +2065,8 @@ megasas_build_io_fusion(struct megasas_instance *instance,
1786 u16 sge_count;
1787 u8 cmd_type;
1788 struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
1789 + struct MR_PRIV_DEVICE *mr_device_priv_data;
1790 + mr_device_priv_data = scp->device->hostdata;
1791
1792 /* Zero out some fields so they don't get reused */
1793 memset(io_request->LUN, 0x0, 8);
1794 @@ -2092,12 +2095,14 @@ megasas_build_io_fusion(struct megasas_instance *instance,
1795 megasas_build_ld_nonrw_fusion(instance, scp, cmd);
1796 break;
1797 case READ_WRITE_SYSPDIO:
1798 + megasas_build_syspd_fusion(instance, scp, cmd, true);
1799 + break;
1800 case NON_READ_WRITE_SYSPDIO:
1801 - if (instance->secure_jbod_support &&
1802 - (cmd_type == NON_READ_WRITE_SYSPDIO))
1803 - megasas_build_syspd_fusion(instance, scp, cmd, 0);
1804 + if (instance->secure_jbod_support ||
1805 + mr_device_priv_data->is_tm_capable)
1806 + megasas_build_syspd_fusion(instance, scp, cmd, false);
1807 else
1808 - megasas_build_syspd_fusion(instance, scp, cmd, 1);
1809 + megasas_build_syspd_fusion(instance, scp, cmd, true);
1810 break;
1811 default:
1812 break;
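
The megaraid_sas hunk above changes the SYSPD dispatch so that READ_WRITE_SYSPDIO commands always take the fast path, while NON_READ_WRITE_SYSPDIO commands fall back to the firmware path when the controller has secure-JBOD support or the device is task-management capable. A minimal sketch of that decision, outside the driver (struct ctx is a made-up stand-in for the megasas_instance / MR_PRIV_DEVICE fields referenced in the diff):

#include <stdbool.h>
#include <stdio.h>

/* Made-up stand-in for the fields the diff consults. */
struct ctx {
        bool secure_jbod_support;
        bool is_tm_capable;
};

static bool syspd_fp_possible(const struct ctx *c, bool is_read_write)
{
        if (is_read_write)
                return true;    /* READ_WRITE_SYSPDIO: always fast path */
        /* NON_READ_WRITE_SYSPDIO: firmware path when either condition holds */
        return !(c->secure_jbod_support || c->is_tm_capable);
}

int main(void)
{
        struct ctx c = { .secure_jbod_support = false, .is_tm_capable = true };

        printf("non-RW on TM-capable device -> fp_possible=%d\n",
               syspd_fp_possible(&c, false));
        printf("RW                          -> fp_possible=%d\n",
               syspd_fp_possible(&c, true));
        return 0;
}
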
1813 diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
1814 index 3d46b1b1fa18..7de992c19ff6 100644
1815 --- a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
1816 +++ b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
1817 @@ -17,6 +17,7 @@
1818 #include <linux/irqdomain.h>
1819 #include <linux/msi.h>
1820 #include "../include/mc-bus.h"
1821 +#include "fsl-mc-private.h"
1822
1823 /*
1824 * Generate a unique ID identifying the interrupt (only used within the MSI
1825 diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
1826 index 7a6ac640752f..eaeb3c51e14b 100644
1827 --- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
1828 +++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
1829 @@ -17,6 +17,7 @@
1830 #include <linux/of.h>
1831 #include <linux/of_irq.h>
1832 #include "../include/mc-bus.h"
1833 +#include "fsl-mc-private.h"
1834
1835 static struct irq_chip its_msi_irq_chip = {
1836 .name = "fsl-mc-bus-msi",
1837 diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
1838 index 6fc985571cba..e533088c017c 100644
1839 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
1840 +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
1841 @@ -1213,23 +1213,21 @@ struct hsm_action_item {
1842 * \retval buffer
1843 */
1844 static inline char *hai_dump_data_field(struct hsm_action_item *hai,
1845 - char *buffer, int len)
1846 + char *buffer, size_t len)
1847 {
1848 - int i, sz, data_len;
1849 + int i, data_len;
1850 char *ptr;
1851
1852 ptr = buffer;
1853 - sz = len;
1854 data_len = hai->hai_len - sizeof(*hai);
1855 - for (i = 0 ; (i < data_len) && (sz > 0) ; i++) {
1856 - int cnt;
1857 -
1858 - cnt = snprintf(ptr, sz, "%.2X",
1859 - (unsigned char)hai->hai_data[i]);
1860 - ptr += cnt;
1861 - sz -= cnt;
1862 + for (i = 0; (i < data_len) && (len > 2); i++) {
1863 + snprintf(ptr, 3, "%02X", (unsigned char)hai->hai_data[i]);
1864 + ptr += 2;
1865 + len -= 2;
1866 }
1867 +
1868 *ptr = '\0';
1869 +
1870 return buffer;
1871 }
1872
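
The rewritten hai_dump_data_field() above prints each payload byte as two hex digits and stops while more than two bytes of buffer remain, so the trailing NUL always fits. The same bounded hex dump as a stand-alone sketch (plain byte array instead of the hsm_action_item layout):

#include <stdio.h>

/* Dump 'data_len' bytes as hex into 'buffer' of size 'len',
 * always leaving room for the terminating NUL. */
static char *dump_hex(const unsigned char *data, int data_len,
                      char *buffer, size_t len)
{
        char *ptr = buffer;
        int i;

        for (i = 0; i < data_len && len > 2; i++) {
                snprintf(ptr, 3, "%02X", data[i]);
                ptr += 2;
                len -= 2;
        }
        *ptr = '\0';
        return buffer;
}

int main(void)
{
        unsigned char data[] = { 0xde, 0xad, 0xbe, 0xef };
        char small[6];  /* room for two bytes of hex plus the NUL */

        printf("%s\n", dump_hex(data, sizeof(data), small, sizeof(small)));
        return 0;
}

With the four input bytes and a six-byte buffer this prints "DEAD": the dump is truncated rather than overrun.
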
1873 diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
1874 index 3c48b4fb96f1..d18ab3f28c70 100644
1875 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
1876 +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
1877 @@ -546,6 +546,13 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
1878 if (!lock)
1879 return NULL;
1880
1881 + if (lock->l_export && lock->l_export->exp_failed) {
1882 + CDEBUG(D_INFO, "lock export failed: lock %p, exp %p\n",
1883 + lock, lock->l_export);
1884 + LDLM_LOCK_PUT(lock);
1885 + return NULL;
1886 + }
1887 +
1888 /* It's unlikely but possible that someone marked the lock as
1889 * destroyed after we did handle2object on it
1890 */
1891 diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
1892 index 26f3a37873a7..0cb70c3a1a0b 100644
1893 --- a/drivers/staging/lustre/lustre/llite/rw26.c
1894 +++ b/drivers/staging/lustre/lustre/llite/rw26.c
1895 @@ -354,6 +354,10 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
1896 if (!lli->lli_has_smd)
1897 return -EBADF;
1898
1899 + /* Check EOF by ourselves */
1900 + if (iov_iter_rw(iter) == READ && file_offset >= i_size_read(inode))
1901 + return 0;
1902 +
1903 /* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
1904 if ((file_offset & ~PAGE_MASK) || (count & ~PAGE_MASK))
1905 return -EINVAL;
1906 diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
1907 index 7dbb2b946acf..cd19ce811e62 100644
1908 --- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
1909 +++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
1910 @@ -744,16 +744,18 @@ static int lmv_hsm_req_count(struct lmv_obd *lmv,
1911 /* count how many requests must be sent to the given target */
1912 for (i = 0; i < hur->hur_request.hr_itemcount; i++) {
1913 curr_tgt = lmv_find_target(lmv, &hur->hur_user_item[i].hui_fid);
1914 + if (IS_ERR(curr_tgt))
1915 + return PTR_ERR(curr_tgt);
1916 if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid))
1917 nr++;
1918 }
1919 return nr;
1920 }
1921
1922 -static void lmv_hsm_req_build(struct lmv_obd *lmv,
1923 - struct hsm_user_request *hur_in,
1924 - const struct lmv_tgt_desc *tgt_mds,
1925 - struct hsm_user_request *hur_out)
1926 +static int lmv_hsm_req_build(struct lmv_obd *lmv,
1927 + struct hsm_user_request *hur_in,
1928 + const struct lmv_tgt_desc *tgt_mds,
1929 + struct hsm_user_request *hur_out)
1930 {
1931 int i, nr_out;
1932 struct lmv_tgt_desc *curr_tgt;
1933 @@ -764,6 +766,8 @@ static void lmv_hsm_req_build(struct lmv_obd *lmv,
1934 for (i = 0; i < hur_in->hur_request.hr_itemcount; i++) {
1935 curr_tgt = lmv_find_target(lmv,
1936 &hur_in->hur_user_item[i].hui_fid);
1937 + if (IS_ERR(curr_tgt))
1938 + return PTR_ERR(curr_tgt);
1939 if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid)) {
1940 hur_out->hur_user_item[nr_out] =
1941 hur_in->hur_user_item[i];
1942 @@ -773,6 +777,8 @@ static void lmv_hsm_req_build(struct lmv_obd *lmv,
1943 hur_out->hur_request.hr_itemcount = nr_out;
1944 memcpy(hur_data(hur_out), hur_data(hur_in),
1945 hur_in->hur_request.hr_data_len);
1946 +
1947 + return 0;
1948 }
1949
1950 static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len,
1951 @@ -1052,15 +1058,17 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
1952 } else {
1953 /* split fid list to their respective MDS */
1954 for (i = 0; i < count; i++) {
1955 - unsigned int nr, reqlen;
1956 - int rc1;
1957 struct hsm_user_request *req;
1958 + size_t reqlen;
1959 + int nr, rc1;
1960
1961 tgt = lmv->tgts[i];
1962 if (!tgt || !tgt->ltd_exp)
1963 continue;
1964
1965 nr = lmv_hsm_req_count(lmv, hur, tgt);
1966 + if (nr < 0)
1967 + return nr;
1968 if (nr == 0) /* nothing for this MDS */
1969 continue;
1970
1971 @@ -1072,10 +1080,13 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
1972 if (!req)
1973 return -ENOMEM;
1974
1975 - lmv_hsm_req_build(lmv, hur, tgt, req);
1976 + rc1 = lmv_hsm_req_build(lmv, hur, tgt, req);
1977 + if (rc1 < 0)
1978 + goto hsm_req_err;
1979
1980 rc1 = obd_iocontrol(cmd, tgt->ltd_exp, reqlen,
1981 req, uarg);
1982 +hsm_req_err:
1983 if (rc1 != 0 && rc == 0)
1984 rc = rc1;
1985 kvfree(req);
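
The lmv_obd.c fixes above make lmv_hsm_req_count() and lmv_hsm_req_build() return PTR_ERR(curr_tgt) instead of dereferencing an ERR_PTR when lmv_find_target() fails, and lmv_iocontrol() now checks those results. A user-space imitation of the ERR_PTR/IS_ERR/PTR_ERR convention used here; the three helpers below are simplified stand-ins for the kernel's linux/err.h macros:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err)      { return (void *)err; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct tgt { int id; };

static struct tgt *find_target(int fid)
{
        static struct tgt t = { 42 };
        return (fid < 0) ? ERR_PTR(-ENODEV) : &t;   /* error path vs. hit */
}

static int count_reqs(const int *fids, int n)
{
        int i, nr = 0;

        for (i = 0; i < n; i++) {
                struct tgt *t = find_target(fids[i]);
                if (IS_ERR(t))
                        return PTR_ERR(t);  /* propagate instead of dereferencing */
                nr++;
        }
        return nr;
}

int main(void)
{
        int good[] = { 1, 2 }, bad[] = { 1, -1 };

        printf("good: %d\n", count_reqs(good, 2));
        printf("bad:  %d\n", count_reqs(bad, 2));
        return 0;
}
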
1986 diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
1987 index 72f39308eebb..9d34848d5458 100644
1988 --- a/drivers/staging/lustre/lustre/ptlrpc/service.c
1989 +++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
1990 @@ -1264,20 +1264,15 @@ static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
1991 */
1992 if (req->rq_ops->hpreq_check) {
1993 rc = req->rq_ops->hpreq_check(req);
1994 - /**
1995 - * XXX: Out of all current
1996 - * ptlrpc_hpreq_ops::hpreq_check(), only
1997 - * ldlm_cancel_hpreq_check() can return an error code;
1998 - * other functions assert in similar places, which seems
1999 - * odd. What also does not seem right is that handlers
2000 - * for those RPCs do not assert on the same checks, but
2001 - * rather handle the error cases. e.g. see
2002 - * ost_rw_hpreq_check(), and ost_brw_read(),
2003 - * ost_brw_write().
2004 + if (rc == -ESTALE) {
2005 + req->rq_status = rc;
2006 + ptlrpc_error(req);
2007 + }
2008 + /** can only return error,
2009 + * 0 for normal request,
2010 + * or 1 for high priority request
2011 */
2012 - if (rc < 0)
2013 - return rc;
2014 - LASSERT(rc == 0 || rc == 1);
2015 + LASSERT(rc <= 1);
2016 }
2017
2018 spin_lock_bh(&req->rq_export->exp_rpc_lock);
2019 diff --git a/drivers/staging/rtl8712/ieee80211.h b/drivers/staging/rtl8712/ieee80211.h
2020 index 67ab58084e8a..68fd65e80906 100644
2021 --- a/drivers/staging/rtl8712/ieee80211.h
2022 +++ b/drivers/staging/rtl8712/ieee80211.h
2023 @@ -138,51 +138,51 @@ struct ieee_ibss_seq {
2024 };
2025
2026 struct ieee80211_hdr {
2027 - u16 frame_ctl;
2028 - u16 duration_id;
2029 + __le16 frame_ctl;
2030 + __le16 duration_id;
2031 u8 addr1[ETH_ALEN];
2032 u8 addr2[ETH_ALEN];
2033 u8 addr3[ETH_ALEN];
2034 - u16 seq_ctl;
2035 + __le16 seq_ctl;
2036 u8 addr4[ETH_ALEN];
2037 -} __packed;
2038 +} __packed __aligned(2);
2039
2040 struct ieee80211_hdr_3addr {
2041 - u16 frame_ctl;
2042 - u16 duration_id;
2043 + __le16 frame_ctl;
2044 + __le16 duration_id;
2045 u8 addr1[ETH_ALEN];
2046 u8 addr2[ETH_ALEN];
2047 u8 addr3[ETH_ALEN];
2048 - u16 seq_ctl;
2049 -} __packed;
2050 + __le16 seq_ctl;
2051 +} __packed __aligned(2);
2052
2053 struct ieee80211_hdr_qos {
2054 - u16 frame_ctl;
2055 - u16 duration_id;
2056 + __le16 frame_ctl;
2057 + __le16 duration_id;
2058 u8 addr1[ETH_ALEN];
2059 u8 addr2[ETH_ALEN];
2060 u8 addr3[ETH_ALEN];
2061 - u16 seq_ctl;
2062 + __le16 seq_ctl;
2063 u8 addr4[ETH_ALEN];
2064 - u16 qc;
2065 -} __packed;
2066 + __le16 qc;
2067 +} __packed __aligned(2);
2068
2069 struct ieee80211_hdr_3addr_qos {
2070 - u16 frame_ctl;
2071 - u16 duration_id;
2072 + __le16 frame_ctl;
2073 + __le16 duration_id;
2074 u8 addr1[ETH_ALEN];
2075 u8 addr2[ETH_ALEN];
2076 u8 addr3[ETH_ALEN];
2077 - u16 seq_ctl;
2078 - u16 qc;
2079 + __le16 seq_ctl;
2080 + __le16 qc;
2081 } __packed;
2082
2083 struct eapol {
2084 u8 snap[6];
2085 - u16 ethertype;
2086 + __be16 ethertype;
2087 u8 version;
2088 u8 type;
2089 - u16 length;
2090 + __le16 length;
2091 } __packed;
2092
2093 enum eap_type {
2094 @@ -514,13 +514,13 @@ struct ieee80211_security {
2095 */
2096
2097 struct ieee80211_header_data {
2098 - u16 frame_ctl;
2099 - u16 duration_id;
2100 + __le16 frame_ctl;
2101 + __le16 duration_id;
2102 u8 addr1[6];
2103 u8 addr2[6];
2104 u8 addr3[6];
2105 - u16 seq_ctrl;
2106 -};
2107 + __le16 seq_ctrl;
2108 +} __packed __aligned(2);
2109
2110 #define BEACON_PROBE_SSID_ID_POSITION 12
2111
2112 @@ -552,18 +552,18 @@ struct ieee80211_info_element {
2113 /*
2114 * These are the data types that can make up management packets
2115 *
2116 - u16 auth_algorithm;
2117 - u16 auth_sequence;
2118 - u16 beacon_interval;
2119 - u16 capability;
2120 + __le16 auth_algorithm;
2121 + __le16 auth_sequence;
2122 + __le16 beacon_interval;
2123 + __le16 capability;
2124 u8 current_ap[ETH_ALEN];
2125 - u16 listen_interval;
2126 + __le16 listen_interval;
2127 struct {
2128 u16 association_id:14, reserved:2;
2129 } __packed;
2130 - u32 time_stamp[2];
2131 - u16 reason;
2132 - u16 status;
2133 + __le32 time_stamp[2];
2134 + __le16 reason;
2135 + __le16 status;
2136 */
2137
2138 #define IEEE80211_DEFAULT_TX_ESSID "Penguin"
2139 @@ -571,16 +571,16 @@ struct ieee80211_info_element {
2140
2141 struct ieee80211_authentication {
2142 struct ieee80211_header_data header;
2143 - u16 algorithm;
2144 - u16 transaction;
2145 - u16 status;
2146 + __le16 algorithm;
2147 + __le16 transaction;
2148 + __le16 status;
2149 } __packed;
2150
2151 struct ieee80211_probe_response {
2152 struct ieee80211_header_data header;
2153 - u32 time_stamp[2];
2154 - u16 beacon_interval;
2155 - u16 capability;
2156 + __le32 time_stamp[2];
2157 + __le16 beacon_interval;
2158 + __le16 capability;
2159 struct ieee80211_info_element info_element;
2160 } __packed;
2161
2162 @@ -590,16 +590,16 @@ struct ieee80211_probe_request {
2163
2164 struct ieee80211_assoc_request_frame {
2165 struct ieee80211_hdr_3addr header;
2166 - u16 capability;
2167 - u16 listen_interval;
2168 + __le16 capability;
2169 + __le16 listen_interval;
2170 struct ieee80211_info_element_hdr info_element;
2171 } __packed;
2172
2173 struct ieee80211_assoc_response_frame {
2174 struct ieee80211_hdr_3addr header;
2175 - u16 capability;
2176 - u16 status;
2177 - u16 aid;
2178 + __le16 capability;
2179 + __le16 status;
2180 + __le16 aid;
2181 } __packed;
2182
2183 struct ieee80211_txb {
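
The rtl8712 header changes above are type annotations only: on-air 802.11 and EAPOL fields become __le16/__be16/__le32 so sparse can flag missing byte-order conversions, and several header structs gain __aligned(2). A small host-side sketch of why the annotation matters -- a 16-bit wire field must go through an explicit little-endian conversion rather than a plain load (le16toh() from glibc's endian.h plays the role of le16_to_cpu() here):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <endian.h>     /* le16toh(): user-space analogue of le16_to_cpu() */

int main(void)
{
        /* frame_ctl 0x0080 (a beacon) as it appears on the wire: LSB first */
        unsigned char wire[2] = { 0x80, 0x00 };
        uint16_t raw;

        memcpy(&raw, wire, sizeof(raw));        /* raw is still in wire order */
        printf("converted frame_ctl = 0x%04x\n", le16toh(raw));
        return 0;
}
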
2184 diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
2185 index be38364c8a7c..c47863936858 100644
2186 --- a/drivers/staging/rtl8712/rtl871x_xmit.c
2187 +++ b/drivers/staging/rtl8712/rtl871x_xmit.c
2188 @@ -344,7 +344,8 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
2189 * some settings above.
2190 */
2191 if (check_fwstate(pmlmepriv, WIFI_MP_STATE))
2192 - pattrib->priority = (txdesc.txdw1 >> QSEL_SHT) & 0x1f;
2193 + pattrib->priority =
2194 + (le32_to_cpu(txdesc.txdw1) >> QSEL_SHT) & 0x1f;
2195 return _SUCCESS;
2196 }
2197
2198 @@ -485,7 +486,7 @@ static sint make_wlanhdr(struct _adapter *padapter, u8 *hdr,
2199 struct ieee80211_hdr *pwlanhdr = (struct ieee80211_hdr *)hdr;
2200 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
2201 struct qos_priv *pqospriv = &pmlmepriv->qospriv;
2202 - u16 *fctrl = &pwlanhdr->frame_ctl;
2203 + __le16 *fctrl = &pwlanhdr->frame_ctl;
2204
2205 memset(hdr, 0, WLANHDR_OFFSET);
2206 SetFrameSubType(fctrl, pattrib->subtype);
2207 @@ -574,7 +575,7 @@ static sint r8712_put_snap(u8 *data, u16 h_proto)
2208 snap->oui[0] = oui[0];
2209 snap->oui[1] = oui[1];
2210 snap->oui[2] = oui[2];
2211 - *(u16 *)(data + SNAP_SIZE) = htons(h_proto);
2212 + *(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
2213 return SNAP_SIZE + sizeof(u16);
2214 }
2215
2216 diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
2217 index 26e5e8507f03..9122ba25bb00 100644
2218 --- a/drivers/xen/manage.c
2219 +++ b/drivers/xen/manage.c
2220 @@ -277,8 +277,16 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
2221 err = xenbus_transaction_start(&xbt);
2222 if (err)
2223 return;
2224 - if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
2225 - pr_err("Unable to read sysrq code in control/sysrq\n");
2226 + err = xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key);
2227 + if (err < 0) {
2228 + /*
2229 + * The Xenstore watch fires directly after registering it and
2230 + * after a suspend/resume cycle. So ENOENT is no error but
2231 + * might happen in those cases.
2232 + */
2233 + if (err != -ENOENT)
2234 + pr_err("Error %d reading sysrq code in control/sysrq\n",
2235 + err);
2236 xenbus_transaction_end(xbt, 1);
2237 return;
2238 }
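
The xen sysrq change above treats -ENOENT from xenbus_scanf() as expected (per the new comment, the watch fires right after registration and after suspend/resume, before the key exists) and only complains about other errors. The same pattern, reduced to a sketch with a hypothetical read_key() helper:

#include <stdio.h>
#include <errno.h>

/* Hypothetical reader: returns 1 on success, -ENOENT if the key is absent,
 * another negative errno on real failures. */
static int read_key(const char *key, char *out)
{
        (void)key;
        (void)out;
        return -ENOENT;         /* simulate "watch fired before the key exists" */
}

int main(void)
{
        char val;
        int err = read_key("control/sysrq", &val);

        if (err < 0) {
                if (err != -ENOENT)     /* ENOENT is expected, stay quiet */
                        fprintf(stderr, "Error %d reading sysrq code\n", err);
                return 0;
        }
        printf("sysrq key = %c\n", val);
        return 0;
}
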
2239 diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
2240 index dd3e236d7a2b..d9cbda269462 100644
2241 --- a/fs/cifs/dir.c
2242 +++ b/fs/cifs/dir.c
2243 @@ -193,7 +193,8 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon)
2244 struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
2245 int i;
2246
2247 - if (unlikely(direntry->d_name.len >
2248 + if (unlikely(tcon->fsAttrInfo.MaxPathNameComponentLength &&
2249 + direntry->d_name.len >
2250 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
2251 return -ENAMETOOLONG;
2252
2253 @@ -509,7 +510,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
2254
2255 rc = check_name(direntry, tcon);
2256 if (rc)
2257 - goto out_free_xid;
2258 + goto out;
2259
2260 server = tcon->ses->server;
2261
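
The cifs check_name() fix above enforces the server's path-component limit only when fsAttrInfo.MaxPathNameComponentLength is non-zero, treating zero as "no limit reported". A tiny sketch of that guard (function and parameter names are illustrative):

#include <errno.h>
#include <stdio.h>

/* Enforce the server-reported component-length limit only when it is non-zero. */
static int check_name_len(size_t name_len, unsigned int max_component_len)
{
        if (max_component_len && name_len > max_component_len)
                return -ENAMETOOLONG;
        return 0;
}

int main(void)
{
        printf("limit not reported: %d\n", check_name_len(300, 0));
        printf("limit 255:          %d\n", check_name_len(300, 255));
        return 0;
}
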
2262 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
2263 index 846b57ff58de..64056c6eb857 100644
2264 --- a/fs/ext4/mballoc.c
2265 +++ b/fs/ext4/mballoc.c
2266 @@ -2136,8 +2136,10 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2267 * We search using buddy data only if the order of the request
2268 * is greater than equal to the sbi_s_mb_order2_reqs
2269 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
2270 + * We also support searching for power-of-two requests only for
2271 + * requests upto maximum buddy size we have constructed.
2272 */
2273 - if (i >= sbi->s_mb_order2_reqs) {
2274 + if (i >= sbi->s_mb_order2_reqs && i <= sb->s_blocksize_bits + 2) {
2275 /*
2276 * This should tell if fe_len is exactly power of 2
2277 */
2278 @@ -2207,7 +2209,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2279 }
2280
2281 ac->ac_groups_scanned++;
2282 - if (cr == 0 && ac->ac_2order < sb->s_blocksize_bits+2)
2283 + if (cr == 0)
2284 ext4_mb_simple_scan_group(ac, &e4b);
2285 else if (cr == 1 && sbi->s_stripe &&
2286 !(ac->ac_g_ex.fe_len % sbi->s_stripe))
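
The mballoc change above limits the power-of-two buddy scan to request orders the buddy bitmaps actually cover, i.e. orders up to s_blocksize_bits + 2, and simplifies the cr == 0 check accordingly. A sketch of the bound as a plain predicate (12 blocksize bits corresponds to 4 KiB blocks; 2 is only an example value for mb_order2_req):

#include <stdbool.h>
#include <stdio.h>

/* May a request of order i (fe_len == 2^i blocks) use the buddy scan? */
static bool use_buddy_scan(int i, int mb_order2_reqs, int blocksize_bits)
{
        return i >= mb_order2_reqs && i <= blocksize_bits + 2;
}

int main(void)
{
        int blocksize_bits = 12;        /* 4 KiB blocks */
        int order2_reqs = 2;            /* example tuning value */

        printf("order 10 -> buddy scan: %d\n",
               use_buddy_scan(10, order2_reqs, blocksize_bits));
        printf("order 20 -> buddy scan: %d\n",
               use_buddy_scan(20, order2_reqs, blocksize_bits));
        return 0;
}
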
2287 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
2288 index f72535e1898f..1f581791b39d 100644
2289 --- a/fs/ext4/super.c
2290 +++ b/fs/ext4/super.c
2291 @@ -2628,9 +2628,9 @@ static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
2292
2293 if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
2294 ret = sbi->s_stripe;
2295 - else if (stripe_width <= sbi->s_blocks_per_group)
2296 + else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
2297 ret = stripe_width;
2298 - else if (stride <= sbi->s_blocks_per_group)
2299 + else if (stride && stride <= sbi->s_blocks_per_group)
2300 ret = stride;
2301 else
2302 ret = 0;
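
The ext4_get_stripe_size() fix above skips stripe_width and stride hints that are zero, so a zero hint can no longer win merely because 0 <= s_blocks_per_group. The selection order, as a stand-alone sketch:

#include <stdio.h>

/* Pick a stripe size, ignoring zero-valued hints, mirroring the diff's guards. */
static unsigned long pick_stripe(unsigned long s_stripe,
                                 unsigned long stripe_width,
                                 unsigned long stride,
                                 unsigned long blocks_per_group)
{
        if (s_stripe && s_stripe <= blocks_per_group)
                return s_stripe;
        if (stripe_width && stripe_width <= blocks_per_group)
                return stripe_width;
        if (stride && stride <= blocks_per_group)
                return stride;
        return 0;
}

int main(void)
{
        /* A zero stripe_width no longer shadows a valid stride hint. */
        printf("chosen stripe = %lu\n", pick_stripe(0, 0, 16, 32768));
        return 0;
}
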
2303 diff --git a/fs/namei.c b/fs/namei.c
2304 index 66209f720146..e7d125c23aa6 100644
2305 --- a/fs/namei.c
2306 +++ b/fs/namei.c
2307 @@ -2971,10 +2971,16 @@ static inline int open_to_namei_flags(int flag)
2308
2309 static int may_o_create(const struct path *dir, struct dentry *dentry, umode_t mode)
2310 {
2311 + struct user_namespace *s_user_ns;
2312 int error = security_path_mknod(dir, dentry, mode, 0);
2313 if (error)
2314 return error;
2315
2316 + s_user_ns = dir->dentry->d_sb->s_user_ns;
2317 + if (!kuid_has_mapping(s_user_ns, current_fsuid()) ||
2318 + !kgid_has_mapping(s_user_ns, current_fsgid()))
2319 + return -EOVERFLOW;
2320 +
2321 error = inode_permission(dir->dentry->d_inode, MAY_WRITE | MAY_EXEC);
2322 if (error)
2323 return error;
2324 diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
2325 index f72712f6c28d..06089becca60 100644
2326 --- a/fs/ocfs2/alloc.c
2327 +++ b/fs/ocfs2/alloc.c
2328 @@ -7310,13 +7310,24 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
2329
2330 static int ocfs2_trim_extent(struct super_block *sb,
2331 struct ocfs2_group_desc *gd,
2332 - u32 start, u32 count)
2333 + u64 group, u32 start, u32 count)
2334 {
2335 u64 discard, bcount;
2336 + struct ocfs2_super *osb = OCFS2_SB(sb);
2337
2338 bcount = ocfs2_clusters_to_blocks(sb, count);
2339 - discard = le64_to_cpu(gd->bg_blkno) +
2340 - ocfs2_clusters_to_blocks(sb, start);
2341 + discard = ocfs2_clusters_to_blocks(sb, start);
2342 +
2343 + /*
2344 + * For the first cluster group, the gd->bg_blkno is not at the start
2345 + * of the group, but at an offset from the start. If we add it while
2346 + * calculating discard for first group, we will wrongly start fstrim a
2347 + * few blocks after the desried start block and the range can cross
2348 + * over into the next cluster group. So, add it only if this is not
2349 + * the first cluster group.
2350 + */
2351 + if (group != osb->first_cluster_group_blkno)
2352 + discard += le64_to_cpu(gd->bg_blkno);
2353
2354 trace_ocfs2_trim_extent(sb, (unsigned long long)discard, bcount);
2355
2356 @@ -7324,7 +7335,7 @@ static int ocfs2_trim_extent(struct super_block *sb,
2357 }
2358
2359 static int ocfs2_trim_group(struct super_block *sb,
2360 - struct ocfs2_group_desc *gd,
2361 + struct ocfs2_group_desc *gd, u64 group,
2362 u32 start, u32 max, u32 minbits)
2363 {
2364 int ret = 0, count = 0, next;
2365 @@ -7343,7 +7354,7 @@ static int ocfs2_trim_group(struct super_block *sb,
2366 next = ocfs2_find_next_bit(bitmap, max, start);
2367
2368 if ((next - start) >= minbits) {
2369 - ret = ocfs2_trim_extent(sb, gd,
2370 + ret = ocfs2_trim_extent(sb, gd, group,
2371 start, next - start);
2372 if (ret < 0) {
2373 mlog_errno(ret);
2374 @@ -7441,7 +7452,8 @@ int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range)
2375 }
2376
2377 gd = (struct ocfs2_group_desc *)gd_bh->b_data;
2378 - cnt = ocfs2_trim_group(sb, gd, first_bit, last_bit, minlen);
2379 + cnt = ocfs2_trim_group(sb, gd, group,
2380 + first_bit, last_bit, minlen);
2381 brelse(gd_bh);
2382 gd_bh = NULL;
2383 if (cnt < 0) {
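
The comment added in ocfs2_trim_extent() above explains the arithmetic: for the first cluster group the descriptor block number is offset from the group's start, so adding it would shift the discard range; the fix therefore adds gd->bg_blkno only for the other groups. The same arithmetic as a sketch with made-up block numbers and an example 8-blocks-per-cluster ratio:

#include <stdint.h>
#include <stdio.h>

/* Example ratio: 8 blocks per cluster. */
static uint64_t clusters_to_blocks(uint32_t clusters)
{
        return (uint64_t)clusters * 8;
}

static uint64_t trim_start_block(uint64_t bg_blkno, uint64_t group,
                                 uint64_t first_group_blkno, uint32_t start_cluster)
{
        uint64_t discard = clusters_to_blocks(start_cluster);

        /* Only non-first groups start exactly at their descriptor block. */
        if (group != first_group_blkno)
                discard += bg_blkno;
        return discard;
}

int main(void)
{
        /* made-up block numbers */
        printf("first group: start block %llu\n",
               (unsigned long long)trim_start_block(2, 2, 2, 10));
        printf("later group: start block %llu\n",
               (unsigned long long)trim_start_block(32768, 32768, 2, 10));
        return 0;
}
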
2384 diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
2385 index 0bd8a611eb83..fef5d2e114be 100644
2386 --- a/lib/asn1_decoder.c
2387 +++ b/lib/asn1_decoder.c
2388 @@ -284,6 +284,9 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
2389 if (unlikely(len > datalen - dp))
2390 goto data_overrun_error;
2391 }
2392 + } else {
2393 + if (unlikely(len > datalen - dp))
2394 + goto data_overrun_error;
2395 }
2396
2397 if (flags & FLAG_CONS) {
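
The asn1_ber_decoder() fix above adds the same remaining-data bound to the branch that previously skipped it, so a definite length can never exceed what is left of the buffer past the current position. A minimal sketch of that check:

#include <stddef.h>
#include <stdio.h>

/* Does an element of 'len' bytes starting at offset 'dp' fit in 'datalen' bytes? */
static int check_element_len(size_t len, size_t dp, size_t datalen)
{
        if (dp > datalen || len > datalen - dp)
                return -1;      /* data overrun */
        return 0;
}

int main(void)
{
        printf("fits:    %d\n", check_element_len(10, 90, 100));
        printf("overrun: %d\n", check_element_len(20, 90, 100));
        return 0;
}
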
2398 diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
2399 index 880a7d1d27d2..4ccff66523c9 100644
2400 --- a/samples/trace_events/trace-events-sample.c
2401 +++ b/samples/trace_events/trace-events-sample.c
2402 @@ -78,28 +78,36 @@ static int simple_thread_fn(void *arg)
2403 }
2404
2405 static DEFINE_MUTEX(thread_mutex);
2406 +static int simple_thread_cnt;
2407
2408 void foo_bar_reg(void)
2409 {
2410 + mutex_lock(&thread_mutex);
2411 + if (simple_thread_cnt++)
2412 + goto out;
2413 +
2414 pr_info("Starting thread for foo_bar_fn\n");
2415 /*
2416 * We shouldn't be able to start a trace when the module is
2417 * unloading (there's other locks to prevent that). But
2418 * for consistency sake, we still take the thread_mutex.
2419 */
2420 - mutex_lock(&thread_mutex);
2421 simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn");
2422 + out:
2423 mutex_unlock(&thread_mutex);
2424 }
2425
2426 void foo_bar_unreg(void)
2427 {
2428 - pr_info("Killing thread for foo_bar_fn\n");
2429 - /* protect against module unloading */
2430 mutex_lock(&thread_mutex);
2431 + if (--simple_thread_cnt)
2432 + goto out;
2433 +
2434 + pr_info("Killing thread for foo_bar_fn\n");
2435 if (simple_tsk_fn)
2436 kthread_stop(simple_tsk_fn);
2437 simple_tsk_fn = NULL;
2438 + out:
2439 mutex_unlock(&thread_mutex);
2440 }
2441
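
The trace-events sample above now counts register/unregister calls under thread_mutex, starting the worker kthread only on the first register and stopping it only when the last user unregisters. The same reference-counted start/stop pattern in plain pthreads (names are illustrative; compile with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t thread_mutex = PTHREAD_MUTEX_INITIALIZER;
static int use_count;

static void do_reg(void)
{
        pthread_mutex_lock(&thread_mutex);
        if (use_count++ == 0)           /* first user starts the worker */
                printf("starting worker\n");
        pthread_mutex_unlock(&thread_mutex);
}

static void do_unreg(void)
{
        pthread_mutex_lock(&thread_mutex);
        if (--use_count == 0)           /* last user stops the worker */
                printf("stopping worker\n");
        pthread_mutex_unlock(&thread_mutex);
}

int main(void)
{
        do_reg();
        do_reg();       /* second register: no new worker */
        do_unreg();     /* still one user: keep the worker */
        do_unreg();     /* last user: stop it */
        return 0;
}
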
2442 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
2443 index 32969f630438..4e9b4d23e20e 100644
2444 --- a/security/keys/keyring.c
2445 +++ b/security/keys/keyring.c
2446 @@ -452,34 +452,33 @@ static long keyring_read(const struct key *keyring,
2447 char __user *buffer, size_t buflen)
2448 {
2449 struct keyring_read_iterator_context ctx;
2450 - unsigned long nr_keys;
2451 - int ret;
2452 + long ret;
2453
2454 kenter("{%d},,%zu", key_serial(keyring), buflen);
2455
2456 if (buflen & (sizeof(key_serial_t) - 1))
2457 return -EINVAL;
2458
2459 - nr_keys = keyring->keys.nr_leaves_on_tree;
2460 - if (nr_keys == 0)
2461 - return 0;
2462 -
2463 - /* Calculate how much data we could return */
2464 - if (!buffer || !buflen)
2465 - return nr_keys * sizeof(key_serial_t);
2466 -
2467 - /* Copy the IDs of the subscribed keys into the buffer */
2468 - ctx.buffer = (key_serial_t __user *)buffer;
2469 - ctx.buflen = buflen;
2470 - ctx.count = 0;
2471 - ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
2472 - if (ret < 0) {
2473 - kleave(" = %d [iterate]", ret);
2474 - return ret;
2475 + /* Copy as many key IDs as fit into the buffer */
2476 + if (buffer && buflen) {
2477 + ctx.buffer = (key_serial_t __user *)buffer;
2478 + ctx.buflen = buflen;
2479 + ctx.count = 0;
2480 + ret = assoc_array_iterate(&keyring->keys,
2481 + keyring_read_iterator, &ctx);
2482 + if (ret < 0) {
2483 + kleave(" = %ld [iterate]", ret);
2484 + return ret;
2485 + }
2486 }
2487
2488 - kleave(" = %zu [ok]", ctx.count);
2489 - return ctx.count;
2490 + /* Return the size of the buffer needed */
2491 + ret = keyring->keys.nr_leaves_on_tree * sizeof(key_serial_t);
2492 + if (ret <= buflen)
2493 + kleave("= %ld [ok]", ret);
2494 + else
2495 + kleave("= %ld [buffer too small]", ret);
2496 + return ret;
2497 }
2498
2499 /*
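
keyring_read() above now copies as many key IDs as fit into the caller's buffer and always returns the number of bytes needed for all of them, so a caller can probe with a short (or empty) buffer to size a second call. A user-space sketch of that contract with a hypothetical list_ids() helper:

#include <stdio.h>
#include <string.h>

/* Hypothetical reader: copies as many IDs as fit and returns the size needed. */
static long list_ids(const int *ids, size_t nr, int *buf, size_t buflen)
{
        size_t need = nr * sizeof(int);

        if (buf && buflen)
                memcpy(buf, ids, buflen < need ? buflen : need);
        return (long)need;
}

int main(void)
{
        int ids[] = { 11, 22, 33 };
        int part[1];
        long need = list_ids(ids, 3, part, sizeof(part));

        printf("got first id %d, need %ld bytes for all of them\n", part[0], need);
        return 0;
}
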
2500 diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
2501 index c41148353e19..45ef5915462c 100644
2502 --- a/sound/core/seq/seq_clientmgr.c
2503 +++ b/sound/core/seq/seq_clientmgr.c
2504 @@ -663,7 +663,7 @@ static int deliver_to_subscribers(struct snd_seq_client *client,
2505 if (atomic)
2506 read_lock(&grp->list_lock);
2507 else
2508 - down_read(&grp->list_mutex);
2509 + down_read_nested(&grp->list_mutex, hop);
2510 list_for_each_entry(subs, &grp->list_head, src_list) {
2511 /* both ports ready? */
2512 if (atomic_read(&subs->ref_count) != 2)
2513 diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
2514 index 6a437eb66115..59127b6ef39e 100644
2515 --- a/sound/core/timer_compat.c
2516 +++ b/sound/core/timer_compat.c
2517 @@ -133,7 +133,8 @@ enum {
2518 #endif /* CONFIG_X86_X32 */
2519 };
2520
2521 -static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
2522 +static long __snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
2523 + unsigned long arg)
2524 {
2525 void __user *argp = compat_ptr(arg);
2526
2527 @@ -153,7 +154,7 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
2528 case SNDRV_TIMER_IOCTL_PAUSE:
2529 case SNDRV_TIMER_IOCTL_PAUSE_OLD:
2530 case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
2531 - return snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
2532 + return __snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
2533 case SNDRV_TIMER_IOCTL_GPARAMS32:
2534 return snd_timer_user_gparams_compat(file, argp);
2535 case SNDRV_TIMER_IOCTL_INFO32:
2536 @@ -167,3 +168,15 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
2537 }
2538 return -ENOIOCTLCMD;
2539 }
2540 +
2541 +static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
2542 + unsigned long arg)
2543 +{
2544 + struct snd_timer_user *tu = file->private_data;
2545 + long ret;
2546 +
2547 + mutex_lock(&tu->ioctl_lock);
2548 + ret = __snd_timer_user_ioctl_compat(file, cmd, arg);
2549 + mutex_unlock(&tu->ioctl_lock);
2550 + return ret;
2551 +}
2552 diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c
2553 index 439aa3ff1f99..79dcb1e34baa 100644
2554 --- a/sound/soc/codecs/adau17x1.c
2555 +++ b/sound/soc/codecs/adau17x1.c
2556 @@ -91,6 +91,27 @@ static int adau17x1_pll_event(struct snd_soc_dapm_widget *w,
2557 return 0;
2558 }
2559
2560 +static int adau17x1_adc_fixup(struct snd_soc_dapm_widget *w,
2561 + struct snd_kcontrol *kcontrol, int event)
2562 +{
2563 + struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
2564 + struct adau *adau = snd_soc_codec_get_drvdata(codec);
2565 +
2566 + /*
2567 + * If we are capturing, toggle the ADOSR bit in Converter Control 0 to
2568 + * avoid losing SNR (workaround from ADI). This must be done after
2569 + * the ADC(s) have been enabled. According to the data sheet, it is
2570 + * normally illegal to set this bit when the sampling rate is 96 kHz,
2571 + * but according to ADI it is acceptable for this workaround.
2572 + */
2573 + regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
2574 + ADAU17X1_CONVERTER0_ADOSR, ADAU17X1_CONVERTER0_ADOSR);
2575 + regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
2576 + ADAU17X1_CONVERTER0_ADOSR, 0);
2577 +
2578 + return 0;
2579 +}
2580 +
2581 static const char * const adau17x1_mono_stereo_text[] = {
2582 "Stereo",
2583 "Mono Left Channel (L+R)",
2584 @@ -122,7 +143,8 @@ static const struct snd_soc_dapm_widget adau17x1_dapm_widgets[] = {
2585 SND_SOC_DAPM_MUX("Right DAC Mode Mux", SND_SOC_NOPM, 0, 0,
2586 &adau17x1_dac_mode_mux),
2587
2588 - SND_SOC_DAPM_ADC("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0),
2589 + SND_SOC_DAPM_ADC_E("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0,
2590 + adau17x1_adc_fixup, SND_SOC_DAPM_POST_PMU),
2591 SND_SOC_DAPM_ADC("Right Decimator", NULL, ADAU17X1_ADC_CONTROL, 1, 0),
2592 SND_SOC_DAPM_DAC("Left DAC", NULL, ADAU17X1_DAC_CONTROL0, 0, 0),
2593 SND_SOC_DAPM_DAC("Right DAC", NULL, ADAU17X1_DAC_CONTROL0, 1, 0),
2594 diff --git a/sound/soc/codecs/adau17x1.h b/sound/soc/codecs/adau17x1.h
2595 index bf04b7efee40..db350035fad7 100644
2596 --- a/sound/soc/codecs/adau17x1.h
2597 +++ b/sound/soc/codecs/adau17x1.h
2598 @@ -129,5 +129,7 @@ bool adau17x1_has_dsp(struct adau *adau);
2599
2600 #define ADAU17X1_CONVERTER0_CONVSR_MASK 0x7
2601
2602 +#define ADAU17X1_CONVERTER0_ADOSR BIT(3)
2603 +
2604
2605 #endif
2606 diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
2607 index bd19fad2d91b..c17f262f0834 100644
2608 --- a/sound/soc/intel/boards/bytcr_rt5640.c
2609 +++ b/sound/soc/intel/boards/bytcr_rt5640.c
2610 @@ -807,7 +807,6 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
2611 static struct platform_driver snd_byt_rt5640_mc_driver = {
2612 .driver = {
2613 .name = "bytcr_rt5640",
2614 - .pm = &snd_soc_pm_ops,
2615 },
2616 .probe = snd_byt_rt5640_mc_probe,
2617 };
2618 diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
2619 index eabff3a857d0..ae49f8199e45 100644
2620 --- a/sound/soc/intel/boards/bytcr_rt5651.c
2621 +++ b/sound/soc/intel/boards/bytcr_rt5651.c
2622 @@ -317,7 +317,6 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
2623 static struct platform_driver snd_byt_rt5651_mc_driver = {
2624 .driver = {
2625 .name = "bytcr_rt5651",
2626 - .pm = &snd_soc_pm_ops,
2627 },
2628 .probe = snd_byt_rt5651_mc_probe,
2629 };
2630 diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
2631 index 4e778eae1510..415a9c38d9f0 100644
2632 --- a/tools/perf/util/parse-events.c
2633 +++ b/tools/perf/util/parse-events.c
2634 @@ -309,10 +309,11 @@ __add_event(struct list_head *list, int *idx,
2635
2636 event_attr_init(attr);
2637
2638 - evsel = perf_evsel__new_idx(attr, (*idx)++);
2639 + evsel = perf_evsel__new_idx(attr, *idx);
2640 if (!evsel)
2641 return NULL;
2642
2643 + (*idx)++;
2644 evsel->cpus = cpu_map__get(cpus);
2645 evsel->own_cpus = cpu_map__get(cpus);
2646