Magellan Linux

Contents of /trunk/kernel-alx-legacy/patches-4.9/0170-4.9.71-all-fixes.patch



Revision 3608
Fri Aug 14 07:34:29 2020 UTC by niro
File size: 181157 bytes
-added kernel-alx-legacy pkg
1 diff --git a/Makefile b/Makefile
2 index 7ad3271a1a1d..5f2736bb4877 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 70
9 +SUBLEVEL = 71
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
14 index 3635b8662724..92110c2c6c59 100644
15 --- a/arch/arm64/Makefile
16 +++ b/arch/arm64/Makefile
17 @@ -14,8 +14,12 @@ LDFLAGS_vmlinux :=-p --no-undefined -X
18 CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
19 GZFLAGS :=-9
20
21 -ifneq ($(CONFIG_RELOCATABLE),)
22 -LDFLAGS_vmlinux += -pie -shared -Bsymbolic
23 +ifeq ($(CONFIG_RELOCATABLE), y)
24 +# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
25 +# for relative relocs, since this leads to better Image compression
26 +# with the relocation offsets always being zero.
27 +LDFLAGS_vmlinux += -pie -shared -Bsymbolic \
28 + $(call ld-option, --no-apply-dynamic-relocs)
29 endif
30
31 ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
32 diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
33 index 3c1bd640042a..88c4b77ec8d2 100644
34 --- a/arch/blackfin/Kconfig
35 +++ b/arch/blackfin/Kconfig
36 @@ -319,11 +319,14 @@ config BF53x
37
38 config GPIO_ADI
39 def_bool y
40 + depends on !PINCTRL
41 depends on (BF51x || BF52x || BF53x || BF538 || BF539 || BF561)
42
43 -config PINCTRL
44 +config PINCTRL_BLACKFIN_ADI2
45 def_bool y
46 - depends on BF54x || BF60x
47 + depends on (BF54x || BF60x)
48 + select PINCTRL
49 + select PINCTRL_ADI2
50
51 config MEM_MT48LC64M4A2FB_7E
52 bool
53 diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug
54 index f3337ee03621..a93cf06a4d6f 100644
55 --- a/arch/blackfin/Kconfig.debug
56 +++ b/arch/blackfin/Kconfig.debug
57 @@ -17,6 +17,7 @@ config DEBUG_VERBOSE
58
59 config DEBUG_MMRS
60 tristate "Generate Blackfin MMR tree"
61 + depends on !PINCTRL
62 select DEBUG_FS
63 help
64 Create a tree of Blackfin MMRs via the debugfs tree. If
65 diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
66 index 140faa16685a..1311e6b13991 100644
67 --- a/arch/openrisc/include/asm/uaccess.h
68 +++ b/arch/openrisc/include/asm/uaccess.h
69 @@ -211,7 +211,7 @@ do { \
70 case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break; \
71 case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \
72 case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \
73 - case 8: __get_user_asm2(x, ptr, retval); \
74 + case 8: __get_user_asm2(x, ptr, retval); break; \
75 default: (x) = __get_user_bad(); \
76 } \
77 } while (0)
78 diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
79 index 7b2ca16b1eb4..991c6a517ddc 100644
80 --- a/arch/powerpc/perf/hv-24x7.c
81 +++ b/arch/powerpc/perf/hv-24x7.c
82 @@ -516,7 +516,7 @@ static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
83 {
84 if (s1 < s2)
85 return 1;
86 - if (s2 > s1)
87 + if (s1 > s2)
88 return -1;
89
90 return memcmp(d1, d2, s1);
91 diff --git a/arch/powerpc/platforms/powernv/opal-async.c b/arch/powerpc/platforms/powernv/opal-async.c
92 index 83bebeec0fea..0f7b16e29347 100644
93 --- a/arch/powerpc/platforms/powernv/opal-async.c
94 +++ b/arch/powerpc/platforms/powernv/opal-async.c
95 @@ -39,18 +39,18 @@ int __opal_async_get_token(void)
96 int token;
97
98 spin_lock_irqsave(&opal_async_comp_lock, flags);
99 - token = find_first_bit(opal_async_complete_map, opal_max_async_tokens);
100 + token = find_first_zero_bit(opal_async_token_map, opal_max_async_tokens);
101 if (token >= opal_max_async_tokens) {
102 token = -EBUSY;
103 goto out;
104 }
105
106 - if (__test_and_set_bit(token, opal_async_token_map)) {
107 + if (!__test_and_clear_bit(token, opal_async_complete_map)) {
108 token = -EBUSY;
109 goto out;
110 }
111
112 - __clear_bit(token, opal_async_complete_map);
113 + __set_bit(token, opal_async_token_map);
114
115 out:
116 spin_unlock_irqrestore(&opal_async_comp_lock, flags);
117 diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
118 index efe8b6bb168b..b33faa0015cc 100644
119 --- a/arch/powerpc/platforms/powernv/setup.c
120 +++ b/arch/powerpc/platforms/powernv/setup.c
121 @@ -289,7 +289,7 @@ static unsigned long pnv_get_proc_freq(unsigned int cpu)
122 {
123 unsigned long ret_freq;
124
125 - ret_freq = cpufreq_quick_get(cpu) * 1000ul;
126 + ret_freq = cpufreq_get(cpu) * 1000ul;
127
128 /*
129 * If the backend cpufreq driver does not exist,
130 diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
131 index f267ee0afc08..716353b247de 100644
132 --- a/arch/powerpc/sysdev/ipic.c
133 +++ b/arch/powerpc/sysdev/ipic.c
134 @@ -845,12 +845,12 @@ void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq)
135
136 u32 ipic_get_mcp_status(void)
137 {
138 - return ipic_read(primary_ipic->regs, IPIC_SERMR);
139 + return ipic_read(primary_ipic->regs, IPIC_SERSR);
140 }
141
142 void ipic_clear_mcp_status(u32 mask)
143 {
144 - ipic_write(primary_ipic->regs, IPIC_SERMR, mask);
145 + ipic_write(primary_ipic->regs, IPIC_SERSR, mask);
146 }
147
148 /* Return an interrupt vector or 0 if no interrupt is pending. */
149 diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c
150 index 399a29d067d6..cb91a64a99e7 100644
151 --- a/arch/x86/crypto/salsa20_glue.c
152 +++ b/arch/x86/crypto/salsa20_glue.c
153 @@ -59,13 +59,6 @@ static int encrypt(struct blkcipher_desc *desc,
154
155 salsa20_ivsetup(ctx, walk.iv);
156
157 - if (likely(walk.nbytes == nbytes))
158 - {
159 - salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
160 - walk.dst.virt.addr, nbytes);
161 - return blkcipher_walk_done(desc, &walk, 0);
162 - }
163 -
164 while (walk.nbytes >= 64) {
165 salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
166 walk.dst.virt.addr,
167 diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
168 index b89bef95f63b..11cc600f4df0 100644
169 --- a/arch/x86/kernel/acpi/boot.c
170 +++ b/arch/x86/kernel/acpi/boot.c
171 @@ -720,7 +720,7 @@ static void __init acpi_set_irq_model_ioapic(void)
172 #ifdef CONFIG_ACPI_HOTPLUG_CPU
173 #include <acpi/processor.h>
174
175 -int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
176 +static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
177 {
178 #ifdef CONFIG_ACPI_NUMA
179 int nid;
180 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
181 index 9aa62ab13ae8..a929ca03b7ed 100644
182 --- a/arch/x86/kvm/vmx.c
183 +++ b/arch/x86/kvm/vmx.c
184 @@ -9543,10 +9543,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
185 return false;
186
187 page = nested_get_page(vcpu, vmcs12->msr_bitmap);
188 - if (!page) {
189 - WARN_ON(1);
190 + if (!page)
191 return false;
192 - }
193 msr_bitmap_l1 = (unsigned long *)kmap(page);
194 if (!msr_bitmap_l1) {
195 nested_release_page_clean(page);
196 diff --git a/block/badblocks.c b/block/badblocks.c
197 index 6ebcef282314..2fe6c117ac96 100644
198 --- a/block/badblocks.c
199 +++ b/block/badblocks.c
200 @@ -178,7 +178,7 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
201
202 if (bb->shift < 0)
203 /* badblocks are disabled */
204 - return 0;
205 + return 1;
206
207 if (bb->shift) {
208 /* round the start down, and the end up */
209 diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
210 index dcf5ce3ba4bf..4bc701b32ce2 100644
211 --- a/block/blk-mq-tag.c
212 +++ b/block/blk-mq-tag.c
213 @@ -311,6 +311,9 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
214 for (i = 0; i < set->nr_hw_queues; i++) {
215 struct blk_mq_tags *tags = set->tags[i];
216
217 + if (!tags)
218 + continue;
219 +
220 for (j = 0; j < tags->nr_tags; j++) {
221 if (!tags->rqs[j])
222 continue;
223 diff --git a/crypto/hmac.c b/crypto/hmac.c
224 index 72e38c098bb3..ba07fb6221ae 100644
225 --- a/crypto/hmac.c
226 +++ b/crypto/hmac.c
227 @@ -194,11 +194,15 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
228 salg = shash_attr_alg(tb[1], 0, 0);
229 if (IS_ERR(salg))
230 return PTR_ERR(salg);
231 + alg = &salg->base;
232
233 + /* The underlying hash algorithm must be unkeyed */
234 err = -EINVAL;
235 + if (crypto_shash_alg_has_setkey(salg))
236 + goto out_put_alg;
237 +
238 ds = salg->digestsize;
239 ss = salg->statesize;
240 - alg = &salg->base;
241 if (ds > alg->cra_blocksize ||
242 ss < alg->cra_blocksize)
243 goto out_put_alg;
244 diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c
245 index 0b66dc824606..cad395d70d78 100644
246 --- a/crypto/rsa_helper.c
247 +++ b/crypto/rsa_helper.c
248 @@ -30,7 +30,7 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
249 return -EINVAL;
250
251 if (fips_enabled) {
252 - while (!*ptr && n_sz) {
253 + while (n_sz && !*ptr) {
254 ptr++;
255 n_sz--;
256 }
257 diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
258 index f550b5d94630..d7da0eea5622 100644
259 --- a/crypto/salsa20_generic.c
260 +++ b/crypto/salsa20_generic.c
261 @@ -188,13 +188,6 @@ static int encrypt(struct blkcipher_desc *desc,
262
263 salsa20_ivsetup(ctx, walk.iv);
264
265 - if (likely(walk.nbytes == nbytes))
266 - {
267 - salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
268 - walk.src.virt.addr, nbytes);
269 - return blkcipher_walk_done(desc, &walk, 0);
270 - }
271 -
272 while (walk.nbytes >= 64) {
273 salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
274 walk.src.virt.addr,
275 diff --git a/crypto/shash.c b/crypto/shash.c
276 index 4d8a671d1614..9bd5044d467b 100644
277 --- a/crypto/shash.c
278 +++ b/crypto/shash.c
279 @@ -24,11 +24,12 @@
280
281 static const struct crypto_type crypto_shash_type;
282
283 -static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
284 - unsigned int keylen)
285 +int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
286 + unsigned int keylen)
287 {
288 return -ENOSYS;
289 }
290 +EXPORT_SYMBOL_GPL(shash_no_setkey);
291
292 static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
293 unsigned int keylen)
294 diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
295 index ae22f05d5936..e3af318af2db 100644
296 --- a/crypto/tcrypt.c
297 +++ b/crypto/tcrypt.c
298 @@ -342,7 +342,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
299 }
300
301 sg_init_aead(sg, xbuf,
302 - *b_size + (enc ? authsize : 0));
303 + *b_size + (enc ? 0 : authsize));
304
305 sg_init_aead(sgout, xoutbuf,
306 *b_size + (enc ? authsize : 0));
307 @@ -350,7 +350,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
308 sg_set_buf(&sg[0], assoc, aad_size);
309 sg_set_buf(&sgout[0], assoc, aad_size);
310
311 - aead_request_set_crypt(req, sg, sgout, *b_size, iv);
312 + aead_request_set_crypt(req, sg, sgout,
313 + *b_size + (enc ? 0 : authsize),
314 + iv);
315 aead_request_set_ad(req, aad_size);
316
317 if (secs)
318 diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
319 index 3de3b6b8f0f1..f43a586236ea 100644
320 --- a/drivers/acpi/acpi_processor.c
321 +++ b/drivers/acpi/acpi_processor.c
322 @@ -182,11 +182,6 @@ int __weak arch_register_cpu(int cpu)
323
324 void __weak arch_unregister_cpu(int cpu) {}
325
326 -int __weak acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
327 -{
328 - return -ENODEV;
329 -}
330 -
331 static int acpi_processor_hotadd_init(struct acpi_processor *pr)
332 {
333 unsigned long long sta;
334 diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
335 index 56190d00fd87..0a3ca20f99af 100644
336 --- a/drivers/acpi/bus.c
337 +++ b/drivers/acpi/bus.c
338 @@ -1197,7 +1197,6 @@ static int __init acpi_init(void)
339 acpi_wakeup_device_init();
340 acpi_debugger_init();
341 acpi_setup_sb_notify_handler();
342 - acpi_set_processor_mapping();
343 return 0;
344 }
345
346 diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
347 index 5c78ee1860b0..fd59ae871db3 100644
348 --- a/drivers/acpi/processor_core.c
349 +++ b/drivers/acpi/processor_core.c
350 @@ -280,79 +280,6 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
351 }
352 EXPORT_SYMBOL_GPL(acpi_get_cpuid);
353
354 -#ifdef CONFIG_ACPI_HOTPLUG_CPU
355 -static bool __init
356 -map_processor(acpi_handle handle, phys_cpuid_t *phys_id, int *cpuid)
357 -{
358 - int type, id;
359 - u32 acpi_id;
360 - acpi_status status;
361 - acpi_object_type acpi_type;
362 - unsigned long long tmp;
363 - union acpi_object object = { 0 };
364 - struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
365 -
366 - status = acpi_get_type(handle, &acpi_type);
367 - if (ACPI_FAILURE(status))
368 - return false;
369 -
370 - switch (acpi_type) {
371 - case ACPI_TYPE_PROCESSOR:
372 - status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
373 - if (ACPI_FAILURE(status))
374 - return false;
375 - acpi_id = object.processor.proc_id;
376 -
377 - /* validate the acpi_id */
378 - if(acpi_processor_validate_proc_id(acpi_id))
379 - return false;
380 - break;
381 - case ACPI_TYPE_DEVICE:
382 - status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
383 - if (ACPI_FAILURE(status))
384 - return false;
385 - acpi_id = tmp;
386 - break;
387 - default:
388 - return false;
389 - }
390 -
391 - type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
392 -
393 - *phys_id = __acpi_get_phys_id(handle, type, acpi_id, false);
394 - id = acpi_map_cpuid(*phys_id, acpi_id);
395 -
396 - if (id < 0)
397 - return false;
398 - *cpuid = id;
399 - return true;
400 -}
401 -
402 -static acpi_status __init
403 -set_processor_node_mapping(acpi_handle handle, u32 lvl, void *context,
404 - void **rv)
405 -{
406 - phys_cpuid_t phys_id;
407 - int cpu_id;
408 -
409 - if (!map_processor(handle, &phys_id, &cpu_id))
410 - return AE_ERROR;
411 -
412 - acpi_map_cpu2node(handle, cpu_id, phys_id);
413 - return AE_OK;
414 -}
415 -
416 -void __init acpi_set_processor_mapping(void)
417 -{
418 - /* Set persistent cpu <-> node mapping for all processors. */
419 - acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
420 - ACPI_UINT32_MAX, set_processor_node_mapping,
421 - NULL, NULL, NULL);
422 -}
423 -#else
424 -void __init acpi_set_processor_mapping(void) {}
425 -#endif /* CONFIG_ACPI_HOTPLUG_CPU */
426 -
427 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
428 static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
429 u64 *phys_addr, int *ioapic_id)
430 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
431 index 693028659ccc..3ae950c82922 100644
432 --- a/drivers/bluetooth/btusb.c
433 +++ b/drivers/bluetooth/btusb.c
434 @@ -1059,6 +1059,10 @@ static int btusb_open(struct hci_dev *hdev)
435 }
436
437 data->intf->needs_remote_wakeup = 1;
438 + /* device specific wakeup source enabled and required for USB
439 + * remote wakeup while host is suspended
440 + */
441 + device_wakeup_enable(&data->udev->dev);
442
443 if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
444 goto done;
445 @@ -1122,6 +1126,7 @@ static int btusb_close(struct hci_dev *hdev)
446 goto failed;
447
448 data->intf->needs_remote_wakeup = 0;
449 + device_wakeup_disable(&data->udev->dev);
450 usb_autopm_put_interface(data->intf);
451
452 failed:
453 diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
454 index f0249899fc96..45d7ecc66b22 100644
455 --- a/drivers/bus/arm-ccn.c
456 +++ b/drivers/bus/arm-ccn.c
457 @@ -1280,6 +1280,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
458
459 /* Perf driver registration */
460 ccn->dt.pmu = (struct pmu) {
461 + .module = THIS_MODULE,
462 .attr_groups = arm_ccn_pmu_attr_groups,
463 .task_ctx_nr = perf_invalid_context,
464 .event_init = arm_ccn_pmu_event_init,
465 diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
466 index c0e8e1f196aa..2bfaf22e6ffc 100644
467 --- a/drivers/clk/hisilicon/clk-hi6220.c
468 +++ b/drivers/clk/hisilicon/clk-hi6220.c
469 @@ -144,7 +144,7 @@ static struct hisi_gate_clock hi6220_separated_gate_clks_sys[] __initdata = {
470 { HI6220_BBPPLL_SEL, "bbppll_sel", "pll0_bbp_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 9, 0, },
471 { HI6220_MEDIA_PLL_SRC, "media_pll_src", "pll_media_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 10, 0, },
472 { HI6220_MMC2_SEL, "mmc2_sel", "mmc2_mux1", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 11, 0, },
473 - { HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 12, 0, },
474 + { HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IS_CRITICAL, 0x270, 12, 0, },
475 };
476
477 static struct hisi_mux_clock hi6220_mux_clks_sys[] __initdata = {
478 diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
479 index ce8ea10407e4..93a19667003d 100644
480 --- a/drivers/clk/imx/clk-imx6q.c
481 +++ b/drivers/clk/imx/clk-imx6q.c
482 @@ -487,7 +487,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
483 clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24);
484 clk[IMX6QDL_CLK_GPU3D_CORE] = imx_clk_gate2("gpu3d_core", "gpu3d_core_podf", base + 0x6c, 26);
485 clk[IMX6QDL_CLK_HDMI_IAHB] = imx_clk_gate2("hdmi_iahb", "ahb", base + 0x70, 0);
486 - clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "video_27m", base + 0x70, 4);
487 + clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "mipi_core_cfg", base + 0x70, 4);
488 clk[IMX6QDL_CLK_I2C1] = imx_clk_gate2("i2c1", "ipg_per", base + 0x70, 6);
489 clk[IMX6QDL_CLK_I2C2] = imx_clk_gate2("i2c2", "ipg_per", base + 0x70, 8);
490 clk[IMX6QDL_CLK_I2C3] = imx_clk_gate2("i2c3", "ipg_per", base + 0x70, 10);
491 diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
492 index 9f24fcfa304f..e425e50173c5 100644
493 --- a/drivers/clk/mediatek/clk-mtk.h
494 +++ b/drivers/clk/mediatek/clk-mtk.h
495 @@ -185,6 +185,7 @@ struct mtk_pll_data {
496 uint32_t pcw_reg;
497 int pcw_shift;
498 const struct mtk_pll_div_table *div_table;
499 + const char *parent_name;
500 };
501
502 void mtk_clk_register_plls(struct device_node *node,
503 diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
504 index 0c2deac17ce9..1502384a3093 100644
505 --- a/drivers/clk/mediatek/clk-pll.c
506 +++ b/drivers/clk/mediatek/clk-pll.c
507 @@ -302,7 +302,10 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data,
508
509 init.name = data->name;
510 init.ops = &mtk_pll_ops;
511 - init.parent_names = &parent_name;
512 + if (data->parent_name)
513 + init.parent_names = &data->parent_name;
514 + else
515 + init.parent_names = &parent_name;
516 init.num_parents = 1;
517
518 clk = clk_register(NULL, &pll->hw);
519 diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
520 index 8e2db5ead8da..af520d81525f 100644
521 --- a/drivers/clk/tegra/clk-tegra30.c
522 +++ b/drivers/clk/tegra/clk-tegra30.c
523 @@ -963,7 +963,7 @@ static void __init tegra30_super_clk_init(void)
524 * U71 divider of cclk_lp.
525 */
526 clk = tegra_clk_register_divider("pll_p_out3_cclklp", "pll_p_out3",
527 - clk_base + SUPER_CCLKG_DIVIDER, 0,
528 + clk_base + SUPER_CCLKLP_DIVIDER, 0,
529 TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
530 clk_register_clkdev(clk, "pll_p_out3_cclklp", NULL);
531
532 diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
533 index 6b535262ac5d..3db94e81bc14 100644
534 --- a/drivers/dma/dmaengine.c
535 +++ b/drivers/dma/dmaengine.c
536 @@ -1107,12 +1107,14 @@ static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
537 switch (order) {
538 case 0 ... 1:
539 return &unmap_pool[0];
540 +#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
541 case 2 ... 4:
542 return &unmap_pool[1];
543 case 5 ... 7:
544 return &unmap_pool[2];
545 case 8:
546 return &unmap_pool[3];
547 +#endif
548 default:
549 BUG();
550 return NULL;
551 diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
552 index fbb75514dfb4..e0bd578a253a 100644
553 --- a/drivers/dma/dmatest.c
554 +++ b/drivers/dma/dmatest.c
555 @@ -158,6 +158,12 @@ MODULE_PARM_DESC(run, "Run the test (default: false)");
556 #define PATTERN_OVERWRITE 0x20
557 #define PATTERN_COUNT_MASK 0x1f
558
559 +/* poor man's completion - we want to use wait_event_freezable() on it */
560 +struct dmatest_done {
561 + bool done;
562 + wait_queue_head_t *wait;
563 +};
564 +
565 struct dmatest_thread {
566 struct list_head node;
567 struct dmatest_info *info;
568 @@ -166,6 +172,8 @@ struct dmatest_thread {
569 u8 **srcs;
570 u8 **dsts;
571 enum dma_transaction_type type;
572 + wait_queue_head_t done_wait;
573 + struct dmatest_done test_done;
574 bool done;
575 };
576
577 @@ -326,18 +334,25 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
578 return error_count;
579 }
580
581 -/* poor man's completion - we want to use wait_event_freezable() on it */
582 -struct dmatest_done {
583 - bool done;
584 - wait_queue_head_t *wait;
585 -};
586
587 static void dmatest_callback(void *arg)
588 {
589 struct dmatest_done *done = arg;
590 -
591 - done->done = true;
592 - wake_up_all(done->wait);
593 + struct dmatest_thread *thread =
594 + container_of(arg, struct dmatest_thread, done_wait);
595 + if (!thread->done) {
596 + done->done = true;
597 + wake_up_all(done->wait);
598 + } else {
599 + /*
600 + * If thread->done, it means that this callback occurred
601 + * after the parent thread has cleaned up. This can
602 + * happen in the case that driver doesn't implement
603 + * the terminate_all() functionality and a dma operation
604 + * did not occur within the timeout period
605 + */
606 + WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
607 + }
608 }
609
610 static unsigned int min_odd(unsigned int x, unsigned int y)
611 @@ -408,9 +423,8 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
612 */
613 static int dmatest_func(void *data)
614 {
615 - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
616 struct dmatest_thread *thread = data;
617 - struct dmatest_done done = { .wait = &done_wait };
618 + struct dmatest_done *done = &thread->test_done;
619 struct dmatest_info *info;
620 struct dmatest_params *params;
621 struct dma_chan *chan;
622 @@ -637,9 +651,9 @@ static int dmatest_func(void *data)
623 continue;
624 }
625
626 - done.done = false;
627 + done->done = false;
628 tx->callback = dmatest_callback;
629 - tx->callback_param = &done;
630 + tx->callback_param = done;
631 cookie = tx->tx_submit(tx);
632
633 if (dma_submit_error(cookie)) {
634 @@ -652,21 +666,12 @@ static int dmatest_func(void *data)
635 }
636 dma_async_issue_pending(chan);
637
638 - wait_event_freezable_timeout(done_wait, done.done,
639 + wait_event_freezable_timeout(thread->done_wait, done->done,
640 msecs_to_jiffies(params->timeout));
641
642 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
643
644 - if (!done.done) {
645 - /*
646 - * We're leaving the timed out dma operation with
647 - * dangling pointer to done_wait. To make this
648 - * correct, we'll need to allocate wait_done for
649 - * each test iteration and perform "who's gonna
650 - * free it this time?" dancing. For now, just
651 - * leave it dangling.
652 - */
653 - WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
654 + if (!done->done) {
655 dmaengine_unmap_put(um);
656 result("test timed out", total_tests, src_off, dst_off,
657 len, 0);
658 @@ -747,7 +752,7 @@ static int dmatest_func(void *data)
659 dmatest_KBs(runtime, total_len), ret);
660
661 /* terminate all transfers on specified channels */
662 - if (ret)
663 + if (ret || failed_tests)
664 dmaengine_terminate_all(chan);
665
666 thread->done = true;
667 @@ -807,6 +812,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
668 thread->info = info;
669 thread->chan = dtc->chan;
670 thread->type = type;
671 + thread->test_done.wait = &thread->done_wait;
672 + init_waitqueue_head(&thread->done_wait);
673 smp_wmb();
674 thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
675 dma_chan_name(chan), op, i);
676 diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
677 index 88a00d06def6..43e88d85129e 100644
678 --- a/drivers/dma/ti-dma-crossbar.c
679 +++ b/drivers/dma/ti-dma-crossbar.c
680 @@ -49,12 +49,12 @@ struct ti_am335x_xbar_data {
681
682 struct ti_am335x_xbar_map {
683 u16 dma_line;
684 - u16 mux_val;
685 + u8 mux_val;
686 };
687
688 -static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u16 val)
689 +static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
690 {
691 - writeb_relaxed(val & 0x1f, iomem + event);
692 + writeb_relaxed(val, iomem + event);
693 }
694
695 static void ti_am335x_xbar_free(struct device *dev, void *route_data)
696 @@ -105,7 +105,7 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
697 }
698
699 map->dma_line = (u16)dma_spec->args[0];
700 - map->mux_val = (u16)dma_spec->args[2];
701 + map->mux_val = (u8)dma_spec->args[2];
702
703 dma_spec->args[2] = 0;
704 dma_spec->args_count = 2;
705 diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
706 index 2f48f848865f..2f47c5b5f4cb 100644
707 --- a/drivers/firmware/efi/efi.c
708 +++ b/drivers/firmware/efi/efi.c
709 @@ -384,7 +384,6 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
710 return 0;
711 }
712 }
713 - pr_err_once("requested map not found.\n");
714 return -ENOENT;
715 }
716
717 diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
718 index 307ec1c11276..311c9d0e8cbb 100644
719 --- a/drivers/firmware/efi/esrt.c
720 +++ b/drivers/firmware/efi/esrt.c
721 @@ -251,7 +251,7 @@ void __init efi_esrt_init(void)
722
723 rc = efi_mem_desc_lookup(efi.esrt, &md);
724 if (rc < 0) {
725 - pr_err("ESRT header is not in the memory map.\n");
726 + pr_warn("ESRT header is not in the memory map.\n");
727 return;
728 }
729
730 diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile
731 index 8363cb57915b..8a08e81ee90d 100644
732 --- a/drivers/gpu/drm/amd/acp/Makefile
733 +++ b/drivers/gpu/drm/amd/acp/Makefile
734 @@ -3,6 +3,4 @@
735 # of AMDSOC/AMDGPU drm driver.
736 # It provides the HW control for ACP related functionalities.
737
738 -subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include
739 -
740 AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o
741 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
742 index bfb4b91869e7..f26d1fd53bef 100644
743 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
744 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
745 @@ -240,6 +240,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
746 for (; i >= 0; i--)
747 drm_free_large(p->chunks[i].kdata);
748 kfree(p->chunks);
749 + p->chunks = NULL;
750 + p->nchunks = 0;
751 put_ctx:
752 amdgpu_ctx_put(p->ctx);
753 free_chunk:
754 diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
755 index af267c35d813..ee5883f59be5 100644
756 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
757 +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
758 @@ -147,9 +147,6 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
759 struct drm_gem_object *obj = buffer->priv;
760 int ret = 0;
761
762 - if (WARN_ON(!obj->filp))
763 - return -EINVAL;
764 -
765 ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
766 if (ret < 0)
767 return ret;
768 diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
769 index 13ba73fd9b68..8bd9e6c371d1 100644
770 --- a/drivers/gpu/drm/radeon/si_dpm.c
771 +++ b/drivers/gpu/drm/radeon/si_dpm.c
772 @@ -3029,6 +3029,16 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
773 max_sclk = 75000;
774 max_mclk = 80000;
775 }
776 + } else if (rdev->family == CHIP_OLAND) {
777 + if ((rdev->pdev->revision == 0xC7) ||
778 + (rdev->pdev->revision == 0x80) ||
779 + (rdev->pdev->revision == 0x81) ||
780 + (rdev->pdev->revision == 0x83) ||
781 + (rdev->pdev->revision == 0x87) ||
782 + (rdev->pdev->device == 0x6604) ||
783 + (rdev->pdev->device == 0x6605)) {
784 + max_sclk = 75000;
785 + }
786 }
787 /* Apply dpm quirks */
788 while (p && p->chip_device != 0) {
789 diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
790 index e06c1344c913..7af77818efc3 100644
791 --- a/drivers/hid/hid-cp2112.c
792 +++ b/drivers/hid/hid-cp2112.c
793 @@ -188,6 +188,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
794 HID_REQ_GET_REPORT);
795 if (ret != CP2112_GPIO_CONFIG_LENGTH) {
796 hid_err(hdev, "error requesting GPIO config: %d\n", ret);
797 + if (ret >= 0)
798 + ret = -EIO;
799 goto exit;
800 }
801
802 @@ -197,8 +199,10 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
803 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
804 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
805 HID_REQ_SET_REPORT);
806 - if (ret < 0) {
807 + if (ret != CP2112_GPIO_CONFIG_LENGTH) {
808 hid_err(hdev, "error setting GPIO config: %d\n", ret);
809 + if (ret >= 0)
810 + ret = -EIO;
811 goto exit;
812 }
813
814 @@ -206,7 +210,7 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
815
816 exit:
817 mutex_unlock(&dev->lock);
818 - return ret < 0 ? ret : -EIO;
819 + return ret;
820 }
821
822 static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
823 diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
824 index 75126e4e3f05..44420073edda 100644
825 --- a/drivers/hv/hv_fcopy.c
826 +++ b/drivers/hv/hv_fcopy.c
827 @@ -61,7 +61,6 @@ static DECLARE_WORK(fcopy_send_work, fcopy_send_data);
828 static const char fcopy_devname[] = "vmbus/hv_fcopy";
829 static u8 *recv_buffer;
830 static struct hvutil_transport *hvt;
831 -static struct completion release_event;
832 /*
833 * This state maintains the version number registered by the daemon.
834 */
835 @@ -322,7 +321,6 @@ static void fcopy_on_reset(void)
836
837 if (cancel_delayed_work_sync(&fcopy_timeout_work))
838 fcopy_respond_to_host(HV_E_FAIL);
839 - complete(&release_event);
840 }
841
842 int hv_fcopy_init(struct hv_util_service *srv)
843 @@ -330,7 +328,6 @@ int hv_fcopy_init(struct hv_util_service *srv)
844 recv_buffer = srv->recv_buffer;
845 fcopy_transaction.recv_channel = srv->channel;
846
847 - init_completion(&release_event);
848 /*
849 * When this driver loads, the user level daemon that
850 * processes the host requests may not yet be running.
851 @@ -352,5 +349,4 @@ void hv_fcopy_deinit(void)
852 fcopy_transaction.state = HVUTIL_DEVICE_DYING;
853 cancel_delayed_work_sync(&fcopy_timeout_work);
854 hvutil_transport_destroy(hvt);
855 - wait_for_completion(&release_event);
856 }
857 diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
858 index 3abfc5983c97..5e1fdc8d32ab 100644
859 --- a/drivers/hv/hv_kvp.c
860 +++ b/drivers/hv/hv_kvp.c
861 @@ -88,7 +88,6 @@ static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
862 static const char kvp_devname[] = "vmbus/hv_kvp";
863 static u8 *recv_buffer;
864 static struct hvutil_transport *hvt;
865 -static struct completion release_event;
866 /*
867 * Register the kernel component with the user-level daemon.
868 * As part of this registration, pass the LIC version number.
869 @@ -717,7 +716,6 @@ static void kvp_on_reset(void)
870 if (cancel_delayed_work_sync(&kvp_timeout_work))
871 kvp_respond_to_host(NULL, HV_E_FAIL);
872 kvp_transaction.state = HVUTIL_DEVICE_INIT;
873 - complete(&release_event);
874 }
875
876 int
877 @@ -726,7 +724,6 @@ hv_kvp_init(struct hv_util_service *srv)
878 recv_buffer = srv->recv_buffer;
879 kvp_transaction.recv_channel = srv->channel;
880
881 - init_completion(&release_event);
882 /*
883 * When this driver loads, the user level daemon that
884 * processes the host requests may not yet be running.
885 @@ -750,5 +747,4 @@ void hv_kvp_deinit(void)
886 cancel_delayed_work_sync(&kvp_timeout_work);
887 cancel_work_sync(&kvp_sendkey_work);
888 hvutil_transport_destroy(hvt);
889 - wait_for_completion(&release_event);
890 }
891 diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
892 index a76e3db0d01f..a6707133c297 100644
893 --- a/drivers/hv/hv_snapshot.c
894 +++ b/drivers/hv/hv_snapshot.c
895 @@ -66,7 +66,6 @@ static int dm_reg_value;
896 static const char vss_devname[] = "vmbus/hv_vss";
897 static __u8 *recv_buffer;
898 static struct hvutil_transport *hvt;
899 -static struct completion release_event;
900
901 static void vss_timeout_func(struct work_struct *dummy);
902 static void vss_handle_request(struct work_struct *dummy);
903 @@ -331,13 +330,11 @@ static void vss_on_reset(void)
904 if (cancel_delayed_work_sync(&vss_timeout_work))
905 vss_respond_to_host(HV_E_FAIL);
906 vss_transaction.state = HVUTIL_DEVICE_INIT;
907 - complete(&release_event);
908 }
909
910 int
911 hv_vss_init(struct hv_util_service *srv)
912 {
913 - init_completion(&release_event);
914 if (vmbus_proto_version < VERSION_WIN8_1) {
915 pr_warn("Integration service 'Backup (volume snapshot)'"
916 " not supported on this host version.\n");
917 @@ -368,5 +365,4 @@ void hv_vss_deinit(void)
918 cancel_delayed_work_sync(&vss_timeout_work);
919 cancel_work_sync(&vss_handle_request_work);
920 hvutil_transport_destroy(hvt);
921 - wait_for_completion(&release_event);
922 }
923 diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
924 index c235a9515267..4402a71e23f7 100644
925 --- a/drivers/hv/hv_utils_transport.c
926 +++ b/drivers/hv/hv_utils_transport.c
927 @@ -182,10 +182,11 @@ static int hvt_op_release(struct inode *inode, struct file *file)
928 * connects back.
929 */
930 hvt_reset(hvt);
931 - mutex_unlock(&hvt->lock);
932
933 if (mode_old == HVUTIL_TRANSPORT_DESTROY)
934 - hvt_transport_free(hvt);
935 + complete(&hvt->release);
936 +
937 + mutex_unlock(&hvt->lock);
938
939 return 0;
940 }
941 @@ -304,6 +305,7 @@ struct hvutil_transport *hvutil_transport_init(const char *name,
942
943 init_waitqueue_head(&hvt->outmsg_q);
944 mutex_init(&hvt->lock);
945 + init_completion(&hvt->release);
946
947 spin_lock(&hvt_list_lock);
948 list_add(&hvt->list, &hvt_list);
949 @@ -351,6 +353,8 @@ void hvutil_transport_destroy(struct hvutil_transport *hvt)
950 if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0)
951 cn_del_callback(&hvt->cn_id);
952
953 - if (mode_old != HVUTIL_TRANSPORT_CHARDEV)
954 - hvt_transport_free(hvt);
955 + if (mode_old == HVUTIL_TRANSPORT_CHARDEV)
956 + wait_for_completion(&hvt->release);
957 +
958 + hvt_transport_free(hvt);
959 }
960 diff --git a/drivers/hv/hv_utils_transport.h b/drivers/hv/hv_utils_transport.h
961 index d98f5225c3e6..79afb626e166 100644
962 --- a/drivers/hv/hv_utils_transport.h
963 +++ b/drivers/hv/hv_utils_transport.h
964 @@ -41,6 +41,7 @@ struct hvutil_transport {
965 int outmsg_len; /* its length */
966 wait_queue_head_t outmsg_q; /* poll/read wait queue */
967 struct mutex lock; /* protects struct members */
968 + struct completion release; /* synchronize with fd release */
969 };
970
971 struct hvutil_transport *hvutil_transport_init(const char *name,
972 diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
973 index 63b5db4e4070..e0f3244505d3 100644
974 --- a/drivers/hwtracing/intel_th/pci.c
975 +++ b/drivers/hwtracing/intel_th/pci.c
976 @@ -95,6 +95,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
977 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
978 .driver_data = (kernel_ulong_t)0,
979 },
980 + {
981 + /* Gemini Lake */
982 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
983 + .driver_data = (kernel_ulong_t)0,
984 + },
985 { 0 },
986 };
987
988 diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
989 index 809a02800102..a09d6eed3b88 100644
990 --- a/drivers/infiniband/core/cma.c
991 +++ b/drivers/infiniband/core/cma.c
992 @@ -1482,7 +1482,7 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
993 return id_priv;
994 }
995
996 -static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
997 +static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
998 {
999 return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
1000 }
1001 @@ -1877,7 +1877,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1002 struct rdma_id_private *listen_id, *conn_id = NULL;
1003 struct rdma_cm_event event;
1004 struct net_device *net_dev;
1005 - int offset, ret;
1006 + u8 offset;
1007 + int ret;
1008
1009 listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
1010 if (IS_ERR(listen_id))
1011 @@ -3309,7 +3310,8 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
1012 struct ib_cm_sidr_req_param req;
1013 struct ib_cm_id *id;
1014 void *private_data;
1015 - int offset, ret;
1016 + u8 offset;
1017 + int ret;
1018
1019 memset(&req, 0, sizeof req);
1020 offset = cma_user_data_offset(id_priv);
1021 @@ -3366,7 +3368,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
1022 struct rdma_route *route;
1023 void *private_data;
1024 struct ib_cm_id *id;
1025 - int offset, ret;
1026 + u8 offset;
1027 + int ret;
1028
1029 memset(&req, 0, sizeof req);
1030 offset = cma_user_data_offset(id_priv);
1031 diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
1032 index 862381aa83c8..b55adf53c758 100644
1033 --- a/drivers/infiniband/hw/cxgb4/t4.h
1034 +++ b/drivers/infiniband/hw/cxgb4/t4.h
1035 @@ -171,7 +171,7 @@ struct t4_cqe {
1036 __be32 msn;
1037 } rcqe;
1038 struct {
1039 - u32 stag;
1040 + __be32 stag;
1041 u16 nada2;
1042 u16 cidx;
1043 } scqe;
1044 diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
1045 index 24d0820873cf..4682909b021b 100644
1046 --- a/drivers/infiniband/hw/hfi1/chip.c
1047 +++ b/drivers/infiniband/hw/hfi1/chip.c
1048 @@ -9769,7 +9769,7 @@ int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
1049 goto unimplemented;
1050
1051 case HFI1_IB_CFG_OP_VLS:
1052 - val = ppd->vls_operational;
1053 + val = ppd->actual_vls_operational;
1054 break;
1055 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
1056 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
1057 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
1058 index 830fecb6934c..335bd2c9e16e 100644
1059 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
1060 +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
1061 @@ -1177,10 +1177,15 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
1062 ipoib_ib_dev_down(dev);
1063
1064 if (level == IPOIB_FLUSH_HEAVY) {
1065 + rtnl_lock();
1066 if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
1067 ipoib_ib_dev_stop(dev);
1068 - if (ipoib_ib_dev_open(dev) != 0)
1069 +
1070 + result = ipoib_ib_dev_open(dev);
1071 + rtnl_unlock();
1072 + if (result)
1073 return;
1074 +
1075 if (netif_queue_stopped(dev))
1076 netif_start_queue(dev);
1077 }
1078 diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
1079 index dbf09836ff30..d1051e3ce819 100644
1080 --- a/drivers/input/serio/i8042-x86ia64io.h
1081 +++ b/drivers/input/serio/i8042-x86ia64io.h
1082 @@ -520,6 +520,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
1083 DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
1084 },
1085 },
1086 + {
1087 + /* TUXEDO BU1406 */
1088 + .matches = {
1089 + DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
1090 + DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
1091 + },
1092 + },
1093 { }
1094 };
1095
1096 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
1097 index 1a0b110f12c0..0c910a863581 100644
1098 --- a/drivers/iommu/amd_iommu.c
1099 +++ b/drivers/iommu/amd_iommu.c
1100 @@ -3211,7 +3211,7 @@ static void amd_iommu_apply_dm_region(struct device *dev,
1101 unsigned long start, end;
1102
1103 start = IOVA_PFN(region->start);
1104 - end = IOVA_PFN(region->start + region->length);
1105 + end = IOVA_PFN(region->start + region->length - 1);
1106
1107 WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
1108 }
1109 diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
1110 index f50e51c1a9c8..d68a552cfe8d 100644
1111 --- a/drivers/iommu/io-pgtable-arm-v7s.c
1112 +++ b/drivers/iommu/io-pgtable-arm-v7s.c
1113 @@ -418,8 +418,12 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
1114 pte |= ARM_V7S_ATTR_NS_TABLE;
1115
1116 __arm_v7s_set_pte(ptep, pte, 1, cfg);
1117 - } else {
1118 + } else if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
1119 cptep = iopte_deref(pte, lvl);
1120 + } else {
1121 + /* We require an unmap first */
1122 + WARN_ON(!selftest_running);
1123 + return -EEXIST;
1124 }
1125
1126 /* Rinse, repeat */
1127 diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
1128 index b8aeb0768483..68c6050d1efb 100644
1129 --- a/drivers/iommu/mtk_iommu_v1.c
1130 +++ b/drivers/iommu/mtk_iommu_v1.c
1131 @@ -703,7 +703,7 @@ static struct platform_driver mtk_iommu_driver = {
1132 .probe = mtk_iommu_probe,
1133 .remove = mtk_iommu_remove,
1134 .driver = {
1135 - .name = "mtk-iommu",
1136 + .name = "mtk-iommu-v1",
1137 .of_match_table = mtk_iommu_of_ids,
1138 .pm = &mtk_iommu_pm_ops,
1139 }
1140 diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
1141 index bc0af3307bbf..910cb5e23371 100644
1142 --- a/drivers/irqchip/Kconfig
1143 +++ b/drivers/irqchip/Kconfig
1144 @@ -258,6 +258,7 @@ config IRQ_MXS
1145
1146 config MVEBU_ODMI
1147 bool
1148 + select GENERIC_MSI_IRQ_DOMAIN
1149
1150 config MVEBU_PIC
1151 bool
1152 diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
1153 index ab8a1b36af21..edb8d1a1a69f 100644
1154 --- a/drivers/md/bcache/request.c
1155 +++ b/drivers/md/bcache/request.c
1156 @@ -468,6 +468,7 @@ struct search {
1157 unsigned recoverable:1;
1158 unsigned write:1;
1159 unsigned read_dirty_data:1;
1160 + unsigned cache_missed:1;
1161
1162 unsigned long start_time;
1163
1164 @@ -653,6 +654,7 @@ static inline struct search *search_alloc(struct bio *bio,
1165
1166 s->orig_bio = bio;
1167 s->cache_miss = NULL;
1168 + s->cache_missed = 0;
1169 s->d = d;
1170 s->recoverable = 1;
1171 s->write = op_is_write(bio_op(bio));
1172 @@ -771,7 +773,7 @@ static void cached_dev_read_done_bh(struct closure *cl)
1173 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
1174
1175 bch_mark_cache_accounting(s->iop.c, s->d,
1176 - !s->cache_miss, s->iop.bypass);
1177 + !s->cache_missed, s->iop.bypass);
1178 trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
1179
1180 if (s->iop.error)
1181 @@ -790,6 +792,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
1182 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
1183 struct bio *miss, *cache_bio;
1184
1185 + s->cache_missed = 1;
1186 +
1187 if (s->cache_miss || s->iop.bypass) {
1188 miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
1189 ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
1190 diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
1191 index f4557f558b24..28ce342348a9 100644
1192 --- a/drivers/md/bcache/super.c
1193 +++ b/drivers/md/bcache/super.c
1194 @@ -2091,6 +2091,7 @@ static void bcache_exit(void)
1195 if (bcache_major)
1196 unregister_blkdev(bcache_major, "bcache");
1197 unregister_reboot_notifier(&reboot);
1198 + mutex_destroy(&bch_register_lock);
1199 }
1200
1201 static int __init bcache_init(void)
1202 @@ -2109,14 +2110,15 @@ static int __init bcache_init(void)
1203 bcache_major = register_blkdev(0, "bcache");
1204 if (bcache_major < 0) {
1205 unregister_reboot_notifier(&reboot);
1206 + mutex_destroy(&bch_register_lock);
1207 return bcache_major;
1208 }
1209
1210 if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) ||
1211 !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
1212 - sysfs_create_files(bcache_kobj, files) ||
1213 bch_request_init() ||
1214 - bch_debug_init(bcache_kobj))
1215 + bch_debug_init(bcache_kobj) ||
1216 + sysfs_create_files(bcache_kobj, files))
1217 goto err;
1218
1219 return 0;
1220 diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
1221 index 2b13117fb918..ba7edcdd09ce 100644
1222 --- a/drivers/md/md-cluster.c
1223 +++ b/drivers/md/md-cluster.c
1224 @@ -974,6 +974,7 @@ static int leave(struct mddev *mddev)
1225 lockres_free(cinfo->bitmap_lockres);
1226 unlock_all_bitmaps(mddev);
1227 dlm_release_lockspace(cinfo->lockspace, 2);
1228 + kfree(cinfo);
1229 return 0;
1230 }
1231
1232 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1233 index 7aea0221530c..475a7a1bcfe0 100644
1234 --- a/drivers/md/raid5.c
1235 +++ b/drivers/md/raid5.c
1236 @@ -1689,8 +1689,11 @@ static void ops_complete_reconstruct(void *stripe_head_ref)
1237 struct r5dev *dev = &sh->dev[i];
1238
1239 if (dev->written || i == pd_idx || i == qd_idx) {
1240 - if (!discard && !test_bit(R5_SkipCopy, &dev->flags))
1241 + if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) {
1242 set_bit(R5_UPTODATE, &dev->flags);
1243 + if (test_bit(STRIPE_EXPAND_READY, &sh->state))
1244 + set_bit(R5_Expanded, &dev->flags);
1245 + }
1246 if (fua)
1247 set_bit(R5_WantFUA, &dev->flags);
1248 if (sync)
1249 diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
1250 index 77b2675cf8f5..92e176009ffe 100644
1251 --- a/drivers/mfd/fsl-imx25-tsadc.c
1252 +++ b/drivers/mfd/fsl-imx25-tsadc.c
1253 @@ -183,6 +183,19 @@ static int mx25_tsadc_probe(struct platform_device *pdev)
1254 return 0;
1255 }
1256
1257 +static int mx25_tsadc_remove(struct platform_device *pdev)
1258 +{
1259 + struct mx25_tsadc *tsadc = platform_get_drvdata(pdev);
1260 + int irq = platform_get_irq(pdev, 0);
1261 +
1262 + if (irq) {
1263 + irq_set_chained_handler_and_data(irq, NULL, NULL);
1264 + irq_domain_remove(tsadc->domain);
1265 + }
1266 +
1267 + return 0;
1268 +}
1269 +
1270 static const struct of_device_id mx25_tsadc_ids[] = {
1271 { .compatible = "fsl,imx25-tsadc" },
1272 { /* Sentinel */ }
1273 @@ -194,6 +207,7 @@ static struct platform_driver mx25_tsadc_driver = {
1274 .of_match_table = of_match_ptr(mx25_tsadc_ids),
1275 },
1276 .probe = mx25_tsadc_probe,
1277 + .remove = mx25_tsadc_remove,
1278 };
1279 module_platform_driver(mx25_tsadc_driver);
1280
1281 diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
1282 index 19c10dc56513..d8a485f1798b 100644
1283 --- a/drivers/misc/eeprom/at24.c
1284 +++ b/drivers/misc/eeprom/at24.c
1285 @@ -783,7 +783,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
1286 at24->nvmem_config.reg_read = at24_read;
1287 at24->nvmem_config.reg_write = at24_write;
1288 at24->nvmem_config.priv = at24;
1289 - at24->nvmem_config.stride = 4;
1290 + at24->nvmem_config.stride = 1;
1291 at24->nvmem_config.word_size = 1;
1292 at24->nvmem_config.size = chip.byte_len;
1293
1294 diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
1295 index 84e9afcb5c09..6f9535e5e584 100644
1296 --- a/drivers/mmc/host/mtk-sd.c
1297 +++ b/drivers/mmc/host/mtk-sd.c
1298 @@ -579,7 +579,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
1299 }
1300 }
1301 sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
1302 - (mode << 8) | (div % 0xff));
1303 + (mode << 8) | div);
1304 sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
1305 while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
1306 cpu_relax();
1307 @@ -1562,7 +1562,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
1308 host->src_clk_freq = clk_get_rate(host->src_clk);
1309 /* Set host parameters to mmc */
1310 mmc->ops = &mt_msdc_ops;
1311 - mmc->f_min = host->src_clk_freq / (4 * 255);
1312 + mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
1313
1314 mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
1315 /* MMC core transfer sizes tunable parameters */
1316 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1317 index 5d2cf56aed0e..0b894d76aa41 100644
1318 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1319 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1320 @@ -5132,8 +5132,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
1321 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
1322 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
1323 }
1324 - link_info->support_auto_speeds =
1325 - le16_to_cpu(resp->supported_speeds_auto_mode);
1326 + if (resp->supported_speeds_auto_mode)
1327 + link_info->support_auto_speeds =
1328 + le16_to_cpu(resp->supported_speeds_auto_mode);
1329
1330 hwrm_phy_qcaps_exit:
1331 mutex_unlock(&bp->hwrm_cmd_lock);
1332 diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1333 index 0975af2903ef..3480b3078775 100644
1334 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1335 +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1336 @@ -1,7 +1,7 @@
1337 /*
1338 * Broadcom GENET (Gigabit Ethernet) controller driver
1339 *
1340 - * Copyright (c) 2014 Broadcom Corporation
1341 + * Copyright (c) 2014-2017 Broadcom
1342 *
1343 * This program is free software; you can redistribute it and/or modify
1344 * it under the terms of the GNU General Public License version 2 as
1345 @@ -778,8 +778,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
1346 STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
1347 /* Misc UniMAC counters */
1348 STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
1349 - UMAC_RBUF_OVFL_CNT),
1350 - STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
1351 + UMAC_RBUF_OVFL_CNT_V1),
1352 + STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
1353 + UMAC_RBUF_ERR_CNT_V1),
1354 STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
1355 STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
1356 STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
1357 @@ -821,6 +822,45 @@ static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
1358 }
1359 }
1360
1361 +static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
1362 +{
1363 + u16 new_offset;
1364 + u32 val;
1365 +
1366 + switch (offset) {
1367 + case UMAC_RBUF_OVFL_CNT_V1:
1368 + if (GENET_IS_V2(priv))
1369 + new_offset = RBUF_OVFL_CNT_V2;
1370 + else
1371 + new_offset = RBUF_OVFL_CNT_V3PLUS;
1372 +
1373 + val = bcmgenet_rbuf_readl(priv, new_offset);
1374 + /* clear if overflowed */
1375 + if (val == ~0)
1376 + bcmgenet_rbuf_writel(priv, 0, new_offset);
1377 + break;
1378 + case UMAC_RBUF_ERR_CNT_V1:
1379 + if (GENET_IS_V2(priv))
1380 + new_offset = RBUF_ERR_CNT_V2;
1381 + else
1382 + new_offset = RBUF_ERR_CNT_V3PLUS;
1383 +
1384 + val = bcmgenet_rbuf_readl(priv, new_offset);
1385 + /* clear if overflowed */
1386 + if (val == ~0)
1387 + bcmgenet_rbuf_writel(priv, 0, new_offset);
1388 + break;
1389 + default:
1390 + val = bcmgenet_umac_readl(priv, offset);
1391 + /* clear if overflowed */
1392 + if (val == ~0)
1393 + bcmgenet_umac_writel(priv, 0, offset);
1394 + break;
1395 + }
1396 +
1397 + return val;
1398 +}
1399 +
1400 static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
1401 {
1402 int i, j = 0;
1403 @@ -836,19 +876,28 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
1404 case BCMGENET_STAT_NETDEV:
1405 case BCMGENET_STAT_SOFT:
1406 continue;
1407 - case BCMGENET_STAT_MIB_RX:
1408 - case BCMGENET_STAT_MIB_TX:
1409 case BCMGENET_STAT_RUNT:
1410 - if (s->type != BCMGENET_STAT_MIB_RX)
1411 - offset = BCMGENET_STAT_OFFSET;
1412 + offset += BCMGENET_STAT_OFFSET;
1413 + /* fall through */
1414 + case BCMGENET_STAT_MIB_TX:
1415 + offset += BCMGENET_STAT_OFFSET;
1416 + /* fall through */
1417 + case BCMGENET_STAT_MIB_RX:
1418 val = bcmgenet_umac_readl(priv,
1419 UMAC_MIB_START + j + offset);
1420 + offset = 0; /* Reset Offset */
1421 break;
1422 case BCMGENET_STAT_MISC:
1423 - val = bcmgenet_umac_readl(priv, s->reg_offset);
1424 - /* clear if overflowed */
1425 - if (val == ~0)
1426 - bcmgenet_umac_writel(priv, 0, s->reg_offset);
1427 + if (GENET_IS_V1(priv)) {
1428 + val = bcmgenet_umac_readl(priv, s->reg_offset);
1429 + /* clear if overflowed */
1430 + if (val == ~0)
1431 + bcmgenet_umac_writel(priv, 0,
1432 + s->reg_offset);
1433 + } else {
1434 + val = bcmgenet_update_stat_misc(priv,
1435 + s->reg_offset);
1436 + }
1437 break;
1438 }
1439
1440 @@ -2464,24 +2513,28 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
1441 /* Interrupt bottom half */
1442 static void bcmgenet_irq_task(struct work_struct *work)
1443 {
1444 + unsigned long flags;
1445 + unsigned int status;
1446 struct bcmgenet_priv *priv = container_of(
1447 work, struct bcmgenet_priv, bcmgenet_irq_work);
1448
1449 netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
1450
1451 - if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
1452 - priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
1453 + spin_lock_irqsave(&priv->lock, flags);
1454 + status = priv->irq0_stat;
1455 + priv->irq0_stat = 0;
1456 + spin_unlock_irqrestore(&priv->lock, flags);
1457 +
1458 + if (status & UMAC_IRQ_MPD_R) {
1459 netif_dbg(priv, wol, priv->dev,
1460 "magic packet detected, waking up\n");
1461 bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
1462 }
1463
1464 /* Link UP/DOWN event */
1465 - if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
1466 + if (status & UMAC_IRQ_LINK_EVENT)
1467 phy_mac_interrupt(priv->phydev,
1468 - !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
1469 - priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
1470 - }
1471 + !!(status & UMAC_IRQ_LINK_UP));
1472 }
1473
1474 /* bcmgenet_isr1: handle Rx and Tx priority queues */
1475 @@ -2490,22 +2543,21 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
1476 struct bcmgenet_priv *priv = dev_id;
1477 struct bcmgenet_rx_ring *rx_ring;
1478 struct bcmgenet_tx_ring *tx_ring;
1479 - unsigned int index;
1480 + unsigned int index, status;
1481
1482 - /* Save irq status for bottom-half processing. */
1483 - priv->irq1_stat =
1484 - bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
1485 + /* Read irq status */
1486 + status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
1487 ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
1488
1489 /* clear interrupts */
1490 - bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
1491 + bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
1492
1493 netif_dbg(priv, intr, priv->dev,
1494 - "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
1495 + "%s: IRQ=0x%x\n", __func__, status);
1496
1497 /* Check Rx priority queue interrupts */
1498 for (index = 0; index < priv->hw_params->rx_queues; index++) {
1499 - if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
1500 + if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
1501 continue;
1502
1503 rx_ring = &priv->rx_rings[index];
1504 @@ -2518,7 +2570,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
1505
1506 /* Check Tx priority queue interrupts */
1507 for (index = 0; index < priv->hw_params->tx_queues; index++) {
1508 - if (!(priv->irq1_stat & BIT(index)))
1509 + if (!(status & BIT(index)))
1510 continue;
1511
1512 tx_ring = &priv->tx_rings[index];
1513 @@ -2538,19 +2590,20 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
1514 struct bcmgenet_priv *priv = dev_id;
1515 struct bcmgenet_rx_ring *rx_ring;
1516 struct bcmgenet_tx_ring *tx_ring;
1517 + unsigned int status;
1518 + unsigned long flags;
1519
1520 - /* Save irq status for bottom-half processing. */
1521 - priv->irq0_stat =
1522 - bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
1523 + /* Read irq status */
1524 + status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
1525 ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
1526
1527 /* clear interrupts */
1528 - bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
1529 + bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
1530
1531 netif_dbg(priv, intr, priv->dev,
1532 - "IRQ=0x%x\n", priv->irq0_stat);
1533 + "IRQ=0x%x\n", status);
1534
1535 - if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
1536 + if (status & UMAC_IRQ_RXDMA_DONE) {
1537 rx_ring = &priv->rx_rings[DESC_INDEX];
1538
1539 if (likely(napi_schedule_prep(&rx_ring->napi))) {
1540 @@ -2559,7 +2612,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
1541 }
1542 }
1543
1544 - if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
1545 + if (status & UMAC_IRQ_TXDMA_DONE) {
1546 tx_ring = &priv->tx_rings[DESC_INDEX];
1547
1548 if (likely(napi_schedule_prep(&tx_ring->napi))) {
1549 @@ -2568,22 +2621,23 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
1550 }
1551 }
1552
1553 - if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
1554 - UMAC_IRQ_PHY_DET_F |
1555 - UMAC_IRQ_LINK_EVENT |
1556 - UMAC_IRQ_HFB_SM |
1557 - UMAC_IRQ_HFB_MM |
1558 - UMAC_IRQ_MPD_R)) {
1559 - /* all other interested interrupts handled in bottom half */
1560 - schedule_work(&priv->bcmgenet_irq_work);
1561 - }
1562 -
1563 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
1564 - priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
1565 - priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
1566 + status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
1567 wake_up(&priv->wq);
1568 }
1569
1570 + /* all other interested interrupts handled in bottom half */
1571 + status &= (UMAC_IRQ_LINK_EVENT |
1572 + UMAC_IRQ_MPD_R);
1573 + if (status) {
1574 + /* Save irq status for bottom-half processing. */
1575 + spin_lock_irqsave(&priv->lock, flags);
1576 + priv->irq0_stat |= status;
1577 + spin_unlock_irqrestore(&priv->lock, flags);
1578 +
1579 + schedule_work(&priv->bcmgenet_irq_work);
1580 + }
1581 +
1582 return IRQ_HANDLED;
1583 }
1584
1585 @@ -2808,6 +2862,8 @@ static int bcmgenet_open(struct net_device *dev)
1586 err_fini_dma:
1587 bcmgenet_fini_dma(priv);
1588 err_clk_disable:
1589 + if (priv->internal_phy)
1590 + bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
1591 clk_disable_unprepare(priv->clk);
1592 return ret;
1593 }
1594 @@ -3184,6 +3240,12 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
1595 */
1596 gphy_rev = reg & 0xffff;
1597
1598 + /* This is reserved so should require special treatment */
1599 + if (gphy_rev == 0 || gphy_rev == 0x01ff) {
1600 + pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
1601 + return;
1602 + }
1603 +
1604 /* This is the good old scheme, just GPHY major, no minor nor patch */
1605 if ((gphy_rev & 0xf0) != 0)
1606 priv->gphy_rev = gphy_rev << 8;
1607 @@ -3192,12 +3254,6 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
1608 else if ((gphy_rev & 0xff00) != 0)
1609 priv->gphy_rev = gphy_rev;
1610
1611 - /* This is reserved so should require special treatment */
1612 - else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
1613 - pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
1614 - return;
1615 - }
1616 -
1617 #ifdef CONFIG_PHYS_ADDR_T_64BIT
1618 if (!(params->flags & GENET_HAS_40BITS))
1619 pr_warn("GENET does not support 40-bits PA\n");
1620 @@ -3240,6 +3296,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
1621 const void *macaddr;
1622 struct resource *r;
1623 int err = -EIO;
1624 + const char *phy_mode_str;
1625
1626 /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
1627 dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
1628 @@ -3283,6 +3340,8 @@ static int bcmgenet_probe(struct platform_device *pdev)
1629 goto err;
1630 }
1631
1632 + spin_lock_init(&priv->lock);
1633 +
1634 SET_NETDEV_DEV(dev, &pdev->dev);
1635 dev_set_drvdata(&pdev->dev, dev);
1636 ether_addr_copy(dev->dev_addr, macaddr);
1637 @@ -3345,6 +3404,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
1638 priv->clk_eee = NULL;
1639 }
1640
1641 + /* If this is an internal GPHY, power it on now, before UniMAC is
1642 + * brought out of reset as absolutely no UniMAC activity is allowed
1643 + */
1644 + if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
1645 + !strcasecmp(phy_mode_str, "internal"))
1646 + bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
1647 +
1648 err = reset_umac(priv);
1649 if (err)
1650 goto err_clk_disable;
1651 @@ -3511,6 +3577,8 @@ static int bcmgenet_resume(struct device *d)
1652 return 0;
1653
1654 out_clk_disable:
1655 + if (priv->internal_phy)
1656 + bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
1657 clk_disable_unprepare(priv->clk);
1658 return ret;
1659 }
1660 diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
1661 index 1e2dc34d331a..db7f289d65ae 100644
1662 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
1663 +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
1664 @@ -1,5 +1,5 @@
1665 /*
1666 - * Copyright (c) 2014 Broadcom Corporation
1667 + * Copyright (c) 2014-2017 Broadcom
1668 *
1669 * This program is free software; you can redistribute it and/or modify
1670 * it under the terms of the GNU General Public License version 2 as
1671 @@ -214,7 +214,9 @@ struct bcmgenet_mib_counters {
1672 #define MDIO_REG_SHIFT 16
1673 #define MDIO_REG_MASK 0x1F
1674
1675 -#define UMAC_RBUF_OVFL_CNT 0x61C
1676 +#define UMAC_RBUF_OVFL_CNT_V1 0x61C
1677 +#define RBUF_OVFL_CNT_V2 0x80
1678 +#define RBUF_OVFL_CNT_V3PLUS 0x94
1679
1680 #define UMAC_MPD_CTRL 0x620
1681 #define MPD_EN (1 << 0)
1682 @@ -224,7 +226,9 @@ struct bcmgenet_mib_counters {
1683
1684 #define UMAC_MPD_PW_MS 0x624
1685 #define UMAC_MPD_PW_LS 0x628
1686 -#define UMAC_RBUF_ERR_CNT 0x634
1687 +#define UMAC_RBUF_ERR_CNT_V1 0x634
1688 +#define RBUF_ERR_CNT_V2 0x84
1689 +#define RBUF_ERR_CNT_V3PLUS 0x98
1690 #define UMAC_MDF_ERR_CNT 0x638
1691 #define UMAC_MDF_CTRL 0x650
1692 #define UMAC_MDF_ADDR 0x654
1693 @@ -619,11 +623,13 @@ struct bcmgenet_priv {
1694 struct work_struct bcmgenet_irq_work;
1695 int irq0;
1696 int irq1;
1697 - unsigned int irq0_stat;
1698 - unsigned int irq1_stat;
1699 int wol_irq;
1700 bool wol_irq_disabled;
1701
1702 + /* shared status */
1703 + spinlock_t lock;
1704 + unsigned int irq0_stat;
1705 +
1706 /* HW descriptors/checksum variables */
1707 bool desc_64b_en;
1708 bool desc_rxchk_en;
1709 diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
1710 index e36bebcab3f2..dae9dcfa8f36 100644
1711 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
1712 +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
1713 @@ -2304,6 +2304,17 @@ static int sync_toggles(struct mlx4_dev *dev)
1714 rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
1715 if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
1716 /* PCI might be offline */
1717 +
1718 + /* If device removal has been requested,
1719 + * do not continue retrying.
1720 + */
1721 + if (dev->persist->interface_state &
1722 + MLX4_INTERFACE_STATE_NOWAIT) {
1723 + mlx4_warn(dev,
1724 + "communication channel is offline\n");
1725 + return -EIO;
1726 + }
1727 +
1728 msleep(100);
1729 wr_toggle = swab32(readl(&priv->mfunc.comm->
1730 slave_write));
1731 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
1732 index 727122de7df0..5411ca48978a 100644
1733 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
1734 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
1735 @@ -1940,6 +1940,14 @@ static int mlx4_comm_check_offline(struct mlx4_dev *dev)
1736 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
1737 if (!offline_bit)
1738 return 0;
1739 +
1740 + /* If device removal has been requested,
1741 + * do not continue retrying.
1742 + */
1743 + if (dev->persist->interface_state &
1744 + MLX4_INTERFACE_STATE_NOWAIT)
1745 + break;
1746 +
1747 /* There are cases as part of AER/Reset flow that PF needs
1748 * around 100 msec to load. We therefore sleep for 100 msec
1749 * to allow other tasks to make use of that CPU during this
1750 @@ -3954,6 +3962,9 @@ static void mlx4_remove_one(struct pci_dev *pdev)
1751 struct devlink *devlink = priv_to_devlink(priv);
1752 int active_vfs = 0;
1753
1754 + if (mlx4_is_slave(dev))
1755 + persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
1756 +
1757 mutex_lock(&persist->interface_state_mutex);
1758 persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
1759 mutex_unlock(&persist->interface_state_mutex);
1760 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1761 index 4de3c28b0547..331a6ca4856d 100644
1762 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1763 +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1764 @@ -1015,7 +1015,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
1765 u32 *match_criteria)
1766 {
1767 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1768 - struct list_head *prev = ft->node.children.prev;
1769 + struct list_head *prev = &ft->node.children;
1770 unsigned int candidate_index = 0;
1771 struct mlx5_flow_group *fg;
1772 void *match_criteria_addr;
1773 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
1774 index b3309f2ed7dc..981cd1d84a5b 100644
1775 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
1776 +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
1777 @@ -1283,6 +1283,7 @@ static int init_one(struct pci_dev *pdev,
1778 if (err)
1779 goto clean_load;
1780
1781 + pci_save_state(pdev);
1782 return 0;
1783
1784 clean_load:
1785 @@ -1331,9 +1332,8 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
1786
1787 mlx5_enter_error_state(dev);
1788 mlx5_unload_one(dev, priv, false);
1789 - /* In case of kernel call save the pci state and drain the health wq */
1790 + /* In case of kernel call drain the health wq */
1791 if (state) {
1792 - pci_save_state(pdev);
1793 mlx5_drain_health_wq(dev);
1794 mlx5_pci_disable_device(dev);
1795 }
1796 @@ -1385,6 +1385,7 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
1797
1798 pci_set_master(pdev);
1799 pci_restore_state(pdev);
1800 + pci_save_state(pdev);
1801
1802 if (wait_vital(pdev)) {
1803 dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
1804 diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
1805 index 6460c7256f2b..a01e6c0d0cd1 100644
1806 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
1807 +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
1808 @@ -788,7 +788,7 @@ static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
1809 #define MLXSW_REG_SPVM_ID 0x200F
1810 #define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
1811 #define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
1812 -#define MLXSW_REG_SPVM_REC_MAX_COUNT 256
1813 +#define MLXSW_REG_SPVM_REC_MAX_COUNT 255
1814 #define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \
1815 MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)
1816
1817 @@ -1757,7 +1757,7 @@ static inline void mlxsw_reg_sfmr_pack(char *payload,
1818 #define MLXSW_REG_SPVMLR_ID 0x2020
1819 #define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
1820 #define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
1821 -#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256
1822 +#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 255
1823 #define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
1824 MLXSW_REG_SPVMLR_REC_LEN * \
1825 MLXSW_REG_SPVMLR_REC_MAX_COUNT)
1826 diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
1827 index 0c42c240b5cf..ed014bdbbabd 100644
1828 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
1829 +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
1830 @@ -373,8 +373,9 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
1831 u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
1832 u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
1833 u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1834 + u32 align = elems_per_page * DQ_RANGE_ALIGN;
1835
1836 - p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page);
1837 + p_conn->cid_count = roundup(p_conn->cid_count, align);
1838 }
1839 }
1840
1841 diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
1842 index 62ae55bd81b8..a3360cbdb30b 100644
1843 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
1844 +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
1845 @@ -187,6 +187,8 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
1846 /* If need to reuse or there's no replacement buffer, repost this */
1847 if (rc)
1848 goto out_post;
1849 + dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
1850 + cdev->ll2->rx_size, DMA_FROM_DEVICE);
1851
1852 skb = build_skb(buffer->data, 0);
1853 if (!skb) {
1854 @@ -441,7 +443,7 @@ qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
1855 static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
1856 struct qed_ll2_info *p_ll2_conn,
1857 union core_rx_cqe_union *p_cqe,
1858 - unsigned long lock_flags,
1859 + unsigned long *p_lock_flags,
1860 bool b_last_cqe)
1861 {
1862 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
1863 @@ -462,10 +464,10 @@ static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
1864 "Mismatch between active_descq and the LL2 Rx chain\n");
1865 list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
1866
1867 - spin_unlock_irqrestore(&p_rx->lock, lock_flags);
1868 + spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
1869 qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
1870 p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
1871 - spin_lock_irqsave(&p_rx->lock, lock_flags);
1872 + spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
1873
1874 return 0;
1875 }
1876 @@ -505,7 +507,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
1877 break;
1878 case CORE_RX_CQE_TYPE_REGULAR:
1879 rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
1880 - cqe, flags, b_last_cqe);
1881 + cqe, &flags,
1882 + b_last_cqe);
1883 break;
1884 default:
1885 rc = -EIO;
1886 diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
1887 index 1d85109cb8ed..3d5d5d54c103 100644
1888 --- a/drivers/net/ethernet/sfc/ef10.c
1889 +++ b/drivers/net/ethernet/sfc/ef10.c
1890 @@ -4967,7 +4967,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
1891 * MCFW do not support VFs.
1892 */
1893 rc = efx_ef10_vport_set_mac_address(efx);
1894 - } else {
1895 + } else if (rc) {
1896 efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
1897 sizeof(inbuf), NULL, 0, rc);
1898 }
1899 diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
1900 index e46b1ebbbff4..7ea8ead4fd1c 100644
1901 --- a/drivers/net/fjes/fjes_main.c
1902 +++ b/drivers/net/fjes/fjes_main.c
1903 @@ -1277,7 +1277,7 @@ static void fjes_netdev_setup(struct net_device *netdev)
1904 fjes_set_ethtool_ops(netdev);
1905 netdev->mtu = fjes_support_mtu[3];
1906 netdev->flags |= IFF_BROADCAST;
1907 - netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER;
1908 + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1909 }
1910
1911 static void fjes_irq_watch_task(struct work_struct *work)
1912 diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
1913 index dc8ccac0a01d..6d55049cd3dc 100644
1914 --- a/drivers/net/macvlan.c
1915 +++ b/drivers/net/macvlan.c
1916 @@ -452,7 +452,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
1917 struct macvlan_dev, list);
1918 else
1919 vlan = macvlan_hash_lookup(port, eth->h_dest);
1920 - if (vlan == NULL)
1921 + if (!vlan || vlan->mode == MACVLAN_MODE_SOURCE)
1922 return RX_HANDLER_PASS;
1923
1924 dev = vlan->dev;
1925 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
1926 index 440d5f42810f..b883af93929c 100644
1927 --- a/drivers/net/ppp/ppp_generic.c
1928 +++ b/drivers/net/ppp/ppp_generic.c
1929 @@ -958,6 +958,7 @@ static __net_exit void ppp_exit_net(struct net *net)
1930 unregister_netdevice_many(&list);
1931 rtnl_unlock();
1932
1933 + mutex_destroy(&pn->all_ppp_mutex);
1934 idr_destroy(&pn->units_idr);
1935 }
1936
1937 diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
1938 index e7f5910a6519..f8eb66ef2944 100644
1939 --- a/drivers/net/wimax/i2400m/usb.c
1940 +++ b/drivers/net/wimax/i2400m/usb.c
1941 @@ -467,6 +467,9 @@ int i2400mu_probe(struct usb_interface *iface,
1942 struct i2400mu *i2400mu;
1943 struct usb_device *usb_dev = interface_to_usbdev(iface);
1944
1945 + if (iface->cur_altsetting->desc.bNumEndpoints < 4)
1946 + return -ENODEV;
1947 +
1948 if (usb_dev->speed != USB_SPEED_HIGH)
1949 dev_err(dev, "device not connected as high speed\n");
1950
1951 diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
1952 index 1fa7f844b5da..8e9480cc33e1 100644
1953 --- a/drivers/net/wireless/ath/ath9k/tx99.c
1954 +++ b/drivers/net/wireless/ath/ath9k/tx99.c
1955 @@ -179,6 +179,9 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
1956 ssize_t len;
1957 int r;
1958
1959 + if (count < 1)
1960 + return -EINVAL;
1961 +
1962 if (sc->cur_chan->nvifs > 1)
1963 return -EOPNOTSUPP;
1964
1965 @@ -186,6 +189,8 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
1966 if (copy_from_user(buf, user_buf, len))
1967 return -EFAULT;
1968
1969 + buf[len] = '\0';
1970 +
1971 if (strtobool(buf, &start))
1972 return -EINVAL;
1973
1974 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
1975 index 9789f3c5a785..f1231c0ea336 100644
1976 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
1977 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
1978 @@ -2320,7 +2320,7 @@ iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
1979 {
1980 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1981
1982 - /* Called when we need to transmit (a) frame(s) from agg queue */
1983 + /* Called when we need to transmit (a) frame(s) from agg or dqa queue */
1984
1985 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
1986 tids, more_data, true);
1987 @@ -2340,7 +2340,8 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
1988 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
1989 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1990
1991 - if (tid_data->state != IWL_AGG_ON &&
1992 + if (!iwl_mvm_is_dqa_supported(mvm) &&
1993 + tid_data->state != IWL_AGG_ON &&
1994 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
1995 continue;
1996
1997 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
1998 index e64aeb4a2204..bdd1deed55a4 100644
1999 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
2000 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
2001 @@ -3032,7 +3032,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
2002 struct ieee80211_sta *sta,
2003 enum ieee80211_frame_release_type reason,
2004 u16 cnt, u16 tids, bool more_data,
2005 - bool agg)
2006 + bool single_sta_queue)
2007 {
2008 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2009 struct iwl_mvm_add_sta_cmd cmd = {
2010 @@ -3052,14 +3052,14 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
2011 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
2012 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
2013
2014 - /* If we're releasing frames from aggregation queues then check if the
2015 - * all queues combined that we're releasing frames from have
2016 + /* If we're releasing frames from aggregation or dqa queues then check
2017 + * if all the queues that we're releasing frames from, combined, have:
2018 * - more frames than the service period, in which case more_data
2019 * needs to be set
2020 * - fewer than 'cnt' frames, in which case we need to adjust the
2021 * firmware command (but do that unconditionally)
2022 */
2023 - if (agg) {
2024 + if (single_sta_queue) {
2025 int remaining = cnt;
2026 int sleep_tx_count;
2027
2028 @@ -3069,7 +3069,8 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
2029 u16 n_queued;
2030
2031 tid_data = &mvmsta->tid_data[tid];
2032 - if (WARN(tid_data->state != IWL_AGG_ON &&
2033 + if (WARN(!iwl_mvm_is_dqa_supported(mvm) &&
2034 + tid_data->state != IWL_AGG_ON &&
2035 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
2036 "TID %d state is %d\n",
2037 tid, tid_data->state)) {
2038 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
2039 index e068d5355865..f65950e91ed5 100644
2040 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
2041 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
2042 @@ -545,7 +545,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
2043 struct ieee80211_sta *sta,
2044 enum ieee80211_frame_release_type reason,
2045 u16 cnt, u16 tids, bool more_data,
2046 - bool agg);
2047 + bool single_sta_queue);
2048 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
2049 bool drain);
2050 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
2051 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
2052 index 092ae0024f22..7465d4db136f 100644
2053 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
2054 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
2055 @@ -7,7 +7,7 @@
2056 *
2057 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
2058 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
2059 - * Copyright(c) 2016 Intel Deutschland GmbH
2060 + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
2061 *
2062 * This program is free software; you can redistribute it and/or modify
2063 * it under the terms of version 2 of the GNU General Public License as
2064 @@ -34,6 +34,7 @@
2065 *
2066 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
2067 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
2068 + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
2069 * All rights reserved.
2070 *
2071 * Redistribution and use in source and binary forms, with or without
2072 @@ -621,8 +622,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
2073 * values.
2074 * Note that we don't need to make sure it isn't agg'd, since we're
2075 * TXing non-sta
2076 + * For DQA mode - we shouldn't increase it though
2077 */
2078 - atomic_inc(&mvm->pending_frames[sta_id]);
2079 + if (!iwl_mvm_is_dqa_supported(mvm))
2080 + atomic_inc(&mvm->pending_frames[sta_id]);
2081
2082 return 0;
2083 }
2084 @@ -1009,11 +1012,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
2085
2086 spin_unlock(&mvmsta->lock);
2087
2088 - /* Increase pending frames count if this isn't AMPDU */
2089 - if ((iwl_mvm_is_dqa_supported(mvm) &&
2090 - mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_ON &&
2091 - mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_STARTING) ||
2092 - (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu))
2093 + /* Increase pending frames count if this isn't AMPDU or DQA queue */
2094 + if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)
2095 atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
2096
2097 return 0;
2098 @@ -1083,12 +1083,13 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
2099 lockdep_assert_held(&mvmsta->lock);
2100
2101 if ((tid_data->state == IWL_AGG_ON ||
2102 - tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
2103 + tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA ||
2104 + iwl_mvm_is_dqa_supported(mvm)) &&
2105 iwl_mvm_tid_queued(tid_data) == 0) {
2106 /*
2107 - * Now that this aggregation queue is empty tell mac80211 so it
2108 - * knows we no longer have frames buffered for the station on
2109 - * this TID (for the TIM bitmap calculation.)
2110 + * Now that this aggregation or DQA queue is empty tell
2111 + * mac80211 so it knows we no longer have frames buffered for
2112 + * the station on this TID (for the TIM bitmap calculation.)
2113 */
2114 ieee80211_sta_set_buffered(sta, tid, false);
2115 }
2116 @@ -1261,7 +1262,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
2117 u8 skb_freed = 0;
2118 u16 next_reclaimed, seq_ctl;
2119 bool is_ndp = false;
2120 - bool txq_agg = false; /* Is this TXQ aggregated */
2121
2122 __skb_queue_head_init(&skbs);
2123
2124 @@ -1287,6 +1287,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
2125 info->flags |= IEEE80211_TX_STAT_ACK;
2126 break;
2127 case TX_STATUS_FAIL_DEST_PS:
2128 + /* In DQA, the FW should have stopped the queue and not
2129 + * return this status
2130 + */
2131 + WARN_ON(iwl_mvm_is_dqa_supported(mvm));
2132 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2133 break;
2134 default:
2135 @@ -1391,15 +1395,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
2136 bool send_eosp_ndp = false;
2137
2138 spin_lock_bh(&mvmsta->lock);
2139 - if (iwl_mvm_is_dqa_supported(mvm)) {
2140 - enum iwl_mvm_agg_state state;
2141 -
2142 - state = mvmsta->tid_data[tid].state;
2143 - txq_agg = (state == IWL_AGG_ON ||
2144 - state == IWL_EMPTYING_HW_QUEUE_DELBA);
2145 - } else {
2146 - txq_agg = txq_id >= mvm->first_agg_queue;
2147 - }
2148
2149 if (!is_ndp) {
2150 tid_data->next_reclaimed = next_reclaimed;
2151 @@ -1456,11 +1451,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
2152 * If the txq is not an AMPDU queue, there is no chance we freed
2153 * several skbs. Check that out...
2154 */
2155 - if (txq_agg)
2156 + if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue)
2157 goto out;
2158
2159 /* We can't free more than one frame at once on a shared queue */
2160 - WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1));
2161 + WARN_ON(skb_freed > 1);
2162
2163 /* If we have still frames for this STA nothing to do here */
2164 if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
2165 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
2166 index fbeca065f18c..719ee5fb2626 100644
2167 --- a/drivers/nvme/host/core.c
2168 +++ b/drivers/nvme/host/core.c
2169 @@ -1619,7 +1619,8 @@ static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
2170 mutex_lock(&ctrl->namespaces_mutex);
2171 list_for_each_entry(ns, &ctrl->namespaces, list) {
2172 if (ns->ns_id == nsid) {
2173 - kref_get(&ns->kref);
2174 + if (!kref_get_unless_zero(&ns->kref))
2175 + continue;
2176 ret = ns;
2177 break;
2178 }
2179 diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
2180 index fbd6d487103f..c89d68a76f3d 100644
2181 --- a/drivers/nvme/target/core.c
2182 +++ b/drivers/nvme/target/core.c
2183 @@ -422,6 +422,13 @@ void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
2184 ctrl->sqs[qid] = sq;
2185 }
2186
2187 +static void nvmet_confirm_sq(struct percpu_ref *ref)
2188 +{
2189 + struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
2190 +
2191 + complete(&sq->confirm_done);
2192 +}
2193 +
2194 void nvmet_sq_destroy(struct nvmet_sq *sq)
2195 {
2196 /*
2197 @@ -430,7 +437,8 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
2198 */
2199 if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
2200 nvmet_async_events_free(sq->ctrl);
2201 - percpu_ref_kill(&sq->ref);
2202 + percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
2203 + wait_for_completion(&sq->confirm_done);
2204 wait_for_completion(&sq->free_done);
2205 percpu_ref_exit(&sq->ref);
2206
2207 @@ -458,6 +466,7 @@ int nvmet_sq_init(struct nvmet_sq *sq)
2208 return ret;
2209 }
2210 init_completion(&sq->free_done);
2211 + init_completion(&sq->confirm_done);
2212
2213 return 0;
2214 }
2215 diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
2216 index d5df77d686b2..c8e612c1c72f 100644
2217 --- a/drivers/nvme/target/loop.c
2218 +++ b/drivers/nvme/target/loop.c
2219 @@ -288,9 +288,9 @@ static struct blk_mq_ops nvme_loop_admin_mq_ops = {
2220
2221 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
2222 {
2223 + nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
2224 blk_cleanup_queue(ctrl->ctrl.admin_q);
2225 blk_mq_free_tag_set(&ctrl->admin_tag_set);
2226 - nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
2227 }
2228
2229 static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
2230 diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
2231 index 7655a351320f..26b87dc843d2 100644
2232 --- a/drivers/nvme/target/nvmet.h
2233 +++ b/drivers/nvme/target/nvmet.h
2234 @@ -73,6 +73,7 @@ struct nvmet_sq {
2235 u16 qid;
2236 u16 size;
2237 struct completion free_done;
2238 + struct completion confirm_done;
2239 };
2240
2241 /**
2242 diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
2243 index ca8ddc3fb19e..53bd32550867 100644
2244 --- a/drivers/nvme/target/rdma.c
2245 +++ b/drivers/nvme/target/rdma.c
2246 @@ -703,11 +703,6 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
2247 {
2248 u16 status;
2249
2250 - cmd->queue = queue;
2251 - cmd->n_rdma = 0;
2252 - cmd->req.port = queue->port;
2253 -
2254 -
2255 ib_dma_sync_single_for_cpu(queue->dev->device,
2256 cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
2257 DMA_FROM_DEVICE);
2258 @@ -760,9 +755,12 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2259
2260 cmd->queue = queue;
2261 rsp = nvmet_rdma_get_rsp(queue);
2262 + rsp->queue = queue;
2263 rsp->cmd = cmd;
2264 rsp->flags = 0;
2265 rsp->req.cmd = cmd->nvme_cmd;
2266 + rsp->req.port = queue->port;
2267 + rsp->n_rdma = 0;
2268
2269 if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
2270 unsigned long flags;
2271 diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
2272 index 4b703492376a..00f61225386c 100644
2273 --- a/drivers/pci/pcie/pme.c
2274 +++ b/drivers/pci/pcie/pme.c
2275 @@ -232,6 +232,9 @@ static void pcie_pme_work_fn(struct work_struct *work)
2276 break;
2277
2278 pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
2279 + if (rtsta == (u32) ~0)
2280 + break;
2281 +
2282 if (rtsta & PCI_EXP_RTSTA_PME) {
2283 /*
2284 * Clear PME status of the port. If there are other
2285 @@ -279,7 +282,7 @@ static irqreturn_t pcie_pme_irq(int irq, void *context)
2286 spin_lock_irqsave(&data->lock, flags);
2287 pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
2288
2289 - if (!(rtsta & PCI_EXP_RTSTA_PME)) {
2290 + if (rtsta == (u32) ~0 || !(rtsta & PCI_EXP_RTSTA_PME)) {
2291 spin_unlock_irqrestore(&data->lock, flags);
2292 return IRQ_NONE;
2293 }
2294 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
2295 index 60bada90cd75..a98be6db7e93 100644
2296 --- a/drivers/pci/probe.c
2297 +++ b/drivers/pci/probe.c
2298 @@ -932,7 +932,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
2299 child = pci_add_new_bus(bus, dev, max+1);
2300 if (!child)
2301 goto out;
2302 - pci_bus_insert_busn_res(child, max+1, 0xff);
2303 + pci_bus_insert_busn_res(child, max+1,
2304 + bus->busn_res.end);
2305 }
2306 max++;
2307 buses = (buses & 0xff000000)
2308 @@ -2136,6 +2137,10 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
2309 if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) {
2310 if (max - bus->busn_res.start < pci_hotplug_bus_size - 1)
2311 max = bus->busn_res.start + pci_hotplug_bus_size - 1;
2312 +
2313 + /* Do not allocate more buses than we have room left */
2314 + if (max > bus->busn_res.end)
2315 + max = bus->busn_res.end;
2316 }
2317
2318 /*
2319 diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
2320 index f9357e09e9b3..b6b9b5b74e30 100644
2321 --- a/drivers/pci/remove.c
2322 +++ b/drivers/pci/remove.c
2323 @@ -19,9 +19,9 @@ static void pci_stop_dev(struct pci_dev *dev)
2324 pci_pme_active(dev, false);
2325
2326 if (dev->is_added) {
2327 + device_release_driver(&dev->dev);
2328 pci_proc_detach_device(dev);
2329 pci_remove_sysfs_dev_files(dev);
2330 - device_release_driver(&dev->dev);
2331 dev->is_added = 0;
2332 }
2333
2334 diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
2335 index 671610c989b6..b0c0fa0444dd 100644
2336 --- a/drivers/pinctrl/Kconfig
2337 +++ b/drivers/pinctrl/Kconfig
2338 @@ -26,7 +26,8 @@ config DEBUG_PINCTRL
2339
2340 config PINCTRL_ADI2
2341 bool "ADI pin controller driver"
2342 - depends on BLACKFIN
2343 + depends on (BF54x || BF60x)
2344 + depends on !GPIO_ADI
2345 select PINMUX
2346 select IRQ_DOMAIN
2347 help
2348 diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
2349 index 09356684c32f..abd9d83f6009 100644
2350 --- a/drivers/platform/x86/hp_accel.c
2351 +++ b/drivers/platform/x86/hp_accel.c
2352 @@ -240,6 +240,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
2353 AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
2354 AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
2355 AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd),
2356 + AXIS_DMI_MATCH("HPB440G4", "HP ProBook 440 G4", x_inverted),
2357 AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
2358 AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
2359 AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
2360 diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
2361 index a47a41fc10ad..b5b890127479 100644
2362 --- a/drivers/platform/x86/intel_punit_ipc.c
2363 +++ b/drivers/platform/x86/intel_punit_ipc.c
2364 @@ -252,28 +252,28 @@ static int intel_punit_get_bars(struct platform_device *pdev)
2365 * - GTDRIVER_IPC BASE_IFACE
2366 */
2367 res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
2368 - if (res) {
2369 + if (res && resource_size(res) > 1) {
2370 addr = devm_ioremap_resource(&pdev->dev, res);
2371 if (!IS_ERR(addr))
2372 punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
2373 }
2374
2375 res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
2376 - if (res) {
2377 + if (res && resource_size(res) > 1) {
2378 addr = devm_ioremap_resource(&pdev->dev, res);
2379 if (!IS_ERR(addr))
2380 punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
2381 }
2382
2383 res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
2384 - if (res) {
2385 + if (res && resource_size(res) > 1) {
2386 addr = devm_ioremap_resource(&pdev->dev, res);
2387 if (!IS_ERR(addr))
2388 punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
2389 }
2390
2391 res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
2392 - if (res) {
2393 + if (res && resource_size(res) > 1) {
2394 addr = devm_ioremap_resource(&pdev->dev, res);
2395 if (!IS_ERR(addr))
2396 punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
2397 diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
2398 index 1227ceab61ee..a4b8b603c807 100644
2399 --- a/drivers/rtc/rtc-pcf8563.c
2400 +++ b/drivers/rtc/rtc-pcf8563.c
2401 @@ -422,7 +422,7 @@ static unsigned long pcf8563_clkout_recalc_rate(struct clk_hw *hw,
2402 return 0;
2403
2404 buf &= PCF8563_REG_CLKO_F_MASK;
2405 - return clkout_rates[ret];
2406 + return clkout_rates[buf];
2407 }
2408
2409 static long pcf8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
2410 diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
2411 index 8dcd8c70c7ee..05f523971348 100644
2412 --- a/drivers/scsi/bfa/bfad_debugfs.c
2413 +++ b/drivers/scsi/bfa/bfad_debugfs.c
2414 @@ -255,7 +255,8 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
2415 struct bfad_s *bfad = port->bfad;
2416 struct bfa_s *bfa = &bfad->bfa;
2417 struct bfa_ioc_s *ioc = &bfa->ioc;
2418 - int addr, len, rc, i;
2419 + int addr, rc, i;
2420 + u32 len;
2421 u32 *regbuf;
2422 void __iomem *rb, *reg_addr;
2423 unsigned long flags;
2424 @@ -266,7 +267,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
2425 return PTR_ERR(kern_buf);
2426
2427 rc = sscanf(kern_buf, "%x:%x", &addr, &len);
2428 - if (rc < 2) {
2429 + if (rc < 2 || len > (UINT_MAX >> 2)) {
2430 printk(KERN_INFO
2431 "bfad[%d]: %s failed to read user buf\n",
2432 bfad->inst_no, __func__);
2433 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
2434 index a1d6ab76a514..99623701fc3d 100644
2435 --- a/drivers/scsi/hpsa.c
2436 +++ b/drivers/scsi/hpsa.c
2437 @@ -2951,7 +2951,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2438 /* fill_cmd can't fail here, no data buffer to map. */
2439 (void) fill_cmd(c, reset_type, h, NULL, 0, 0,
2440 scsi3addr, TYPE_MSG);
2441 - rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
2442 + rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
2443 if (rc) {
2444 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
2445 goto out;
2446 @@ -3686,7 +3686,7 @@ static int hpsa_get_volume_status(struct ctlr_info *h,
2447 * # (integer code indicating one of several NOT READY states
2448 * describing why a volume is to be kept offline)
2449 */
2450 -static int hpsa_volume_offline(struct ctlr_info *h,
2451 +static unsigned char hpsa_volume_offline(struct ctlr_info *h,
2452 unsigned char scsi3addr[])
2453 {
2454 struct CommandList *c;
2455 @@ -3707,7 +3707,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
2456 DEFAULT_TIMEOUT);
2457 if (rc) {
2458 cmd_free(h, c);
2459 - return 0;
2460 + return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2461 }
2462 sense = c->err_info->SenseInfo;
2463 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
2464 @@ -3718,19 +3718,13 @@ static int hpsa_volume_offline(struct ctlr_info *h,
2465 cmd_status = c->err_info->CommandStatus;
2466 scsi_status = c->err_info->ScsiStatus;
2467 cmd_free(h, c);
2468 - /* Is the volume 'not ready'? */
2469 - if (cmd_status != CMD_TARGET_STATUS ||
2470 - scsi_status != SAM_STAT_CHECK_CONDITION ||
2471 - sense_key != NOT_READY ||
2472 - asc != ASC_LUN_NOT_READY) {
2473 - return 0;
2474 - }
2475
2476 /* Determine the reason for not ready state */
2477 ldstat = hpsa_get_volume_status(h, scsi3addr);
2478
2479 /* Keep volume offline in certain cases: */
2480 switch (ldstat) {
2481 + case HPSA_LV_FAILED:
2482 case HPSA_LV_UNDERGOING_ERASE:
2483 case HPSA_LV_NOT_AVAILABLE:
2484 case HPSA_LV_UNDERGOING_RPI:
2485 @@ -3752,7 +3746,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
2486 default:
2487 break;
2488 }
2489 - return 0;
2490 + return HPSA_LV_OK;
2491 }
2492
2493 /*
2494 @@ -3825,10 +3819,10 @@ static int hpsa_update_device_info(struct ctlr_info *h,
2495 /* Do an inquiry to the device to see what it is. */
2496 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
2497 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
2498 - /* Inquiry failed (msg printed already) */
2499 dev_err(&h->pdev->dev,
2500 - "hpsa_update_device_info: inquiry failed\n");
2501 - rc = -EIO;
2502 + "%s: inquiry failed, device will be skipped.\n",
2503 + __func__);
2504 + rc = HPSA_INQUIRY_FAILED;
2505 goto bail_out;
2506 }
2507
2508 @@ -3857,15 +3851,19 @@ static int hpsa_update_device_info(struct ctlr_info *h,
2509 if ((this_device->devtype == TYPE_DISK ||
2510 this_device->devtype == TYPE_ZBC) &&
2511 is_logical_dev_addr_mode(scsi3addr)) {
2512 - int volume_offline;
2513 + unsigned char volume_offline;
2514
2515 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
2516 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
2517 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
2518 volume_offline = hpsa_volume_offline(h, scsi3addr);
2519 - if (volume_offline < 0 || volume_offline > 0xff)
2520 - volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
2521 - this_device->volume_offline = volume_offline & 0xff;
2522 + if (volume_offline == HPSA_LV_FAILED) {
2523 + rc = HPSA_LV_FAILED;
2524 + dev_err(&h->pdev->dev,
2525 + "%s: LV failed, device will be skipped.\n",
2526 + __func__);
2527 + goto bail_out;
2528 + }
2529 } else {
2530 this_device->raid_level = RAID_UNKNOWN;
2531 this_device->offload_config = 0;
2532 @@ -4353,8 +4351,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
2533 goto out;
2534 }
2535 if (rc) {
2536 - dev_warn(&h->pdev->dev,
2537 - "Inquiry failed, skipping device.\n");
2538 + h->drv_req_rescan = 1;
2539 continue;
2540 }
2541
2542 @@ -5532,7 +5529,7 @@ static void hpsa_scan_complete(struct ctlr_info *h)
2543
2544 spin_lock_irqsave(&h->scan_lock, flags);
2545 h->scan_finished = 1;
2546 - wake_up_all(&h->scan_wait_queue);
2547 + wake_up(&h->scan_wait_queue);
2548 spin_unlock_irqrestore(&h->scan_lock, flags);
2549 }
2550
2551 @@ -5550,11 +5547,23 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
2552 if (unlikely(lockup_detected(h)))
2553 return hpsa_scan_complete(h);
2554
2555 + /*
2556 + * If a scan is already waiting to run, no need to add another
2557 + */
2558 + spin_lock_irqsave(&h->scan_lock, flags);
2559 + if (h->scan_waiting) {
2560 + spin_unlock_irqrestore(&h->scan_lock, flags);
2561 + return;
2562 + }
2563 +
2564 + spin_unlock_irqrestore(&h->scan_lock, flags);
2565 +
2566 /* wait until any scan already in progress is finished. */
2567 while (1) {
2568 spin_lock_irqsave(&h->scan_lock, flags);
2569 if (h->scan_finished)
2570 break;
2571 + h->scan_waiting = 1;
2572 spin_unlock_irqrestore(&h->scan_lock, flags);
2573 wait_event(h->scan_wait_queue, h->scan_finished);
2574 /* Note: We don't need to worry about a race between this
2575 @@ -5564,6 +5573,7 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
2576 */
2577 }
2578 h->scan_finished = 0; /* mark scan as in progress */
2579 + h->scan_waiting = 0;
2580 spin_unlock_irqrestore(&h->scan_lock, flags);
2581
2582 if (unlikely(lockup_detected(h)))
2583 @@ -8802,6 +8812,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2584 init_waitqueue_head(&h->event_sync_wait_queue);
2585 mutex_init(&h->reset_mutex);
2586 h->scan_finished = 1; /* no scan currently in progress */
2587 + h->scan_waiting = 0;
2588
2589 pci_set_drvdata(pdev, h);
2590 h->ndevices = 0;
2591 @@ -9094,6 +9105,8 @@ static void hpsa_remove_one(struct pci_dev *pdev)
2592 destroy_workqueue(h->rescan_ctlr_wq);
2593 destroy_workqueue(h->resubmit_wq);
2594
2595 + hpsa_delete_sas_host(h);
2596 +
2597 /*
2598 * Call before disabling interrupts.
2599 * scsi_remove_host can trigger I/O operations especially
2600 @@ -9128,8 +9141,6 @@ static void hpsa_remove_one(struct pci_dev *pdev)
2601 h->lockup_detected = NULL; /* init_one 2 */
2602 /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
2603
2604 - hpsa_delete_sas_host(h);
2605 -
2606 kfree(h); /* init_one 1 */
2607 }
2608
2609 @@ -9621,9 +9632,9 @@ static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
2610 struct sas_phy *phy = hpsa_sas_phy->phy;
2611
2612 sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
2613 - sas_phy_free(phy);
2614 if (hpsa_sas_phy->added_to_port)
2615 list_del(&hpsa_sas_phy->phy_list_entry);
2616 + sas_phy_delete(phy);
2617 kfree(hpsa_sas_phy);
2618 }
2619
2620 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
2621 index 9ea162de80dc..e16f2945f6ac 100644
2622 --- a/drivers/scsi/hpsa.h
2623 +++ b/drivers/scsi/hpsa.h
2624 @@ -203,6 +203,7 @@ struct ctlr_info {
2625 dma_addr_t errinfo_pool_dhandle;
2626 unsigned long *cmd_pool_bits;
2627 int scan_finished;
2628 + u8 scan_waiting : 1;
2629 spinlock_t scan_lock;
2630 wait_queue_head_t scan_wait_queue;
2631
2632 diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
2633 index a584cdf07058..5961705eef76 100644
2634 --- a/drivers/scsi/hpsa_cmd.h
2635 +++ b/drivers/scsi/hpsa_cmd.h
2636 @@ -156,6 +156,7 @@
2637 #define CFGTBL_BusType_Fibre2G 0x00000200l
2638
2639 /* VPD Inquiry types */
2640 +#define HPSA_INQUIRY_FAILED 0x02
2641 #define HPSA_VPD_SUPPORTED_PAGES 0x00
2642 #define HPSA_VPD_LV_DEVICE_ID 0x83
2643 #define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1
2644 @@ -166,6 +167,7 @@
2645 /* Logical volume states */
2646 #define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff
2647 #define HPSA_LV_OK 0x0
2648 +#define HPSA_LV_FAILED 0x01
2649 #define HPSA_LV_NOT_AVAILABLE 0x0b
2650 #define HPSA_LV_UNDERGOING_ERASE 0x0F
2651 #define HPSA_LV_UNDERGOING_RPI 0x12
2652 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
2653 index cf04a364fd8b..2b0e61557317 100644
2654 --- a/drivers/scsi/scsi_debug.c
2655 +++ b/drivers/scsi/scsi_debug.c
2656 @@ -2996,11 +2996,11 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
2657 if (-1 == ret) {
2658 write_unlock_irqrestore(&atomic_rw, iflags);
2659 return DID_ERROR << 16;
2660 - } else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
2661 + } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
2662 sdev_printk(KERN_INFO, scp->device,
2663 - "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
2664 + "%s: %s: lb size=%u, IO sent=%d bytes\n",
2665 my_name, "write same",
2666 - num * sdebug_sector_size, ret);
2667 + sdebug_sector_size, ret);
2668
2669 /* Copy first sector to remaining blocks */
2670 for (i = 1 ; i < num ; i++)
2671 diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
2672 index 246456925335..26e6b05d05fc 100644
2673 --- a/drivers/scsi/scsi_devinfo.c
2674 +++ b/drivers/scsi/scsi_devinfo.c
2675 @@ -160,7 +160,7 @@ static struct {
2676 {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */
2677 {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */
2678 {"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
2679 - {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_FORCELUN},
2680 + {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2},
2681 {"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN},
2682 {"easyRAID", "16P", NULL, BLIST_NOREPORTLUN},
2683 {"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN},
2684 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2685 index 09fa1fd0c4ce..ace56c5e61e1 100644
2686 --- a/drivers/scsi/sd.c
2687 +++ b/drivers/scsi/sd.c
2688 @@ -234,11 +234,15 @@ manage_start_stop_store(struct device *dev, struct device_attribute *attr,
2689 {
2690 struct scsi_disk *sdkp = to_scsi_disk(dev);
2691 struct scsi_device *sdp = sdkp->device;
2692 + bool v;
2693
2694 if (!capable(CAP_SYS_ADMIN))
2695 return -EACCES;
2696
2697 - sdp->manage_start_stop = simple_strtoul(buf, NULL, 10);
2698 + if (kstrtobool(buf, &v))
2699 + return -EINVAL;
2700 +
2701 + sdp->manage_start_stop = v;
2702
2703 return count;
2704 }
2705 @@ -256,6 +260,7 @@ static ssize_t
2706 allow_restart_store(struct device *dev, struct device_attribute *attr,
2707 const char *buf, size_t count)
2708 {
2709 + bool v;
2710 struct scsi_disk *sdkp = to_scsi_disk(dev);
2711 struct scsi_device *sdp = sdkp->device;
2712
2713 @@ -265,7 +270,10 @@ allow_restart_store(struct device *dev, struct device_attribute *attr,
2714 if (sdp->type != TYPE_DISK)
2715 return -EINVAL;
2716
2717 - sdp->allow_restart = simple_strtoul(buf, NULL, 10);
2718 + if (kstrtobool(buf, &v))
2719 + return -EINVAL;
2720 +
2721 + sdp->allow_restart = v;
2722
2723 return count;
2724 }
2725 diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
2726 index a5f10936fb9c..e929f5142862 100644
2727 --- a/drivers/soc/mediatek/mtk-pmic-wrap.c
2728 +++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
2729 @@ -522,7 +522,7 @@ struct pmic_wrapper_type {
2730 u32 int_en_all;
2731 u32 spi_w;
2732 u32 wdt_src;
2733 - int has_bridge:1;
2734 + unsigned int has_bridge:1;
2735 int (*init_reg_clock)(struct pmic_wrapper *wrp);
2736 int (*init_soc_specific)(struct pmic_wrapper *wrp);
2737 };
2738 diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
2739 index f1f4788dbd86..6051a7ba0797 100644
2740 --- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
2741 +++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
2742 @@ -342,7 +342,7 @@ u8 rtw_createbss_cmd(struct adapter *padapter)
2743 else
2744 RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
2745
2746 - pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
2747 + pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
2748 if (!pcmd) {
2749 res = _FAIL;
2750 goto exit;
2751 @@ -522,7 +522,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
2752
2753 if (enqueue) {
2754 /* need enqueue, prepare cmd_obj and enqueue */
2755 - cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL);
2756 + cmdobj = kzalloc(sizeof(*cmdobj), GFP_ATOMIC);
2757 if (!cmdobj) {
2758 res = _FAIL;
2759 kfree(param);
2760 diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
2761 index f109eeac358d..ab96629b7889 100644
2762 --- a/drivers/staging/vt6655/device_main.c
2763 +++ b/drivers/staging/vt6655/device_main.c
2764 @@ -1698,10 +1698,11 @@ static int vt6655_suspend(struct pci_dev *pcid, pm_message_t state)
2765 MACbShutdown(priv);
2766
2767 pci_disable_device(pcid);
2768 - pci_set_power_state(pcid, pci_choose_state(pcid, state));
2769
2770 spin_unlock_irqrestore(&priv->lock, flags);
2771
2772 + pci_set_power_state(pcid, pci_choose_state(pcid, state));
2773 +
2774 return 0;
2775 }
2776
2777 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
2778 index 0d578297d9f9..72e926d9868f 100644
2779 --- a/drivers/target/iscsi/iscsi_target.c
2780 +++ b/drivers/target/iscsi/iscsi_target.c
2781 @@ -841,6 +841,7 @@ static int iscsit_add_reject_from_cmd(
2782 unsigned char *buf)
2783 {
2784 struct iscsi_conn *conn;
2785 + const bool do_put = cmd->se_cmd.se_tfo != NULL;
2786
2787 if (!cmd->conn) {
2788 pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
2789 @@ -871,7 +872,7 @@ static int iscsit_add_reject_from_cmd(
2790 * Perform the kref_put now if se_cmd has already been setup by
2791 * scsit_setup_scsi_cmd()
2792 */
2793 - if (cmd->se_cmd.se_tfo != NULL) {
2794 + if (do_put) {
2795 pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
2796 target_put_sess_cmd(&cmd->se_cmd);
2797 }
2798 diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
2799 index 9cbbc9cf63fb..8a4bc15bc3f5 100644
2800 --- a/drivers/target/iscsi/iscsi_target_configfs.c
2801 +++ b/drivers/target/iscsi/iscsi_target_configfs.c
2802 @@ -1144,7 +1144,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
2803
2804 ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI);
2805 if (ret < 0)
2806 - return NULL;
2807 + goto free_out;
2808
2809 ret = iscsit_tpg_add_portal_group(tiqn, tpg);
2810 if (ret != 0)
2811 @@ -1156,6 +1156,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
2812 return &tpg->tpg_se_tpg;
2813 out:
2814 core_tpg_deregister(&tpg->tpg_se_tpg);
2815 +free_out:
2816 kfree(tpg);
2817 return NULL;
2818 }
2819 diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
2820 index 4c82bbe19003..ee5b29aed54b 100644
2821 --- a/drivers/target/target_core_alua.c
2822 +++ b/drivers/target/target_core_alua.c
2823 @@ -1010,7 +1010,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
2824 static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
2825 {
2826 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
2827 - struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
2828 + struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
2829 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
2830 bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
2831 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
2832 @@ -1073,17 +1073,8 @@ static int core_alua_do_transition_tg_pt(
2833 /*
2834 * Flush any pending transitions
2835 */
2836 - if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
2837 - atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
2838 - ALUA_ACCESS_STATE_TRANSITION) {
2839 - /* Just in case */
2840 - tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
2841 - tg_pt_gp->tg_pt_gp_transition_complete = &wait;
2842 - flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
2843 - wait_for_completion(&wait);
2844 - tg_pt_gp->tg_pt_gp_transition_complete = NULL;
2845 - return 0;
2846 - }
2847 + if (!explicit)
2848 + flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
2849
2850 /*
2851 * Save the old primary ALUA access state, and set the current state
2852 @@ -1114,17 +1105,9 @@ static int core_alua_do_transition_tg_pt(
2853 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
2854 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
2855
2856 - if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
2857 - unsigned long transition_tmo;
2858 -
2859 - transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
2860 - queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
2861 - &tg_pt_gp->tg_pt_gp_transition_work,
2862 - transition_tmo);
2863 - } else {
2864 + schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
2865 + if (explicit) {
2866 tg_pt_gp->tg_pt_gp_transition_complete = &wait;
2867 - queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
2868 - &tg_pt_gp->tg_pt_gp_transition_work, 0);
2869 wait_for_completion(&wait);
2870 tg_pt_gp->tg_pt_gp_transition_complete = NULL;
2871 }
2872 @@ -1692,8 +1675,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
2873 mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
2874 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
2875 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
2876 - INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
2877 - core_alua_do_transition_tg_pt_work);
2878 + INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
2879 + core_alua_do_transition_tg_pt_work);
2880 tg_pt_gp->tg_pt_gp_dev = dev;
2881 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
2882 ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
2883 @@ -1801,7 +1784,7 @@ void core_alua_free_tg_pt_gp(
2884 dev->t10_alua.alua_tg_pt_gps_counter--;
2885 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
2886
2887 - flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
2888 + flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
2889
2890 /*
2891 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
2892 diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
2893 index 29f807b29e74..97928b42ad62 100644
2894 --- a/drivers/target/target_core_file.c
2895 +++ b/drivers/target/target_core_file.c
2896 @@ -466,6 +466,10 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
2897 struct inode *inode = file->f_mapping->host;
2898 int ret;
2899
2900 + if (!nolb) {
2901 + return 0;
2902 + }
2903 +
2904 if (cmd->se_dev->dev_attrib.pi_prot_type) {
2905 ret = fd_do_prot_unmap(cmd, lba, nolb);
2906 if (ret)
2907 diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
2908 index 47463c99c318..df20921c233c 100644
2909 --- a/drivers/target/target_core_pr.c
2910 +++ b/drivers/target/target_core_pr.c
2911 @@ -56,8 +56,10 @@ void core_pr_dump_initiator_port(
2912 char *buf,
2913 u32 size)
2914 {
2915 - if (!pr_reg->isid_present_at_reg)
2916 + if (!pr_reg->isid_present_at_reg) {
2917 buf[0] = '\0';
2918 + return;
2919 + }
2920
2921 snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid);
2922 }
2923 diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
2924 index bcef2e7c4ec9..1eea63caa451 100644
2925 --- a/drivers/thermal/step_wise.c
2926 +++ b/drivers/thermal/step_wise.c
2927 @@ -31,8 +31,7 @@
2928 * If the temperature is higher than a trip point,
2929 * a. if the trend is THERMAL_TREND_RAISING, use higher cooling
2930 * state for this trip point
2931 - * b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
2932 - * state for this trip point
2933 + * b. if the trend is THERMAL_TREND_DROPPING, do nothing
2934 * c. if the trend is THERMAL_TREND_RAISE_FULL, use upper limit
2935 * for this trip point
2936 * d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit
2937 @@ -94,9 +93,11 @@ static unsigned long get_target_state(struct thermal_instance *instance,
2938 if (!throttle)
2939 next_target = THERMAL_NO_TARGET;
2940 } else {
2941 - next_target = cur_state - 1;
2942 - if (next_target > instance->upper)
2943 - next_target = instance->upper;
2944 + if (!throttle) {
2945 + next_target = cur_state - 1;
2946 + if (next_target > instance->upper)
2947 + next_target = instance->upper;
2948 + }
2949 }
2950 break;
2951 case THERMAL_TREND_DROP_FULL:
2952 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
2953 index 68947f6de5ad..b0500a0a87b8 100644
2954 --- a/drivers/tty/tty_ldisc.c
2955 +++ b/drivers/tty/tty_ldisc.c
2956 @@ -271,10 +271,13 @@ const struct file_operations tty_ldiscs_proc_fops = {
2957
2958 struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
2959 {
2960 + struct tty_ldisc *ld;
2961 +
2962 ldsem_down_read(&tty->ldisc_sem, MAX_SCHEDULE_TIMEOUT);
2963 - if (!tty->ldisc)
2964 + ld = tty->ldisc;
2965 + if (!ld)
2966 ldsem_up_read(&tty->ldisc_sem);
2967 - return tty->ldisc;
2968 + return ld;
2969 }
2970 EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
2971
2972 @@ -488,41 +491,6 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
2973 tty_ldisc_debug(tty, "%p: closed\n", ld);
2974 }
2975
2976 -/**
2977 - * tty_ldisc_restore - helper for tty ldisc change
2978 - * @tty: tty to recover
2979 - * @old: previous ldisc
2980 - *
2981 - * Restore the previous line discipline or N_TTY when a line discipline
2982 - * change fails due to an open error
2983 - */
2984 -
2985 -static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
2986 -{
2987 - struct tty_ldisc *new_ldisc;
2988 - int r;
2989 -
2990 - /* There is an outstanding reference here so this is safe */
2991 - old = tty_ldisc_get(tty, old->ops->num);
2992 - WARN_ON(IS_ERR(old));
2993 - tty->ldisc = old;
2994 - tty_set_termios_ldisc(tty, old->ops->num);
2995 - if (tty_ldisc_open(tty, old) < 0) {
2996 - tty_ldisc_put(old);
2997 - /* This driver is always present */
2998 - new_ldisc = tty_ldisc_get(tty, N_TTY);
2999 - if (IS_ERR(new_ldisc))
3000 - panic("n_tty: get");
3001 - tty->ldisc = new_ldisc;
3002 - tty_set_termios_ldisc(tty, N_TTY);
3003 - r = tty_ldisc_open(tty, new_ldisc);
3004 - if (r < 0)
3005 - panic("Couldn't open N_TTY ldisc for "
3006 - "%s --- error %d.",
3007 - tty_name(tty), r);
3008 - }
3009 -}
3010 -
3011 /**
3012 * tty_set_ldisc - set line discipline
3013 * @tty: the terminal to set
3014 @@ -536,12 +504,7 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
3015
3016 int tty_set_ldisc(struct tty_struct *tty, int disc)
3017 {
3018 - int retval;
3019 - struct tty_ldisc *old_ldisc, *new_ldisc;
3020 -
3021 - new_ldisc = tty_ldisc_get(tty, disc);
3022 - if (IS_ERR(new_ldisc))
3023 - return PTR_ERR(new_ldisc);
3024 + int retval, old_disc;
3025
3026 tty_lock(tty);
3027 retval = tty_ldisc_lock(tty, 5 * HZ);
3028 @@ -554,7 +517,8 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
3029 }
3030
3031 /* Check the no-op case */
3032 - if (tty->ldisc->ops->num == disc)
3033 + old_disc = tty->ldisc->ops->num;
3034 + if (old_disc == disc)
3035 goto out;
3036
3037 if (test_bit(TTY_HUPPED, &tty->flags)) {
3038 @@ -563,34 +527,25 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
3039 goto out;
3040 }
3041
3042 - old_ldisc = tty->ldisc;
3043 -
3044 - /* Shutdown the old discipline. */
3045 - tty_ldisc_close(tty, old_ldisc);
3046 -
3047 - /* Now set up the new line discipline. */
3048 - tty->ldisc = new_ldisc;
3049 - tty_set_termios_ldisc(tty, disc);
3050 -
3051 - retval = tty_ldisc_open(tty, new_ldisc);
3052 + retval = tty_ldisc_reinit(tty, disc);
3053 if (retval < 0) {
3054 /* Back to the old one or N_TTY if we can't */
3055 - tty_ldisc_put(new_ldisc);
3056 - tty_ldisc_restore(tty, old_ldisc);
3057 + if (tty_ldisc_reinit(tty, old_disc) < 0) {
3058 + pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n");
3059 + if (tty_ldisc_reinit(tty, N_TTY) < 0) {
3060 + /* At this point we have tty->ldisc == NULL. */
3061 + pr_err("tty: reinitializing N_TTY failed\n");
3062 + }
3063 + }
3064 }
3065
3066 - if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) {
3067 + if (tty->ldisc && tty->ldisc->ops->num != old_disc &&
3068 + tty->ops->set_ldisc) {
3069 down_read(&tty->termios_rwsem);
3070 tty->ops->set_ldisc(tty);
3071 up_read(&tty->termios_rwsem);
3072 }
3073
3074 - /* At this point we hold a reference to the new ldisc and a
3075 - reference to the old ldisc, or we hold two references to
3076 - the old ldisc (if it was restored as part of error cleanup
3077 - above). In either case, releasing a single reference from
3078 - the old ldisc is correct. */
3079 - new_ldisc = old_ldisc;
3080 out:
3081 tty_ldisc_unlock(tty);
3082
3083 @@ -598,7 +553,6 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
3084 already running */
3085 tty_buffer_restart_work(tty->port);
3086 err:
3087 - tty_ldisc_put(new_ldisc); /* drop the extra reference */
3088 tty_unlock(tty);
3089 return retval;
3090 }
3091 @@ -659,10 +613,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
3092 int retval;
3093
3094 ld = tty_ldisc_get(tty, disc);
3095 - if (IS_ERR(ld)) {
3096 - BUG_ON(disc == N_TTY);
3097 + if (IS_ERR(ld))
3098 return PTR_ERR(ld);
3099 - }
3100
3101 if (tty->ldisc) {
3102 tty_ldisc_close(tty, tty->ldisc);
3103 @@ -674,10 +626,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
3104 tty_set_termios_ldisc(tty, disc);
3105 retval = tty_ldisc_open(tty, tty->ldisc);
3106 if (retval) {
3107 - if (!WARN_ON(disc == N_TTY)) {
3108 - tty_ldisc_put(tty->ldisc);
3109 - tty->ldisc = NULL;
3110 - }
3111 + tty_ldisc_put(tty->ldisc);
3112 + tty->ldisc = NULL;
3113 }
3114 return retval;
3115 }
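
The tty_ldisc_ref_wait() hunk reads tty->ldisc exactly once into a local, so the NULL test and the returned value can never come from two different loads of a pointer another thread may be rewriting. A generic sketch of the read-once pattern, with hypothetical lock and types (not the kernel's ldsem API):

struct ldisc;
struct rwlock;				/* hypothetical reader/writer lock */
void read_lock(struct rwlock *);
void read_unlock(struct rwlock *);

struct tty {
	struct rwlock *sem;
	struct ldisc *ldisc;		/* shared, may be cleared concurrently */
};

struct ldisc *ldisc_ref_wait(struct tty *tty)
{
	struct ldisc *ld;

	read_lock(tty->sem);
	ld = tty->ldisc;		/* single load: check and result agree */
	if (!ld)
		read_unlock(tty->sem);	/* nothing to pin; drop the lock */
	return ld;			/* non-NULL result stays pinned by the lock */
}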
3116 diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
3117 index 5ebe04d3598b..ba9b29bc441f 100644
3118 --- a/drivers/usb/core/config.c
3119 +++ b/drivers/usb/core/config.c
3120 @@ -550,6 +550,9 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
3121 unsigned iad_num = 0;
3122
3123 memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
3124 + nintf = nintf_orig = config->desc.bNumInterfaces;
3125 + config->desc.bNumInterfaces = 0; // Adjusted later
3126 +
3127 if (config->desc.bDescriptorType != USB_DT_CONFIG ||
3128 config->desc.bLength < USB_DT_CONFIG_SIZE ||
3129 config->desc.bLength > size) {
3130 @@ -563,7 +566,6 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
3131 buffer += config->desc.bLength;
3132 size -= config->desc.bLength;
3133
3134 - nintf = nintf_orig = config->desc.bNumInterfaces;
3135 if (nintf > USB_MAXINTERFACES) {
3136 dev_warn(ddev, "config %d has too many interfaces: %d, "
3137 "using maximum allowed: %d\n",
3138 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
3139 index a3ecd8bd5324..82eea55a7b5c 100644
3140 --- a/drivers/usb/host/xhci-mem.c
3141 +++ b/drivers/usb/host/xhci-mem.c
3142 @@ -1032,10 +1032,9 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
3143 return 0;
3144 }
3145
3146 - xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
3147 - if (!xhci->devs[slot_id])
3148 + dev = kzalloc(sizeof(*dev), flags);
3149 + if (!dev)
3150 return 0;
3151 - dev = xhci->devs[slot_id];
3152
3153 /* Allocate the (output) device context that will be used in the HC. */
3154 dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
3155 @@ -1083,9 +1082,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
3156 &xhci->dcbaa->dev_context_ptrs[slot_id],
3157 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
3158
3159 + xhci->devs[slot_id] = dev;
3160 +
3161 return 1;
3162 fail:
3163 - xhci_free_virt_device(xhci, slot_id);
3164 +
3165 + if (dev->in_ctx)
3166 + xhci_free_container_ctx(xhci, dev->in_ctx);
3167 + if (dev->out_ctx)
3168 + xhci_free_container_ctx(xhci, dev->out_ctx);
3169 + kfree(dev);
3170 +
3171 return 0;
3172 }
3173
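The xhci_alloc_virt_device() rework constructs the device in a local pointer and stores it into xhci->devs[slot_id] only after every allocation has succeeded, so the failure path tears down an object no other context can have seen. The shape of the pattern, sketched with hypothetical allocators:

#include <stdlib.h>

struct vdev { void *in_ctx, *out_ctx; };

static struct vdev *slots[256];

static int alloc_slot(unsigned int slot, void *(*ctx_alloc)(void),
		      void (*ctx_free)(void *))
{
	struct vdev *d = calloc(1, sizeof(*d));

	if (!d)
		return 0;
	d->out_ctx = ctx_alloc();
	if (!d->out_ctx)
		goto fail;
	d->in_ctx = ctx_alloc();
	if (!d->in_ctx)
		goto fail;
	slots[slot] = d;	/* publish only after full construction */
	return 1;
fail:
	if (d->out_ctx)
		ctx_free(d->out_ctx);
	free(d);		/* never published, a plain free is enough */
	return 0;
}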
3174 diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
3175 index f2365a47fa4a..ce9e457e60c3 100644
3176 --- a/drivers/usb/host/xhci-mtk.c
3177 +++ b/drivers/usb/host/xhci-mtk.c
3178 @@ -632,13 +632,13 @@ static int xhci_mtk_probe(struct platform_device *pdev)
3179 goto power_off_phys;
3180 }
3181
3182 - if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
3183 - xhci->shared_hcd->can_do_streams = 1;
3184 -
3185 ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
3186 if (ret)
3187 goto put_usb3_hcd;
3188
3189 + if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
3190 + xhci->shared_hcd->can_do_streams = 1;
3191 +
3192 ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
3193 if (ret)
3194 goto dealloc_usb2_hcd;
3195 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
3196 index 63735b5310bb..89a14d5f6ad8 100644
3197 --- a/drivers/usb/host/xhci-ring.c
3198 +++ b/drivers/usb/host/xhci-ring.c
3199 @@ -3132,7 +3132,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3200 {
3201 u32 maxp, total_packet_count;
3202
3203 - /* MTK xHCI is mostly 0.97 but contains some features from 1.0 */
3204 + /* MTK xHCI 0.96 contains some features from 1.0 */
3205 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
3206 return ((td_total_len - transferred) >> 10);
3207
3208 @@ -3141,8 +3141,8 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3209 trb_buff_len == td_total_len)
3210 return 0;
3211
3212 - /* for MTK xHCI, TD size doesn't include this TRB */
3213 - if (xhci->quirks & XHCI_MTK_HOST)
3214 + /* for MTK xHCI 0.96, TD size includes this TRB, but not in 1.x */
3215 + if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
3216 trb_buff_len = 0;
3217
3218 maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
3219 diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
3220 index bacee0fd4dd3..ea5bad49394b 100644
3221 --- a/drivers/usb/musb/da8xx.c
3222 +++ b/drivers/usb/musb/da8xx.c
3223 @@ -302,7 +302,15 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
3224 musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
3225 portstate(musb->port1_status |= USB_PORT_STAT_POWER);
3226 del_timer(&otg_workaround);
3227 - } else {
3228 + } else if (!(musb->int_usb & MUSB_INTR_BABBLE)) {
3229 + /*
3230 + * When a babble condition occurs, a drvvbus interrupt
3231 + * is also generated. Ignore this drvvbus interrupt
3232 + * and let the babble interrupt handler recover the
3233 + * controller; otherwise, the host-mode flag is lost
3234 + * due to the MUSB_DEV_MODE() call below and the babble
3235 + * recovery logic will not be called.
3236 + */
3237 musb->is_active = 0;
3238 MUSB_DEV_MODE(musb);
3239 otg->default_a = 0;
3240 diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
3241 index db68156568e6..b3b33cf7ddf6 100644
3242 --- a/drivers/usb/phy/phy-isp1301.c
3243 +++ b/drivers/usb/phy/phy-isp1301.c
3244 @@ -33,6 +33,12 @@ static const struct i2c_device_id isp1301_id[] = {
3245 };
3246 MODULE_DEVICE_TABLE(i2c, isp1301_id);
3247
3248 +static const struct of_device_id isp1301_of_match[] = {
3249 + {.compatible = "nxp,isp1301" },
3250 + { },
3251 +};
3252 +MODULE_DEVICE_TABLE(of, isp1301_of_match);
3253 +
3254 static struct i2c_client *isp1301_i2c_client;
3255
3256 static int __isp1301_write(struct isp1301 *isp, u8 reg, u8 value, u8 clear)
3257 @@ -130,6 +136,7 @@ static int isp1301_remove(struct i2c_client *client)
3258 static struct i2c_driver isp1301_driver = {
3259 .driver = {
3260 .name = DRV_NAME,
3261 + .of_match_table = of_match_ptr(isp1301_of_match),
3262 },
3263 .probe = isp1301_probe,
3264 .remove = isp1301_remove,
3265 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
3266 index 2572fd5cd2bb..b605115eb47a 100644
3267 --- a/drivers/usb/storage/unusual_devs.h
3268 +++ b/drivers/usb/storage/unusual_devs.h
3269 @@ -2113,6 +2113,13 @@ UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116,
3270 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
3271 US_FL_BROKEN_FUA ),
3272
3273 +/* Reported by David Kozub <zub@linux.fjfi.cvut.cz> */
3274 +UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
3275 + "JMicron",
3276 + "JMS567",
3277 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
3278 + US_FL_BROKEN_FUA),
3279 +
3280 /*
3281 * Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
3282 * JMicron responds to USN and several other SCSI ioctls with a
3283 diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
3284 index cde115359793..9f356f7cf7d5 100644
3285 --- a/drivers/usb/storage/unusual_uas.h
3286 +++ b/drivers/usb/storage/unusual_uas.h
3287 @@ -142,6 +142,13 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
3288 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
3289 US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES),
3290
3291 +/* Reported-by: David Kozub <zub@linux.fjfi.cvut.cz> */
3292 +UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
3293 + "JMicron",
3294 + "JMS567",
3295 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
3296 + US_FL_BROKEN_FUA),
3297 +
3298 /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
3299 UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
3300 "VIA",
3301 diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
3302 index 191b176ffedf..283a9be77a22 100644
3303 --- a/drivers/usb/usbip/stub_rx.c
3304 +++ b/drivers/usb/usbip/stub_rx.c
3305 @@ -336,23 +336,34 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
3306 return priv;
3307 }
3308
3309 -static int get_pipe(struct stub_device *sdev, int epnum, int dir)
3310 +static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
3311 {
3312 struct usb_device *udev = sdev->udev;
3313 struct usb_host_endpoint *ep;
3314 struct usb_endpoint_descriptor *epd = NULL;
3315 + int epnum = pdu->base.ep;
3316 + int dir = pdu->base.direction;
3317 +
3318 + if (epnum < 0 || epnum > 15)
3319 + goto err_ret;
3320
3321 if (dir == USBIP_DIR_IN)
3322 ep = udev->ep_in[epnum & 0x7f];
3323 else
3324 ep = udev->ep_out[epnum & 0x7f];
3325 - if (!ep) {
3326 - dev_err(&sdev->udev->dev, "no such endpoint?, %d\n",
3327 - epnum);
3328 - BUG();
3329 - }
3330 + if (!ep)
3331 + goto err_ret;
3332
3333 epd = &ep->desc;
3334 +
3335 + /* validate transfer_buffer_length */
3336 + if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX) {
3337 + dev_err(&sdev->udev->dev,
3338 + "CMD_SUBMIT: -EMSGSIZE transfer_buffer_length %d\n",
3339 + pdu->u.cmd_submit.transfer_buffer_length);
3340 + return -1;
3341 + }
3342 +
3343 if (usb_endpoint_xfer_control(epd)) {
3344 if (dir == USBIP_DIR_OUT)
3345 return usb_sndctrlpipe(udev, epnum);
3346 @@ -375,15 +386,31 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
3347 }
3348
3349 if (usb_endpoint_xfer_isoc(epd)) {
3350 + /* validate packet size and number of packets */
3351 + unsigned int maxp, packets, bytes;
3352 +
3353 + maxp = usb_endpoint_maxp(epd);
3354 + maxp *= usb_endpoint_maxp_mult(epd);
3355 + bytes = pdu->u.cmd_submit.transfer_buffer_length;
3356 + packets = DIV_ROUND_UP(bytes, maxp);
3357 +
3358 + if (pdu->u.cmd_submit.number_of_packets < 0 ||
3359 + pdu->u.cmd_submit.number_of_packets > packets) {
3360 + dev_err(&sdev->udev->dev,
3361 + "CMD_SUBMIT: isoc invalid num packets %d\n",
3362 + pdu->u.cmd_submit.number_of_packets);
3363 + return -1;
3364 + }
3365 if (dir == USBIP_DIR_OUT)
3366 return usb_sndisocpipe(udev, epnum);
3367 else
3368 return usb_rcvisocpipe(udev, epnum);
3369 }
3370
3371 +err_ret:
3372 /* NOT REACHED */
3373 - dev_err(&sdev->udev->dev, "get pipe, epnum %d\n", epnum);
3374 - return 0;
3375 + dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
3376 + return -1;
3377 }
3378
3379 static void masking_bogus_flags(struct urb *urb)
3380 @@ -447,7 +474,10 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
3381 struct stub_priv *priv;
3382 struct usbip_device *ud = &sdev->ud;
3383 struct usb_device *udev = sdev->udev;
3384 - int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
3385 + int pipe = get_pipe(sdev, pdu);
3386 +
3387 + if (pipe == -1)
3388 + return;
3389
3390 priv = stub_priv_alloc(sdev, pdu);
3391 if (!priv)
3392 @@ -466,7 +496,8 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
3393 }
3394
3395 /* allocate urb transfer buffer, if needed */
3396 - if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
3397 + if (pdu->u.cmd_submit.transfer_buffer_length > 0 &&
3398 + pdu->u.cmd_submit.transfer_buffer_length <= INT_MAX) {
3399 priv->urb->transfer_buffer =
3400 kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
3401 GFP_KERNEL);
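
After this change every field of the PDU that reaches get_pipe() is treated as attacker-controlled: the endpoint number is range-checked before it indexes ep_in[]/ep_out[], the transfer length is bounded by INT_MAX, and the isochronous packet count is checked against what the buffer could actually carry. A condensed sketch of that validation order, with hypothetical field names:

#include <limits.h>

struct pdu {
	int ep, npackets;
	long long len;		/* transfer_buffer_length off the wire */
};

/* Returns 0 when the PDU is safe to act on, -1 otherwise; maxp is the
 * endpoint's max packet size (assumed non-zero for isoc endpoints). */
static int validate_pdu(const struct pdu *p, unsigned int maxp)
{
	if (p->ep < 0 || p->ep > 15)
		return -1;	/* would index past the endpoint arrays */
	if (p->len < 0 || p->len > INT_MAX)
		return -1;	/* later code stores the length in an int */
	if (maxp == 0)
		return -1;
	if (p->npackets < 0 ||
	    (long long)p->npackets > (p->len + maxp - 1) / maxp)
		return -1;	/* more packets than the buffer holds */
	return 0;
}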
3402 diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
3403 index be50cef645d8..87ff94be4235 100644
3404 --- a/drivers/usb/usbip/stub_tx.c
3405 +++ b/drivers/usb/usbip/stub_tx.c
3406 @@ -181,6 +181,13 @@ static int stub_send_ret_submit(struct stub_device *sdev)
3407 memset(&pdu_header, 0, sizeof(pdu_header));
3408 memset(&msg, 0, sizeof(msg));
3409
3410 + if (urb->actual_length > 0 && !urb->transfer_buffer) {
3411 + dev_err(&sdev->udev->dev,
3412 + "urb: actual_length %d transfer_buffer null\n",
3413 + urb->actual_length);
3414 + return -1;
3415 + }
3416 +
3417 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
3418 iovnum = 2 + urb->number_of_packets;
3419 else
3420 diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
3421 index 6c2b2ca4a909..44c2be15a08b 100644
3422 --- a/drivers/video/fbdev/au1200fb.c
3423 +++ b/drivers/video/fbdev/au1200fb.c
3424 @@ -1681,8 +1681,10 @@ static int au1200fb_drv_probe(struct platform_device *dev)
3425
3426 fbi = framebuffer_alloc(sizeof(struct au1200fb_device),
3427 &dev->dev);
3428 - if (!fbi)
3429 + if (!fbi) {
3430 + ret = -ENOMEM;
3431 goto failed;
3432 + }
3433
3434 _au1200fb_infos[plane] = fbi;
3435 fbdev = fbi->par;
3436 @@ -1700,7 +1702,8 @@ static int au1200fb_drv_probe(struct platform_device *dev)
3437 if (!fbdev->fb_mem) {
3438 print_err("fail to allocate frambuffer (size: %dK))",
3439 fbdev->fb_len / 1024);
3440 - return -ENOMEM;
3441 + ret = -ENOMEM;
3442 + goto failed;
3443 }
3444
3445 /*
3446 diff --git a/drivers/video/fbdev/controlfb.h b/drivers/video/fbdev/controlfb.h
3447 index 6026c60fc100..261522fabdac 100644
3448 --- a/drivers/video/fbdev/controlfb.h
3449 +++ b/drivers/video/fbdev/controlfb.h
3450 @@ -141,5 +141,7 @@ static struct max_cmodes control_mac_modes[] = {
3451 {{ 1, 2}}, /* 1152x870, 75Hz */
3452 {{ 0, 1}}, /* 1280x960, 75Hz */
3453 {{ 0, 1}}, /* 1280x1024, 75Hz */
3454 + {{ 1, 2}}, /* 1152x768, 60Hz */
3455 + {{ 0, 1}}, /* 1600x1024, 60Hz */
3456 };
3457
3458 diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
3459 index e9c2f7ba3c8e..53326badfb61 100644
3460 --- a/drivers/video/fbdev/udlfb.c
3461 +++ b/drivers/video/fbdev/udlfb.c
3462 @@ -769,11 +769,11 @@ static int dlfb_get_edid(struct dlfb_data *dev, char *edid, int len)
3463
3464 for (i = 0; i < len; i++) {
3465 ret = usb_control_msg(dev->udev,
3466 - usb_rcvctrlpipe(dev->udev, 0), (0x02),
3467 - (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
3468 - HZ);
3469 - if (ret < 1) {
3470 - pr_err("Read EDID byte %d failed err %x\n", i, ret);
3471 + usb_rcvctrlpipe(dev->udev, 0), 0x02,
3472 + (0x80 | (0x02 << 5)), i << 8, 0xA1,
3473 + rbuf, 2, USB_CTRL_GET_TIMEOUT);
3474 + if (ret < 2) {
3475 + pr_err("Read EDID byte %d failed: %d\n", i, ret);
3476 i--;
3477 break;
3478 }
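
usb_control_msg() returns the number of bytes actually transferred, so a two-byte EDID read has only succeeded when the return value is 2; the old `ret < 1` test accepted a one-byte short read. A small sketch of the stricter check, where read_reg() is a hypothetical stand-in for the control transfer:

/* Hypothetical transfer helper: returns bytes read or a negative errno. */
int read_reg(int value, unsigned char *buf, int len);

static int read_edid_byte(int index, unsigned char *out)
{
	unsigned char rbuf[2];
	int ret = read_reg(index << 8, rbuf, (int)sizeof(rbuf));

	if (ret < (int)sizeof(rbuf))	/* short reads fail too */
		return ret < 0 ? ret : -1;
	*out = rbuf[1];		/* the EDID byte follows a status byte */
	return 0;
}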
3479 diff --git a/fs/afs/callback.c b/fs/afs/callback.c
3480 index 1e9d2f84e5b5..1592dc613200 100644
3481 --- a/fs/afs/callback.c
3482 +++ b/fs/afs/callback.c
3483 @@ -362,7 +362,7 @@ static void afs_callback_updater(struct work_struct *work)
3484 {
3485 struct afs_server *server;
3486 struct afs_vnode *vnode, *xvnode;
3487 - time_t now;
3488 + time64_t now;
3489 long timeout;
3490 int ret;
3491
3492 @@ -370,7 +370,7 @@ static void afs_callback_updater(struct work_struct *work)
3493
3494 _enter("");
3495
3496 - now = get_seconds();
3497 + now = ktime_get_real_seconds();
3498
3499 /* find the first vnode to update */
3500 spin_lock(&server->cb_lock);
3501 @@ -424,7 +424,8 @@ static void afs_callback_updater(struct work_struct *work)
3502
3503 /* and then reschedule */
3504 _debug("reschedule");
3505 - vnode->update_at = get_seconds() + afs_vnode_update_timeout;
3506 + vnode->update_at = ktime_get_real_seconds() +
3507 + afs_vnode_update_timeout;
3508
3509 spin_lock(&server->cb_lock);
3510
3511 diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
3512 index 8d2c5180e015..168f2a4d1180 100644
3513 --- a/fs/afs/cmservice.c
3514 +++ b/fs/afs/cmservice.c
3515 @@ -168,7 +168,6 @@ static int afs_deliver_cb_callback(struct afs_call *call)
3516 struct afs_callback *cb;
3517 struct afs_server *server;
3518 __be32 *bp;
3519 - u32 tmp;
3520 int ret, loop;
3521
3522 _enter("{%u}", call->unmarshall);
3523 @@ -230,9 +229,9 @@ static int afs_deliver_cb_callback(struct afs_call *call)
3524 if (ret < 0)
3525 return ret;
3526
3527 - tmp = ntohl(call->tmp);
3528 - _debug("CB count: %u", tmp);
3529 - if (tmp != call->count && tmp != 0)
3530 + call->count2 = ntohl(call->tmp);
3531 + _debug("CB count: %u", call->count2);
3532 + if (call->count2 != call->count && call->count2 != 0)
3533 return -EBADMSG;
3534 call->offset = 0;
3535 call->unmarshall++;
3536 @@ -240,14 +239,14 @@ static int afs_deliver_cb_callback(struct afs_call *call)
3537 case 4:
3538 _debug("extract CB array");
3539 ret = afs_extract_data(call, call->buffer,
3540 - call->count * 3 * 4, false);
3541 + call->count2 * 3 * 4, false);
3542 if (ret < 0)
3543 return ret;
3544
3545 _debug("unmarshall CB array");
3546 cb = call->request;
3547 bp = call->buffer;
3548 - for (loop = call->count; loop > 0; loop--, cb++) {
3549 + for (loop = call->count2; loop > 0; loop--, cb++) {
3550 cb->version = ntohl(*bp++);
3551 cb->expiry = ntohl(*bp++);
3552 cb->type = ntohl(*bp++);
3553 diff --git a/fs/afs/file.c b/fs/afs/file.c
3554 index 6344aee4ac4b..72372970725b 100644
3555 --- a/fs/afs/file.c
3556 +++ b/fs/afs/file.c
3557 @@ -29,6 +29,7 @@ static int afs_readpages(struct file *filp, struct address_space *mapping,
3558
3559 const struct file_operations afs_file_operations = {
3560 .open = afs_open,
3561 + .flush = afs_flush,
3562 .release = afs_release,
3563 .llseek = generic_file_llseek,
3564 .read_iter = generic_file_read_iter,
3565 diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
3566 index 31c616ab9b40..88e440607ed7 100644
3567 --- a/fs/afs/fsclient.c
3568 +++ b/fs/afs/fsclient.c
3569 @@ -105,7 +105,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
3570 vnode->vfs_inode.i_mode = mode;
3571 }
3572
3573 - vnode->vfs_inode.i_ctime.tv_sec = status->mtime_server;
3574 + vnode->vfs_inode.i_ctime.tv_sec = status->mtime_client;
3575 vnode->vfs_inode.i_mtime = vnode->vfs_inode.i_ctime;
3576 vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime;
3577 vnode->vfs_inode.i_version = data_version;
3578 @@ -139,7 +139,7 @@ static void xdr_decode_AFSCallBack(const __be32 **_bp, struct afs_vnode *vnode)
3579 vnode->cb_version = ntohl(*bp++);
3580 vnode->cb_expiry = ntohl(*bp++);
3581 vnode->cb_type = ntohl(*bp++);
3582 - vnode->cb_expires = vnode->cb_expiry + get_seconds();
3583 + vnode->cb_expires = vnode->cb_expiry + ktime_get_real_seconds();
3584 *_bp = bp;
3585 }
3586
3587 @@ -676,8 +676,8 @@ int afs_fs_create(struct afs_server *server,
3588 memset(bp, 0, padsz);
3589 bp = (void *) bp + padsz;
3590 }
3591 - *bp++ = htonl(AFS_SET_MODE);
3592 - *bp++ = 0; /* mtime */
3593 + *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
3594 + *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
3595 *bp++ = 0; /* owner */
3596 *bp++ = 0; /* group */
3597 *bp++ = htonl(mode & S_IALLUGO); /* unix mode */
3598 @@ -945,8 +945,8 @@ int afs_fs_symlink(struct afs_server *server,
3599 memset(bp, 0, c_padsz);
3600 bp = (void *) bp + c_padsz;
3601 }
3602 - *bp++ = htonl(AFS_SET_MODE);
3603 - *bp++ = 0; /* mtime */
3604 + *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
3605 + *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
3606 *bp++ = 0; /* owner */
3607 *bp++ = 0; /* group */
3608 *bp++ = htonl(S_IRWXUGO); /* unix mode */
3609 @@ -1145,8 +1145,8 @@ static int afs_fs_store_data64(struct afs_server *server,
3610 *bp++ = htonl(vnode->fid.vnode);
3611 *bp++ = htonl(vnode->fid.unique);
3612
3613 - *bp++ = 0; /* mask */
3614 - *bp++ = 0; /* mtime */
3615 + *bp++ = htonl(AFS_SET_MTIME); /* mask */
3616 + *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
3617 *bp++ = 0; /* owner */
3618 *bp++ = 0; /* group */
3619 *bp++ = 0; /* unix mode */
3620 @@ -1178,7 +1178,7 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
3621 _enter(",%x,{%x:%u},,",
3622 key_serial(wb->key), vnode->fid.vid, vnode->fid.vnode);
3623
3624 - size = to - offset;
3625 + size = (loff_t)to - (loff_t)offset;
3626 if (first != last)
3627 size += (loff_t)(last - first) << PAGE_SHIFT;
3628 pos = (loff_t)first << PAGE_SHIFT;
3629 @@ -1222,8 +1222,8 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
3630 *bp++ = htonl(vnode->fid.vnode);
3631 *bp++ = htonl(vnode->fid.unique);
3632
3633 - *bp++ = 0; /* mask */
3634 - *bp++ = 0; /* mtime */
3635 + *bp++ = htonl(AFS_SET_MTIME); /* mask */
3636 + *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
3637 *bp++ = 0; /* owner */
3638 *bp++ = 0; /* group */
3639 *bp++ = 0; /* unix mode */
3640 diff --git a/fs/afs/inode.c b/fs/afs/inode.c
3641 index 86cc7264c21c..42582e41948f 100644
3642 --- a/fs/afs/inode.c
3643 +++ b/fs/afs/inode.c
3644 @@ -70,9 +70,9 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
3645
3646 set_nlink(inode, vnode->status.nlink);
3647 inode->i_uid = vnode->status.owner;
3648 - inode->i_gid = GLOBAL_ROOT_GID;
3649 + inode->i_gid = vnode->status.group;
3650 inode->i_size = vnode->status.size;
3651 - inode->i_ctime.tv_sec = vnode->status.mtime_server;
3652 + inode->i_ctime.tv_sec = vnode->status.mtime_client;
3653 inode->i_ctime.tv_nsec = 0;
3654 inode->i_atime = inode->i_mtime = inode->i_ctime;
3655 inode->i_blocks = 0;
3656 @@ -245,12 +245,13 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
3657 vnode->cb_version = 0;
3658 vnode->cb_expiry = 0;
3659 vnode->cb_type = 0;
3660 - vnode->cb_expires = get_seconds();
3661 + vnode->cb_expires = ktime_get_real_seconds();
3662 } else {
3663 vnode->cb_version = cb->version;
3664 vnode->cb_expiry = cb->expiry;
3665 vnode->cb_type = cb->type;
3666 - vnode->cb_expires = vnode->cb_expiry + get_seconds();
3667 + vnode->cb_expires = vnode->cb_expiry +
3668 + ktime_get_real_seconds();
3669 }
3670 }
3671
3672 @@ -323,7 +324,7 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
3673 !test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
3674 !test_bit(AFS_VNODE_MODIFIED, &vnode->flags) &&
3675 !test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
3676 - if (vnode->cb_expires < get_seconds() + 10) {
3677 + if (vnode->cb_expires < ktime_get_real_seconds() + 10) {
3678 _debug("callback expired");
3679 set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
3680 } else {
3681 diff --git a/fs/afs/internal.h b/fs/afs/internal.h
3682 index 535a38d2c1d0..dd98dcda6a3f 100644
3683 --- a/fs/afs/internal.h
3684 +++ b/fs/afs/internal.h
3685 @@ -11,6 +11,7 @@
3686
3687 #include <linux/compiler.h>
3688 #include <linux/kernel.h>
3689 +#include <linux/ktime.h>
3690 #include <linux/fs.h>
3691 #include <linux/pagemap.h>
3692 #include <linux/rxrpc.h>
3693 @@ -105,7 +106,10 @@ struct afs_call {
3694 unsigned request_size; /* size of request data */
3695 unsigned reply_max; /* maximum size of reply */
3696 unsigned first_offset; /* offset into mapping[first] */
3697 - unsigned last_to; /* amount of mapping[last] */
3698 + union {
3699 + unsigned last_to; /* amount of mapping[last] */
3700 + unsigned count2; /* count used in unmarshalling */
3701 + };
3702 unsigned char unmarshall; /* unmarshalling phase */
3703 bool incoming; /* T if incoming call */
3704 bool send_pages; /* T if data from mapping should be sent */
3705 @@ -242,7 +246,7 @@ struct afs_cache_vhash {
3706 */
3707 struct afs_vlocation {
3708 atomic_t usage;
3709 - time_t time_of_death; /* time at which put reduced usage to 0 */
3710 + time64_t time_of_death; /* time at which put reduced usage to 0 */
3711 struct list_head link; /* link in cell volume location list */
3712 struct list_head grave; /* link in master graveyard list */
3713 struct list_head update; /* link in master update list */
3714 @@ -253,7 +257,7 @@ struct afs_vlocation {
3715 struct afs_cache_vlocation vldb; /* volume information DB record */
3716 struct afs_volume *vols[3]; /* volume access record pointer (index by type) */
3717 wait_queue_head_t waitq; /* status change waitqueue */
3718 - time_t update_at; /* time at which record should be updated */
3719 + time64_t update_at; /* time at which record should be updated */
3720 spinlock_t lock; /* access lock */
3721 afs_vlocation_state_t state; /* volume location state */
3722 unsigned short upd_rej_cnt; /* ENOMEDIUM count during update */
3723 @@ -266,7 +270,7 @@ struct afs_vlocation {
3724 */
3725 struct afs_server {
3726 atomic_t usage;
3727 - time_t time_of_death; /* time at which put reduced usage to 0 */
3728 + time64_t time_of_death; /* time at which put reduced usage to 0 */
3729 struct in_addr addr; /* server address */
3730 struct afs_cell *cell; /* cell in which server resides */
3731 struct list_head link; /* link in cell's server list */
3732 @@ -369,8 +373,8 @@ struct afs_vnode {
3733 struct rb_node server_rb; /* link in server->fs_vnodes */
3734 struct rb_node cb_promise; /* link in server->cb_promises */
3735 struct work_struct cb_broken_work; /* work to be done on callback break */
3736 - time_t cb_expires; /* time at which callback expires */
3737 - time_t cb_expires_at; /* time used to order cb_promise */
3738 + time64_t cb_expires; /* time at which callback expires */
3739 + time64_t cb_expires_at; /* time used to order cb_promise */
3740 unsigned cb_version; /* callback version */
3741 unsigned cb_expiry; /* callback expiry time */
3742 afs_callback_type_t cb_type; /* type of callback */
3743 @@ -749,6 +753,7 @@ extern int afs_writepages(struct address_space *, struct writeback_control *);
3744 extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
3745 extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *);
3746 extern int afs_writeback_all(struct afs_vnode *);
3747 +extern int afs_flush(struct file *, fl_owner_t);
3748 extern int afs_fsync(struct file *, loff_t, loff_t, int);
3749
3750
3751 diff --git a/fs/afs/misc.c b/fs/afs/misc.c
3752 index 91ea1aa0d8b3..100b207efc9e 100644
3753 --- a/fs/afs/misc.c
3754 +++ b/fs/afs/misc.c
3755 @@ -84,6 +84,8 @@ int afs_abort_to_error(u32 abort_code)
3756 case RXKADDATALEN: return -EKEYREJECTED;
3757 case RXKADILLEGALLEVEL: return -EKEYREJECTED;
3758
3759 + case RXGEN_OPCODE: return -ENOTSUPP;
3760 +
3761 default: return -EREMOTEIO;
3762 }
3763 }
3764 diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
3765 index 25f05a8d21b1..523b1d3ca2c6 100644
3766 --- a/fs/afs/rxrpc.c
3767 +++ b/fs/afs/rxrpc.c
3768 @@ -321,6 +321,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
3769 struct rxrpc_call *rxcall;
3770 struct msghdr msg;
3771 struct kvec iov[1];
3772 + size_t offset;
3773 + u32 abort_code;
3774 int ret;
3775
3776 _enter("%x,{%d},", addr->s_addr, ntohs(call->port));
3777 @@ -368,9 +370,11 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
3778 msg.msg_controllen = 0;
3779 msg.msg_flags = (call->send_pages ? MSG_MORE : 0);
3780
3781 - /* have to change the state *before* sending the last packet as RxRPC
3782 - * might give us the reply before it returns from sending the
3783 - * request */
3784 + /* We have to change the state *before* sending the last packet as
3785 + * rxrpc might give us the reply before it returns from sending the
3786 + * request. Further, if the send fails, we may already have been given
3787 + * a notification and may have collected it.
3788 + */
3789 if (!call->send_pages)
3790 call->state = AFS_CALL_AWAIT_REPLY;
3791 ret = rxrpc_kernel_send_data(afs_socket, rxcall,
3792 @@ -389,7 +393,17 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
3793 return wait_mode->wait(call);
3794
3795 error_do_abort:
3796 - rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD");
3797 + call->state = AFS_CALL_COMPLETE;
3798 + if (ret != -ECONNABORTED) {
3799 + rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT,
3800 + -ret, "KSD");
3801 + } else {
3802 + abort_code = 0;
3803 + offset = 0;
3804 + rxrpc_kernel_recv_data(afs_socket, rxcall, NULL, 0, &offset,
3805 + false, &abort_code);
3806 + ret = call->type->abort_to_error(abort_code);
3807 + }
3808 error_kill_call:
3809 afs_end_call(call);
3810 _leave(" = %d", ret);
3811 @@ -434,16 +448,18 @@ static void afs_deliver_to_call(struct afs_call *call)
3812 case -EINPROGRESS:
3813 case -EAGAIN:
3814 goto out;
3815 + case -ECONNABORTED:
3816 + goto call_complete;
3817 case -ENOTCONN:
3818 abort_code = RX_CALL_DEAD;
3819 rxrpc_kernel_abort_call(afs_socket, call->rxcall,
3820 abort_code, -ret, "KNC");
3821 - goto do_abort;
3822 + goto save_error;
3823 case -ENOTSUPP:
3824 - abort_code = RX_INVALID_OPERATION;
3825 + abort_code = RXGEN_OPCODE;
3826 rxrpc_kernel_abort_call(afs_socket, call->rxcall,
3827 abort_code, -ret, "KIV");
3828 - goto do_abort;
3829 + goto save_error;
3830 case -ENODATA:
3831 case -EBADMSG:
3832 case -EMSGSIZE:
3833 @@ -453,7 +469,7 @@ static void afs_deliver_to_call(struct afs_call *call)
3834 abort_code = RXGEN_SS_UNMARSHAL;
3835 rxrpc_kernel_abort_call(afs_socket, call->rxcall,
3836 abort_code, EBADMSG, "KUM");
3837 - goto do_abort;
3838 + goto save_error;
3839 }
3840 }
3841
3842 @@ -464,8 +480,9 @@ static void afs_deliver_to_call(struct afs_call *call)
3843 _leave("");
3844 return;
3845
3846 -do_abort:
3847 +save_error:
3848 call->error = ret;
3849 +call_complete:
3850 call->state = AFS_CALL_COMPLETE;
3851 goto done;
3852 }
3853 @@ -475,7 +492,6 @@ static void afs_deliver_to_call(struct afs_call *call)
3854 */
3855 static int afs_wait_for_call_to_complete(struct afs_call *call)
3856 {
3857 - const char *abort_why;
3858 int ret;
3859
3860 DECLARE_WAITQUEUE(myself, current);
3861 @@ -494,13 +510,8 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
3862 continue;
3863 }
3864
3865 - abort_why = "KWC";
3866 - ret = call->error;
3867 - if (call->state == AFS_CALL_COMPLETE)
3868 - break;
3869 - abort_why = "KWI";
3870 - ret = -EINTR;
3871 - if (signal_pending(current))
3872 + if (call->state == AFS_CALL_COMPLETE ||
3873 + signal_pending(current))
3874 break;
3875 schedule();
3876 }
3877 @@ -508,13 +519,14 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
3878 remove_wait_queue(&call->waitq, &myself);
3879 __set_current_state(TASK_RUNNING);
3880
3881 - /* kill the call */
3882 + /* Kill off the call if it's still live. */
3883 if (call->state < AFS_CALL_COMPLETE) {
3884 - _debug("call incomplete");
3885 + _debug("call interrupted");
3886 rxrpc_kernel_abort_call(afs_socket, call->rxcall,
3887 - RX_CALL_DEAD, -ret, abort_why);
3888 + RX_USER_ABORT, -EINTR, "KWI");
3889 }
3890
3891 + ret = call->error;
3892 _debug("call complete");
3893 afs_end_call(call);
3894 _leave(" = %d", ret);
3895 diff --git a/fs/afs/security.c b/fs/afs/security.c
3896 index 8d010422dc89..bfa9d3428383 100644
3897 --- a/fs/afs/security.c
3898 +++ b/fs/afs/security.c
3899 @@ -340,17 +340,22 @@ int afs_permission(struct inode *inode, int mask)
3900 } else {
3901 if (!(access & AFS_ACE_LOOKUP))
3902 goto permission_denied;
3903 + if ((mask & MAY_EXEC) && !(inode->i_mode & S_IXUSR))
3904 + goto permission_denied;
3905 if (mask & (MAY_EXEC | MAY_READ)) {
3906 if (!(access & AFS_ACE_READ))
3907 goto permission_denied;
3908 + if (!(inode->i_mode & S_IRUSR))
3909 + goto permission_denied;
3910 } else if (mask & MAY_WRITE) {
3911 if (!(access & AFS_ACE_WRITE))
3912 goto permission_denied;
3913 + if (!(inode->i_mode & S_IWUSR))
3914 + goto permission_denied;
3915 }
3916 }
3917
3918 key_put(key);
3919 - ret = generic_permission(inode, mask);
3920 _leave(" = %d", ret);
3921 return ret;
3922
3923 diff --git a/fs/afs/server.c b/fs/afs/server.c
3924 index d4066ab7dd55..c001b1f2455f 100644
3925 --- a/fs/afs/server.c
3926 +++ b/fs/afs/server.c
3927 @@ -242,7 +242,7 @@ void afs_put_server(struct afs_server *server)
3928 spin_lock(&afs_server_graveyard_lock);
3929 if (atomic_read(&server->usage) == 0) {
3930 list_move_tail(&server->grave, &afs_server_graveyard);
3931 - server->time_of_death = get_seconds();
3932 + server->time_of_death = ktime_get_real_seconds();
3933 queue_delayed_work(afs_wq, &afs_server_reaper,
3934 afs_server_timeout * HZ);
3935 }
3936 @@ -277,9 +277,9 @@ static void afs_reap_server(struct work_struct *work)
3937 LIST_HEAD(corpses);
3938 struct afs_server *server;
3939 unsigned long delay, expiry;
3940 - time_t now;
3941 + time64_t now;
3942
3943 - now = get_seconds();
3944 + now = ktime_get_real_seconds();
3945 spin_lock(&afs_server_graveyard_lock);
3946
3947 while (!list_empty(&afs_server_graveyard)) {
3948 diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
3949 index 45a86396fd2d..92bd5553b8c9 100644
3950 --- a/fs/afs/vlocation.c
3951 +++ b/fs/afs/vlocation.c
3952 @@ -340,7 +340,8 @@ static void afs_vlocation_queue_for_updates(struct afs_vlocation *vl)
3953 struct afs_vlocation *xvl;
3954
3955 /* wait at least 10 minutes before updating... */
3956 - vl->update_at = get_seconds() + afs_vlocation_update_timeout;
3957 + vl->update_at = ktime_get_real_seconds() +
3958 + afs_vlocation_update_timeout;
3959
3960 spin_lock(&afs_vlocation_updates_lock);
3961
3962 @@ -506,7 +507,7 @@ void afs_put_vlocation(struct afs_vlocation *vl)
3963 if (atomic_read(&vl->usage) == 0) {
3964 _debug("buried");
3965 list_move_tail(&vl->grave, &afs_vlocation_graveyard);
3966 - vl->time_of_death = get_seconds();
3967 + vl->time_of_death = ktime_get_real_seconds();
3968 queue_delayed_work(afs_wq, &afs_vlocation_reap,
3969 afs_vlocation_timeout * HZ);
3970
3971 @@ -543,11 +544,11 @@ static void afs_vlocation_reaper(struct work_struct *work)
3972 LIST_HEAD(corpses);
3973 struct afs_vlocation *vl;
3974 unsigned long delay, expiry;
3975 - time_t now;
3976 + time64_t now;
3977
3978 _enter("");
3979
3980 - now = get_seconds();
3981 + now = ktime_get_real_seconds();
3982 spin_lock(&afs_vlocation_graveyard_lock);
3983
3984 while (!list_empty(&afs_vlocation_graveyard)) {
3985 @@ -622,13 +623,13 @@ static void afs_vlocation_updater(struct work_struct *work)
3986 {
3987 struct afs_cache_vlocation vldb;
3988 struct afs_vlocation *vl, *xvl;
3989 - time_t now;
3990 + time64_t now;
3991 long timeout;
3992 int ret;
3993
3994 _enter("");
3995
3996 - now = get_seconds();
3997 + now = ktime_get_real_seconds();
3998
3999 /* find a record to update */
4000 spin_lock(&afs_vlocation_updates_lock);
4001 @@ -684,7 +685,8 @@ static void afs_vlocation_updater(struct work_struct *work)
4002
4003 /* and then reschedule */
4004 _debug("reschedule");
4005 - vl->update_at = get_seconds() + afs_vlocation_update_timeout;
4006 + vl->update_at = ktime_get_real_seconds() +
4007 + afs_vlocation_update_timeout;
4008
4009 spin_lock(&afs_vlocation_updates_lock);
4010
4011 diff --git a/fs/afs/write.c b/fs/afs/write.c
4012 index f865c3f05bea..3fba2b573c86 100644
4013 --- a/fs/afs/write.c
4014 +++ b/fs/afs/write.c
4015 @@ -148,12 +148,12 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
4016 kfree(candidate);
4017 return -ENOMEM;
4018 }
4019 - *pagep = page;
4020 - /* page won't leak in error case: it eventually gets cleaned off LRU */
4021
4022 if (!PageUptodate(page) && len != PAGE_SIZE) {
4023 ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page);
4024 if (ret < 0) {
4025 + unlock_page(page);
4026 + put_page(page);
4027 kfree(candidate);
4028 _leave(" = %d [prep]", ret);
4029 return ret;
4030 @@ -161,6 +161,9 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
4031 SetPageUptodate(page);
4032 }
4033
4034 + /* page won't leak in error case: it eventually gets cleaned off LRU */
4035 + *pagep = page;
4036 +
4037 try_again:
4038 spin_lock(&vnode->writeback_lock);
4039
4040 @@ -296,10 +299,14 @@ static void afs_kill_pages(struct afs_vnode *vnode, bool error,
4041 ASSERTCMP(pv.nr, ==, count);
4042
4043 for (loop = 0; loop < count; loop++) {
4044 - ClearPageUptodate(pv.pages[loop]);
4045 + struct page *page = pv.pages[loop];
4046 + ClearPageUptodate(page);
4047 if (error)
4048 - SetPageError(pv.pages[loop]);
4049 - end_page_writeback(pv.pages[loop]);
4050 + SetPageError(page);
4051 + if (PageWriteback(page))
4052 + end_page_writeback(page);
4053 + if (page->index >= first)
4054 + first = page->index + 1;
4055 }
4056
4057 __pagevec_release(&pv);
4058 @@ -502,6 +509,7 @@ static int afs_writepages_region(struct address_space *mapping,
4059
4060 if (PageWriteback(page) || !PageDirty(page)) {
4061 unlock_page(page);
4062 + put_page(page);
4063 continue;
4064 }
4065
4066 @@ -734,6 +742,20 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
4067 return ret;
4068 }
4069
4070 +/*
4071 + * Flush out all outstanding writes on a file opened for writing when it is
4072 + * closed.
4073 + */
4074 +int afs_flush(struct file *file, fl_owner_t id)
4075 +{
4076 + _enter("");
4077 +
4078 + if ((file->f_mode & FMODE_WRITE) == 0)
4079 + return 0;
4080 +
4081 + return vfs_fsync(file, 0);
4082 +}
4083 +
4084 /*
4085 * notification that a previously read-only page is about to become writable
4086 * - if it returns an error, the caller will deliver a bus error signal
4087 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
4088 index 4c71dba90120..0ea31a53fd5b 100644
4089 --- a/fs/autofs4/waitq.c
4090 +++ b/fs/autofs4/waitq.c
4091 @@ -176,7 +176,6 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
4092
4093 mutex_unlock(&sbi->wq_mutex);
4094
4095 - if (autofs4_write(sbi, pipe, &pkt, pktsz))
4096 switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {
4097 case 0:
4098 break;
4099 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
4100 index f089d7d8afe7..894d56361ea9 100644
4101 --- a/fs/btrfs/inode.c
4102 +++ b/fs/btrfs/inode.c
4103 @@ -6812,6 +6812,20 @@ static noinline int uncompress_inline(struct btrfs_path *path,
4104 max_size = min_t(unsigned long, PAGE_SIZE, max_size);
4105 ret = btrfs_decompress(compress_type, tmp, page,
4106 extent_offset, inline_size, max_size);
4107 +
4108 + /*
4109 + * The decompression code contains a memset to fill in any space between the end
4110 + * of the uncompressed data and the end of max_size in case the decompressed
4111 + * data ends up shorter than ram_bytes. That doesn't cover the hole between
4112 + * the end of an inline extent and the beginning of the next block, so we
4113 + * cover that region here.
4114 + */
4115 +
4116 + if (max_size + pg_offset < PAGE_SIZE) {
4117 + char *map = kmap(page);
4118 + memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
4119 + kunmap(page);
4120 + }
4121 kfree(tmp);
4122 return ret;
4123 }
4124 diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
4125 index 6e144048a72e..a724d9a79bd2 100644
4126 --- a/fs/btrfs/tests/free-space-tree-tests.c
4127 +++ b/fs/btrfs/tests/free-space-tree-tests.c
4128 @@ -501,7 +501,8 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
4129 path = btrfs_alloc_path();
4130 if (!path) {
4131 test_msg("Couldn't allocate path\n");
4132 - return -ENOMEM;
4133 + ret = -ENOMEM;
4134 + goto out;
4135 }
4136
4137 ret = add_block_group_free_space(&trans, root->fs_info, cache);
4138 diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
4139 index c0f52c443c34..3d2639c30018 100644
4140 --- a/fs/ceph/mds_client.c
4141 +++ b/fs/ceph/mds_client.c
4142 @@ -1396,6 +1396,29 @@ static int __close_session(struct ceph_mds_client *mdsc,
4143 return request_close_session(mdsc, session);
4144 }
4145
4146 +static bool drop_negative_children(struct dentry *dentry)
4147 +{
4148 + struct dentry *child;
4149 + bool all_negative = true;
4150 +
4151 + if (!d_is_dir(dentry))
4152 + goto out;
4153 +
4154 + spin_lock(&dentry->d_lock);
4155 + list_for_each_entry(child, &dentry->d_subdirs, d_child) {
4156 + if (d_really_is_positive(child)) {
4157 + all_negative = false;
4158 + break;
4159 + }
4160 + }
4161 + spin_unlock(&dentry->d_lock);
4162 +
4163 + if (all_negative)
4164 + shrink_dcache_parent(dentry);
4165 +out:
4166 + return all_negative;
4167 +}
4168 +
4169 /*
4170 * Trim old(er) caps.
4171 *
4172 @@ -1441,16 +1464,27 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
4173 if ((used | wanted) & ~oissued & mine)
4174 goto out; /* we need these caps */
4175
4176 - session->s_trim_caps--;
4177 if (oissued) {
4178 /* we aren't the only cap.. just remove us */
4179 __ceph_remove_cap(cap, true);
4180 + session->s_trim_caps--;
4181 } else {
4182 + struct dentry *dentry;
4183 /* try dropping referring dentries */
4184 spin_unlock(&ci->i_ceph_lock);
4185 - d_prune_aliases(inode);
4186 - dout("trim_caps_cb %p cap %p pruned, count now %d\n",
4187 - inode, cap, atomic_read(&inode->i_count));
4188 + dentry = d_find_any_alias(inode);
4189 + if (dentry && drop_negative_children(dentry)) {
4190 + int count;
4191 + dput(dentry);
4192 + d_prune_aliases(inode);
4193 + count = atomic_read(&inode->i_count);
4194 + if (count == 1)
4195 + session->s_trim_caps--;
4196 + dout("trim_caps_cb %p cap %p pruned, count now %d\n",
4197 + inode, cap, count);
4198 + } else {
4199 + dput(dentry);
4200 + }
4201 return 0;
4202 }
4203
4204 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
4205 index a77cbc5b657b..1a0c57100f28 100644
4206 --- a/fs/ext4/extents.c
4207 +++ b/fs/ext4/extents.c
4208 @@ -4731,6 +4731,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
4209 EXT4_INODE_EOFBLOCKS);
4210 }
4211 ext4_mark_inode_dirty(handle, inode);
4212 + ext4_update_inode_fsync_trans(handle, inode, 1);
4213 ret2 = ext4_journal_stop(handle);
4214 if (ret2)
4215 break;
4216 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
4217 index 4438b93f6fd6..b1766a67d2eb 100644
4218 --- a/fs/ext4/namei.c
4219 +++ b/fs/ext4/namei.c
4220 @@ -1417,6 +1417,10 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
4221 "falling back\n"));
4222 }
4223 nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
4224 + if (!nblocks) {
4225 + ret = NULL;
4226 + goto cleanup_and_exit;
4227 + }
4228 start = EXT4_I(dir)->i_dir_start_lookup;
4229 if (start >= nblocks)
4230 start = 0;
4231 diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
4232 index 05713a5da083..0703a1179847 100644
4233 --- a/fs/fs-writeback.c
4234 +++ b/fs/fs-writeback.c
4235 @@ -173,19 +173,33 @@ static void wb_wakeup(struct bdi_writeback *wb)
4236 spin_unlock_bh(&wb->work_lock);
4237 }
4238
4239 +static void finish_writeback_work(struct bdi_writeback *wb,
4240 + struct wb_writeback_work *work)
4241 +{
4242 + struct wb_completion *done = work->done;
4243 +
4244 + if (work->auto_free)
4245 + kfree(work);
4246 + if (done && atomic_dec_and_test(&done->cnt))
4247 + wake_up_all(&wb->bdi->wb_waitq);
4248 +}
4249 +
4250 static void wb_queue_work(struct bdi_writeback *wb,
4251 struct wb_writeback_work *work)
4252 {
4253 trace_writeback_queue(wb, work);
4254
4255 - spin_lock_bh(&wb->work_lock);
4256 - if (!test_bit(WB_registered, &wb->state))
4257 - goto out_unlock;
4258 if (work->done)
4259 atomic_inc(&work->done->cnt);
4260 - list_add_tail(&work->list, &wb->work_list);
4261 - mod_delayed_work(bdi_wq, &wb->dwork, 0);
4262 -out_unlock:
4263 +
4264 + spin_lock_bh(&wb->work_lock);
4265 +
4266 + if (test_bit(WB_registered, &wb->state)) {
4267 + list_add_tail(&work->list, &wb->work_list);
4268 + mod_delayed_work(bdi_wq, &wb->dwork, 0);
4269 + } else
4270 + finish_writeback_work(wb, work);
4271 +
4272 spin_unlock_bh(&wb->work_lock);
4273 }
4274
4275 @@ -1875,16 +1889,9 @@ static long wb_do_writeback(struct bdi_writeback *wb)
4276
4277 set_bit(WB_writeback_running, &wb->state);
4278 while ((work = get_next_work_item(wb)) != NULL) {
4279 - struct wb_completion *done = work->done;
4280 -
4281 trace_writeback_exec(wb, work);
4282 -
4283 wrote += wb_writeback(wb, work);
4284 -
4285 - if (work->auto_free)
4286 - kfree(work);
4287 - if (done && atomic_dec_and_test(&done->cnt))
4288 - wake_up_all(&wb->bdi->wb_waitq);
4289 + finish_writeback_work(wb, work);
4290 }
4291
4292 /*
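
Factoring the free-and-complete step into finish_writeback_work() lets wb_queue_work() finish a work item inline when the bdi is no longer registered, instead of dropping it and leaving a waiter blocked on done->cnt forever. A sketch of the queue-or-complete pattern, with a hypothetical counter in place of the kernel's wb_completion/waitqueue pair:

#include <stdbool.h>
#include <stdlib.h>

struct done_cnt { int cnt; };

struct work {
	struct done_cnt *done;	/* optional completion counter */
	bool auto_free;
};

static void finish_work(struct work *w)
{
	struct done_cnt *done = w->done;

	if (w->auto_free)
		free(w);
	if (done && --done->cnt == 0)
		/* wake_up_all() would go here */ ;
}

static void queue_or_complete(struct work *w, bool registered)
{
	if (w->done)
		w->done->cnt++;		/* account before queueing */
	if (registered)
		/* list_add_tail() + kick the worker would go here */ ;
	else
		finish_work(w);		/* never queued: complete it now */
}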
4293 diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
4294 index e23ff70b3435..39c382f16272 100644
4295 --- a/fs/gfs2/file.c
4296 +++ b/fs/gfs2/file.c
4297 @@ -256,7 +256,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
4298 goto out;
4299 }
4300 if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
4301 - if (flags & GFS2_DIF_JDATA)
4302 + if (new_flags & GFS2_DIF_JDATA)
4303 gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
4304 error = filemap_fdatawrite(inode->i_mapping);
4305 if (error)
4306 @@ -264,6 +264,8 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
4307 error = filemap_fdatawait(inode->i_mapping);
4308 if (error)
4309 goto out;
4310 + if (new_flags & GFS2_DIF_JDATA)
4311 + gfs2_ordered_del_inode(ip);
4312 }
4313 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
4314 if (error)
4315 diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
4316 index 074ac7131459..f6b0848cc831 100644
4317 --- a/fs/nfs/nfs4client.c
4318 +++ b/fs/nfs/nfs4client.c
4319 @@ -1004,9 +1004,9 @@ static void nfs4_session_set_rwsize(struct nfs_server *server)
4320 server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
4321 server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
4322
4323 - if (server->rsize > server_resp_sz)
4324 + if (!server->rsize || server->rsize > server_resp_sz)
4325 server->rsize = server_resp_sz;
4326 - if (server->wsize > server_rqst_sz)
4327 + if (!server->wsize || server->wsize > server_rqst_sz)
4328 server->wsize = server_rqst_sz;
4329 #endif /* CONFIG_NFS_V4_1 */
4330 }
4331 diff --git a/fs/nfs/write.c b/fs/nfs/write.c
4332 index e4772a8340f8..9905735463a4 100644
4333 --- a/fs/nfs/write.c
4334 +++ b/fs/nfs/write.c
4335 @@ -1859,6 +1859,8 @@ int nfs_commit_inode(struct inode *inode, int how)
4336 if (res)
4337 error = nfs_generic_commit_list(inode, &head, how, &cinfo);
4338 nfs_commit_end(cinfo.mds);
4339 + if (res == 0)
4340 + return res;
4341 if (error < 0)
4342 goto out_error;
4343 if (!may_wait)
4344 diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
4345 index 1645b977c9c6..5c4800626f13 100644
4346 --- a/fs/nfsd/nfssvc.c
4347 +++ b/fs/nfsd/nfssvc.c
4348 @@ -155,7 +155,8 @@ int nfsd_vers(int vers, enum vers_op change)
4349
4350 int nfsd_minorversion(u32 minorversion, enum vers_op change)
4351 {
4352 - if (minorversion > NFSD_SUPPORTED_MINOR_VERSION)
4353 + if (minorversion > NFSD_SUPPORTED_MINOR_VERSION &&
4354 + change != NFSD_AVAIL)
4355 return -1;
4356 switch(change) {
4357 case NFSD_SET:
4358 @@ -399,23 +400,20 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
4359
4360 void nfsd_reset_versions(void)
4361 {
4362 - int found_one = 0;
4363 int i;
4364
4365 - for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
4366 - if (nfsd_program.pg_vers[i])
4367 - found_one = 1;
4368 - }
4369 -
4370 - if (!found_one) {
4371 - for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++)
4372 - nfsd_program.pg_vers[i] = nfsd_version[i];
4373 -#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
4374 - for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++)
4375 - nfsd_acl_program.pg_vers[i] =
4376 - nfsd_acl_version[i];
4377 -#endif
4378 - }
4379 + for (i = 0; i < NFSD_NRVERS; i++)
4380 + if (nfsd_vers(i, NFSD_TEST))
4381 + return;
4382 +
4383 + for (i = 0; i < NFSD_NRVERS; i++)
4384 + if (i != 4)
4385 + nfsd_vers(i, NFSD_SET);
4386 + else {
4387 + int minor = 0;
4388 + while (nfsd_minorversion(minor, NFSD_SET) >= 0)
4389 + minor++;
4390 + }
4391 }
4392
4393 /*
4394 diff --git a/fs/proc/proc_tty.c b/fs/proc/proc_tty.c
4395 index 15f327bed8c6..7340c36978a3 100644
4396 --- a/fs/proc/proc_tty.c
4397 +++ b/fs/proc/proc_tty.c
4398 @@ -14,6 +14,7 @@
4399 #include <linux/tty.h>
4400 #include <linux/seq_file.h>
4401 #include <linux/bitops.h>
4402 +#include "internal.h"
4403
4404 /*
4405 * The /proc/tty directory inodes...
4406 @@ -164,7 +165,7 @@ void proc_tty_unregister_driver(struct tty_driver *driver)
4407 if (!ent)
4408 return;
4409
4410 - remove_proc_entry(driver->driver_name, proc_tty_driver);
4411 + remove_proc_entry(ent->name, proc_tty_driver);
4412
4413 driver->proc_entry = NULL;
4414 }
4415 diff --git a/fs/udf/super.c b/fs/udf/super.c
4416 index 4942549e7dc8..4b1f6d5372c3 100644
4417 --- a/fs/udf/super.c
4418 +++ b/fs/udf/super.c
4419 @@ -710,7 +710,7 @@ static loff_t udf_check_vsd(struct super_block *sb)
4420 else
4421 sectorsize = sb->s_blocksize;
4422
4423 - sector += (sbi->s_session << sb->s_blocksize_bits);
4424 + sector += (((loff_t)sbi->s_session) << sb->s_blocksize_bits);
4425
4426 udf_debug("Starting at sector %u (%ld byte sectors)\n",
4427 (unsigned int)(sector >> sb->s_blocksize_bits),
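
The cast matters because `sbi->s_session << sb->s_blocksize_bits` is evaluated in the 32-bit type of s_session and can wrap on a large multisession offset before the result is ever widened to loff_t. A two-line illustration:

#include <stdio.h>

int main(void)
{
	unsigned int session = 3000000;	/* hypothetical session start sector */
	int bits = 11;			/* 2048-byte sectors */
	long long bad  = session << bits;		/* wraps at 32 bits */
	long long good = (long long)session << bits;	/* widen, then shift */

	printf("%lld vs %lld\n", bad, good);	/* 1849032704 vs 6144000000 */
	return 0;
}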
4428 diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
4429 index b86054cc41db..784d667475ae 100644
4430 --- a/fs/userfaultfd.c
4431 +++ b/fs/userfaultfd.c
4432 @@ -419,7 +419,7 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
4433 * in such case.
4434 */
4435 down_read(&mm->mmap_sem);
4436 - ret = 0;
4437 + ret = VM_FAULT_NOPAGE;
4438 }
4439 }
4440
4441 diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
4442 index 7eb99701054f..8ad65d43b65d 100644
4443 --- a/fs/xfs/libxfs/xfs_bmap.c
4444 +++ b/fs/xfs/libxfs/xfs_bmap.c
4445 @@ -2713,7 +2713,7 @@ xfs_bmap_add_extent_unwritten_real(
4446 &i)))
4447 goto done;
4448 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
4449 - cur->bc_rec.b.br_state = XFS_EXT_NORM;
4450 + cur->bc_rec.b.br_state = new->br_state;
4451 if ((error = xfs_btree_insert(cur, &i)))
4452 goto done;
4453 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
4454 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
4455 index 5b81f7f41b80..33c389934238 100644
4456 --- a/fs/xfs/xfs_iops.c
4457 +++ b/fs/xfs/xfs_iops.c
4458 @@ -870,22 +870,6 @@ xfs_setattr_size(
4459 if (error)
4460 return error;
4461
4462 - /*
4463 - * We are going to log the inode size change in this transaction so
4464 - * any previous writes that are beyond the on disk EOF and the new
4465 - * EOF that have not been written out need to be written here. If we
4466 - * do not write the data out, we expose ourselves to the null files
4467 - * problem. Note that this includes any block zeroing we did above;
4468 - * otherwise those blocks may not be zeroed after a crash.
4469 - */
4470 - if (did_zeroing ||
4471 - (newsize > ip->i_d.di_size && oldsize != ip->i_d.di_size)) {
4472 - error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
4473 - ip->i_d.di_size, newsize);
4474 - if (error)
4475 - return error;
4476 - }
4477 -
4478 /*
4479 * We've already locked out new page faults, so now we can safely remove
4480 * pages from the page cache knowing they won't get refaulted until we
4481 @@ -902,9 +886,29 @@ xfs_setattr_size(
4482 * user visible changes). There's not much we can do about this, except
4483 * to hope that the caller sees ENOMEM and retries the truncate
4484 * operation.
4485 + *
4486 + * And we update the in-core i_size and truncate the page cache beyond newsize
4487 + * before writing back the [di_size, newsize] range, so we're guaranteed
4488 + * not to write stale data past the new EOF on truncate down.
4489 */
4490 truncate_setsize(inode, newsize);
4491
4492 + /*
4493 + * We are going to log the inode size change in this transaction so
4494 + * any previous writes that are beyond the on disk EOF and the new
4495 + * EOF that have not been written out need to be written here. If we
4496 + * do not write the data out, we expose ourselves to the null files
4497 + * problem. Note that this includes any block zeroing we did above;
4498 + * otherwise those blocks may not be zeroed after a crash.
4499 + */
4500 + if (did_zeroing ||
4501 + (newsize > ip->i_d.di_size && oldsize != ip->i_d.di_size)) {
4502 + error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
4503 + ip->i_d.di_size, newsize - 1);
4504 + if (error)
4505 + return error;
4506 + }
4507 +
4508 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
4509 if (error)
4510 return error;
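
Besides moving the flush after truncate_setsize(), the hunk changes the range end from newsize to newsize - 1: filemap_write_and_wait_range() takes an inclusive last byte. A minimal sketch of the corrected call shape, assuming a hypothetical flush_range(start, end_inclusive) primitive:

static int flush_new_tail(long long di_size, long long newsize,
			  int (*flush_range)(long long, long long))
{
	if (newsize <= di_size)
		return 0;	/* nothing beyond the on-disk EOF to push */
	return flush_range(di_size, newsize - 1); /* end byte is inclusive */
}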
4511 diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
4512 index 05909269f973..1e26f4504eed 100644
4513 --- a/fs/xfs/xfs_log_recover.c
4514 +++ b/fs/xfs/xfs_log_recover.c
4515 @@ -753,7 +753,7 @@ xlog_find_head(
4516 * in the in-core log. The following number can be made tighter if
4517 * we actually look at the block size of the filesystem.
4518 */
4519 - num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
4520 + num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
4521 if (head_blk >= num_scan_bblks) {
4522 /*
4523 * We are guaranteed that the entire check can be performed
4524 diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
4525 index f6d9af3efa45..cac57358f7af 100644
4526 --- a/include/crypto/internal/hash.h
4527 +++ b/include/crypto/internal/hash.h
4528 @@ -80,6 +80,14 @@ int ahash_register_instance(struct crypto_template *tmpl,
4529 struct ahash_instance *inst);
4530 void ahash_free_instance(struct crypto_instance *inst);
4531
4532 +int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
4533 + unsigned int keylen);
4534 +
4535 +static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
4536 +{
4537 + return alg->setkey != shash_no_setkey;
4538 +}
4539 +
4540 int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
4541 struct hash_alg_common *alg,
4542 struct crypto_instance *inst);
4543 diff --git a/include/linux/acpi.h b/include/linux/acpi.h
4544 index 61a3d90f32b3..ca2b4c4aec42 100644
4545 --- a/include/linux/acpi.h
4546 +++ b/include/linux/acpi.h
4547 @@ -276,11 +276,8 @@ bool acpi_processor_validate_proc_id(int proc_id);
4548 /* Arch dependent functions for cpu hotplug support */
4549 int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu);
4550 int acpi_unmap_cpu(int cpu);
4551 -int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid);
4552 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
4553
4554 -void acpi_set_processor_mapping(void);
4555 -
4556 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
4557 int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
4558 #endif
4559 diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
4560 index 80faf44b8887..dd1b009106a5 100644
4561 --- a/include/linux/mlx4/device.h
4562 +++ b/include/linux/mlx4/device.h
4563 @@ -476,6 +476,7 @@ enum {
4564 enum {
4565 MLX4_INTERFACE_STATE_UP = 1 << 0,
4566 MLX4_INTERFACE_STATE_DELETION = 1 << 1,
4567 + MLX4_INTERFACE_STATE_NOWAIT = 1 << 2,
4568 };
4569
4570 #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
4571 diff --git a/include/linux/mman.h b/include/linux/mman.h
4572 index 634c4c51fe3a..c540001ca861 100644
4573 --- a/include/linux/mman.h
4574 +++ b/include/linux/mman.h
4575 @@ -63,8 +63,9 @@ static inline bool arch_validate_prot(unsigned long prot)
4576 * ("bit1" and "bit2" must be single bits)
4577 */
4578 #define _calc_vm_trans(x, bit1, bit2) \
4579 + ((!(bit1) || !(bit2)) ? 0 : \
4580 ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
4581 - : ((x) & (bit1)) / ((bit1) / (bit2)))
4582 + : ((x) & (bit1)) / ((bit1) / (bit2))))
4583
4584 /*
4585 * Combine the mmap "prot" argument into "vm_flags" used internally.
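
The new `(!(bit1) || !(bit2)) ? 0 :` guard matters because both arms of the old macro divide by one of the flags; on an architecture that defines a flag as 0, that was a division by zero. A standalone sketch of the patched macro, with made-up flag values:

#include <stdio.h>

#define _calc_vm_trans(x, bit1, bit2) \
	((!(bit1) || !(bit2)) ? 0 : \
	 ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
			   : ((x) & (bit1)) / ((bit1) / (bit2))))

#define MY_PROT_READ 0x1   /* hypothetical source flag */
#define MY_VM_READ   0x10  /* hypothetical destination flag */
#define MY_PROT_NONE 0x0   /* a flag some arch defines as 0 */

int main(void)
{
	unsigned long prot = MY_PROT_READ;

	/* Normal translation: moves the bit from position 0 to position 4. */
	printf("0x%lx\n", _calc_vm_trans(prot, MY_PROT_READ, MY_VM_READ));
	/* With a zero flag the guard folds the whole expression to 0
	 * instead of dividing by zero. */
	printf("0x%lx\n", _calc_vm_trans(prot, MY_PROT_NONE, MY_VM_READ));
	return 0;
}
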
4586 diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
4587 index 1beab5532035..818a38f99221 100644
4588 --- a/include/rdma/ib_addr.h
4589 +++ b/include/rdma/ib_addr.h
4590 @@ -243,10 +243,11 @@ static inline void rdma_addr_set_dgid(struct rdma_dev_addr *dev_addr, union ib_g
4591 static inline enum ib_mtu iboe_get_mtu(int mtu)
4592 {
4593 /*
4594 - * reduce IB headers from effective IBoE MTU. 28 stands for
4595 - * atomic header which is the biggest possible header after BTH
4596 + * Reduce IB headers from effective IBoE MTU.
4597 */
4598 - mtu = mtu - IB_GRH_BYTES - IB_BTH_BYTES - 28;
4599 + mtu = mtu - (IB_GRH_BYTES + IB_UDP_BYTES + IB_BTH_BYTES +
4600 + IB_EXT_XRC_BYTES + IB_EXT_ATOMICETH_BYTES +
4601 + IB_ICRC_BYTES);
4602
4603 if (mtu >= ib_mtu_enum_to_int(IB_MTU_4096))
4604 return IB_MTU_4096;
4605 diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
4606 index b13419ce99ff..e02b78a38eba 100644
4607 --- a/include/rdma/ib_pack.h
4608 +++ b/include/rdma/ib_pack.h
4609 @@ -37,14 +37,17 @@
4610 #include <uapi/linux/if_ether.h>
4611
4612 enum {
4613 - IB_LRH_BYTES = 8,
4614 - IB_ETH_BYTES = 14,
4615 - IB_VLAN_BYTES = 4,
4616 - IB_GRH_BYTES = 40,
4617 - IB_IP4_BYTES = 20,
4618 - IB_UDP_BYTES = 8,
4619 - IB_BTH_BYTES = 12,
4620 - IB_DETH_BYTES = 8
4621 + IB_LRH_BYTES = 8,
4622 + IB_ETH_BYTES = 14,
4623 + IB_VLAN_BYTES = 4,
4624 + IB_GRH_BYTES = 40,
4625 + IB_IP4_BYTES = 20,
4626 + IB_UDP_BYTES = 8,
4627 + IB_BTH_BYTES = 12,
4628 + IB_DETH_BYTES = 8,
4629 + IB_EXT_ATOMICETH_BYTES = 28,
4630 + IB_EXT_XRC_BYTES = 4,
4631 + IB_ICRC_BYTES = 4
4632 };
4633
4634 struct ib_field {
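
With the added constants, iboe_get_mtu() now subtracts the full worst-case header stack, GRH + UDP + BTH + XRC extension + AtomicETH + ICRC = 40 + 8 + 12 + 4 + 28 + 4 = 96 bytes, where the old code subtracted only 40 + 12 + 28 = 80. A rough arithmetic sketch (the device MTU below is invented):

#include <stdio.h>

enum {
	IB_GRH_BYTES = 40, IB_UDP_BYTES = 8, IB_BTH_BYTES = 12,
	IB_EXT_XRC_BYTES = 4, IB_EXT_ATOMICETH_BYTES = 28, IB_ICRC_BYTES = 4,
};

int main(void)
{
	int mtu = 4200; /* hypothetical IBoE device MTU */

	mtu -= IB_GRH_BYTES + IB_UDP_BYTES + IB_BTH_BYTES +
	       IB_EXT_XRC_BYTES + IB_EXT_ATOMICETH_BYTES + IB_ICRC_BYTES;

	/* 4200 - 96 = 4104 >= 4096, so this device still carries IB_MTU_4096 */
	printf("effective payload budget: %d\n", mtu);
	return 0;
}
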
4635 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
4636 index a87e8940fe57..eb3b23b6ec54 100644
4637 --- a/include/target/target_core_base.h
4638 +++ b/include/target/target_core_base.h
4639 @@ -297,7 +297,7 @@ struct t10_alua_tg_pt_gp {
4640 struct list_head tg_pt_gp_lun_list;
4641 struct se_lun *tg_pt_gp_alua_lun;
4642 struct se_node_acl *tg_pt_gp_alua_nacl;
4643 - struct delayed_work tg_pt_gp_transition_work;
4644 + struct work_struct tg_pt_gp_transition_work;
4645 struct completion *tg_pt_gp_transition_complete;
4646 };
4647
4648 diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
4649 index ab1dadba9923..33c603dd7cd3 100644
4650 --- a/include/uapi/linux/usb/ch9.h
4651 +++ b/include/uapi/linux/usb/ch9.h
4652 @@ -423,6 +423,11 @@ struct usb_endpoint_descriptor {
4653 #define USB_ENDPOINT_XFER_INT 3
4654 #define USB_ENDPOINT_MAX_ADJUSTABLE 0x80
4655
4656 +#define USB_EP_MAXP_MULT_SHIFT 11
4657 +#define USB_EP_MAXP_MULT_MASK (3 << USB_EP_MAXP_MULT_SHIFT)
4658 +#define USB_EP_MAXP_MULT(m) \
4659 + (((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT)
4660 +
4661 /* The USB 3.0 spec redefines bits 5:4 of bmAttributes as interrupt ep type. */
4662 #define USB_ENDPOINT_INTRTYPE 0x30
4663 #define USB_ENDPOINT_INTR_PERIODIC (0 << 4)
4664 @@ -630,6 +635,20 @@ static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd)
4665 return __le16_to_cpu(epd->wMaxPacketSize);
4666 }
4667
4668 +/**
4669 + * usb_endpoint_maxp_mult - get endpoint's transactional opportunities
4670 + * @epd: endpoint to be checked
4671 + *
4672 + * Return @epd's wMaxPacketSize[12:11] + 1
4673 + */
4674 +static inline int
4675 +usb_endpoint_maxp_mult(const struct usb_endpoint_descriptor *epd)
4676 +{
4677 + int maxp = __le16_to_cpu(epd->wMaxPacketSize);
4678 +
4679 + return USB_EP_MAXP_MULT(maxp) + 1;
4680 +}
4681 +
4682 static inline int usb_endpoint_interrupt_type(
4683 const struct usb_endpoint_descriptor *epd)
4684 {
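
Bits 12:11 of wMaxPacketSize encode the number of additional transactions per microframe for high-speed, high-bandwidth endpoints, so the helper returns that field plus one. A self-contained check with a sample descriptor value:

#include <stdio.h>

#define USB_EP_MAXP_MULT_SHIFT 11
#define USB_EP_MAXP_MULT_MASK  (3 << USB_EP_MAXP_MULT_SHIFT)
#define USB_EP_MAXP_MULT(m) \
	(((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT)

int main(void)
{
	/* wMaxPacketSize = 0x1400: bits 10:0 give 1024 bytes per packet,
	 * bits 12:11 give 2, i.e. up to 3 transactions per microframe. */
	int wMaxPacketSize = 0x1400;

	printf("maxp = %d\n", wMaxPacketSize & 0x7ff);              /* 1024 */
	printf("mult = %d\n", USB_EP_MAXP_MULT(wMaxPacketSize) + 1); /* 3 */
	return 0;
}
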
4685 diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
4686 index c95c5122b105..df5c32a0c6ed 100644
4687 --- a/kernel/sched/deadline.c
4688 +++ b/kernel/sched/deadline.c
4689 @@ -445,13 +445,13 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
4690 *
4691 * This function returns true if:
4692 *
4693 - * runtime / (deadline - t) > dl_runtime / dl_period ,
4694 + * runtime / (deadline - t) > dl_runtime / dl_deadline ,
4695 *
4696 * IOW we can't recycle current parameters.
4697 *
4698 - * Notice that the bandwidth check is done against the period. For
4699 + * Notice that the bandwidth check is done against the deadline. For a
4700 * task with deadline equal to period this is the same as using
4701 - * dl_deadline instead of dl_period in the equation above.
4702 + * dl_period instead of dl_deadline in the equation above.
4703 */
4704 static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
4705 struct sched_dl_entity *pi_se, u64 t)
4706 @@ -476,7 +476,7 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
4707 * of anything below microseconds resolution is actually fiction
4708 * (but still we want to give the user that illusion >;).
4709 */
4710 - left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
4711 + left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
4712 right = ((dl_se->deadline - t) >> DL_SCALE) *
4713 (pi_se->dl_runtime >> DL_SCALE);
4714
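
Cross-multiplication sidesteps a u64 division: runtime / (deadline - t) > dl_runtime / dl_deadline is equivalent to runtime * dl_deadline > dl_runtime * (deadline - t), and the DL_SCALE shifts keep both products inside 64 bits. A hedged numeric sketch (the nanosecond values are invented):

#include <stdio.h>
#include <stdint.h>

#define DL_SCALE 10 /* operands are pre-shifted to avoid 64-bit overflow */

int main(void)
{
	uint64_t dl_runtime  = 10ULL * 1000 * 1000; /* 10 ms reservation        */
	uint64_t dl_deadline = 30ULL * 1000 * 1000; /* 30 ms relative deadline  */
	uint64_t runtime     =  4ULL * 1000 * 1000; /* 4 ms budget left         */
	uint64_t laxity      =  8ULL * 1000 * 1000; /* deadline - t = 8 ms      */

	/* runtime / laxity > dl_runtime / dl_deadline  <=>  left > right */
	uint64_t left  = (dl_deadline >> DL_SCALE) * (runtime >> DL_SCALE);
	uint64_t right = (laxity >> DL_SCALE) * (dl_runtime >> DL_SCALE);

	/* 4/8 = 0.5 > 10/30 = 0.33: keeping the current parameters would
	 * exceed the reserved bandwidth, so a fresh deadline is needed. */
	printf("overflow: %d\n", left > right);
	return 0;
}
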
4715 @@ -505,10 +505,15 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
4716 }
4717 }
4718
4719 +static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
4720 +{
4721 + return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
4722 +}
4723 +
4724 /*
4725 * If the entity depleted all its runtime, and if we want it to sleep
4726 * while waiting for some new execution time to become available, we
4727 - * set the bandwidth enforcement timer to the replenishment instant
4728 + * set the bandwidth replenishment timer to the replenishment instant
4729 * and try to activate it.
4730 *
4731 * Notice that it is important for the caller to know if the timer
4732 @@ -530,7 +535,7 @@ static int start_dl_timer(struct task_struct *p)
4733 * that it is actually coming from rq->clock and not from
4734 * hrtimer's time base reading.
4735 */
4736 - act = ns_to_ktime(dl_se->deadline);
4737 + act = ns_to_ktime(dl_next_period(dl_se));
4738 now = hrtimer_cb_get_time(timer);
4739 delta = ktime_to_ns(now) - rq_clock(rq);
4740 act = ktime_add_ns(act, delta);
4741 @@ -638,6 +643,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
4742 lockdep_unpin_lock(&rq->lock, rf.cookie);
4743 rq = dl_task_offline_migration(rq, p);
4744 rf.cookie = lockdep_pin_lock(&rq->lock);
4745 + update_rq_clock(rq);
4746
4747 /*
4748 * Now that the task has been migrated to the new RQ and we
4749 @@ -689,6 +695,37 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
4750 timer->function = dl_task_timer;
4751 }
4752
4753 +/*
4754 + * During the activation, CBS checks if it can reuse the current task's
4755 + * runtime and period. If the deadline of the task is in the past, CBS
4756 + * cannot use the runtime, and so it replenishes the task. This rule
4757 + * works fine for implicit deadline tasks (deadline == period), and the
4758 + * CBS was designed for implicit deadline tasks. However, a task with
4759 + * constrained deadline (deadline < period) might be awakened after the
4760 + * deadline, but before the next period. In this case, replenishing the
4761 + * task would allow it to run for runtime / deadline. As in this case
4762 + * deadline < period, CBS enables a task to run for more than the
4763 + * runtime / period. In a very loaded system, this can cause a domino
4764 + * effect, making other tasks miss their deadlines.
4765 + *
4766 + * To avoid this problem, in the activation of a constrained deadline
4767 + * task after the deadline but before the next period, throttle the
4768 + * task and set the replenishing timer to the beginning of the next period,
4769 + * unless it is boosted.
4770 + */
4771 +static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
4772 +{
4773 + struct task_struct *p = dl_task_of(dl_se);
4774 + struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
4775 +
4776 + if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
4777 + dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
4778 + if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
4779 + return;
4780 + dl_se->dl_throttled = 1;
4781 + }
4782 +}
4783 +
4784 static
4785 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
4786 {
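
A worked example of the window the comment describes, for a hypothetical constrained task with runtime 2 ms, deadline 5 ms, period 10 ms whose last absolute deadline fell at t = 15 ms (so dl_next_period() gives 15 - 5 + 10 = 20 ms):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* All values in ms; parameters invented for illustration. */
	uint64_t dl_deadline = 5, dl_period = 10, deadline = 15;
	uint64_t next_period = deadline - dl_deadline + dl_period; /* 20 */

	for (uint64_t now = 14; now <= 21; now += 3) {
		/* Only a wakeup strictly between the missed deadline and
		 * the next period is throttled; outside that window the
		 * normal CBS replenishment rules apply. */
		int throttle = (deadline < now) && (now < next_period);

		printf("t=%2llu -> %s\n", (unsigned long long)now,
		       throttle ? "throttle until t=20" : "normal CBS path");
	}
	return 0;
}
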
4787 @@ -922,6 +959,11 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
4788 __dequeue_dl_entity(dl_se);
4789 }
4790
4791 +static inline bool dl_is_constrained(struct sched_dl_entity *dl_se)
4792 +{
4793 + return dl_se->dl_deadline < dl_se->dl_period;
4794 +}
4795 +
4796 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
4797 {
4798 struct task_struct *pi_task = rt_mutex_get_top_task(p);
4799 @@ -947,6 +989,15 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
4800 return;
4801 }
4802
4803 + /*
4804 + * Check if a constrained deadline task was activated
4805 + * after the deadline but before the next period.
4806 + * If that is the case, the task will be throttled and
4807 + * the replenishment timer will be set to the next period.
4808 + */
4809 + if (!p->dl.dl_throttled && dl_is_constrained(&p->dl))
4810 + dl_check_constrained_dl(&p->dl);
4811 +
4812 /*
4813 * If p is throttled, we do nothing. In fact, if it exhausted
4814 * its budget it needs a replenishment and, since it now is on
4815 diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
4816 index 9c131168d933..7a360d6f6798 100644
4817 --- a/kernel/sched/rt.c
4818 +++ b/kernel/sched/rt.c
4819 @@ -2022,8 +2022,9 @@ static void pull_rt_task(struct rq *this_rq)
4820 bool resched = false;
4821 struct task_struct *p;
4822 struct rq *src_rq;
4823 + int rt_overload_count = rt_overloaded(this_rq);
4824
4825 - if (likely(!rt_overloaded(this_rq)))
4826 + if (likely(!rt_overload_count))
4827 return;
4828
4829 /*
4830 @@ -2032,6 +2033,11 @@ static void pull_rt_task(struct rq *this_rq)
4831 */
4832 smp_rmb();
4833
4834 + /* If we are the only overloaded CPU do nothing */
4835 + if (rt_overload_count == 1 &&
4836 + cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
4837 + return;
4838 +
4839 #ifdef HAVE_RT_PUSH_IPI
4840 if (sched_feat(RT_PUSH_IPI)) {
4841 tell_cpu_to_push(this_rq);
4842 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
4843 index c1e50cc0d7b0..4214cd960b8e 100644
4844 --- a/kernel/trace/trace.c
4845 +++ b/kernel/trace/trace.c
4846 @@ -3727,37 +3727,30 @@ static const struct file_operations show_traces_fops = {
4847 .llseek = seq_lseek,
4848 };
4849
4850 -/*
4851 - * The tracer itself will not take this lock, but still we want
4852 - * to provide a consistent cpumask to user-space:
4853 - */
4854 -static DEFINE_MUTEX(tracing_cpumask_update_lock);
4855 -
4856 -/*
4857 - * Temporary storage for the character representation of the
4858 - * CPU bitmask (and one more byte for the newline):
4859 - */
4860 -static char mask_str[NR_CPUS + 1];
4861 -
4862 static ssize_t
4863 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4864 size_t count, loff_t *ppos)
4865 {
4866 struct trace_array *tr = file_inode(filp)->i_private;
4867 + char *mask_str;
4868 int len;
4869
4870 - mutex_lock(&tracing_cpumask_update_lock);
4871 + len = snprintf(NULL, 0, "%*pb\n",
4872 + cpumask_pr_args(tr->tracing_cpumask)) + 1;
4873 + mask_str = kmalloc(len, GFP_KERNEL);
4874 + if (!mask_str)
4875 + return -ENOMEM;
4876
4877 - len = snprintf(mask_str, count, "%*pb\n",
4878 + len = snprintf(mask_str, len, "%*pb\n",
4879 cpumask_pr_args(tr->tracing_cpumask));
4880 if (len >= count) {
4881 count = -EINVAL;
4882 goto out_err;
4883 }
4884 - count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
4885 + count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4886
4887 out_err:
4888 - mutex_unlock(&tracing_cpumask_update_lock);
4889 + kfree(mask_str);
4890
4891 return count;
4892 }
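
The rewrite drops both the mutex and the static NR_CPUS-sized buffer by leaning on the C99 guarantee that snprintf(NULL, 0, ...) returns the length the formatted string would need. The same two-pass idiom in plain userspace C (with an ordinary format string, since %*pb is a kernel-only printk extension):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long mask = 0xff;

	/* Pass 1: NULL/0 returns the would-be length, excluding the NUL. */
	int len = snprintf(NULL, 0, "%lx\n", mask) + 1;

	char *buf = malloc(len);
	if (!buf)
		return 1;

	/* Pass 2: format into the exactly-sized buffer. */
	snprintf(buf, len, "%lx\n", mask);
	fputs(buf, stdout);
	free(buf);
	return 0;
}
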
4893 @@ -3777,8 +3770,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4894 if (err)
4895 goto err_unlock;
4896
4897 - mutex_lock(&tracing_cpumask_update_lock);
4898 -
4899 local_irq_disable();
4900 arch_spin_lock(&tr->max_lock);
4901 for_each_tracing_cpu(cpu) {
4902 @@ -3801,8 +3792,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4903 local_irq_enable();
4904
4905 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4906 -
4907 - mutex_unlock(&tracing_cpumask_update_lock);
4908 free_cpumask_var(tracing_cpumask_new);
4909
4910 return count;
4911 diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
4912 index aa1df1a10dd7..82ce5713f744 100644
4913 --- a/net/bridge/br_netfilter_hooks.c
4914 +++ b/net/bridge/br_netfilter_hooks.c
4915 @@ -706,18 +706,20 @@ static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
4916
4917 static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
4918 {
4919 - struct nf_bridge_info *nf_bridge;
4920 - unsigned int mtu_reserved;
4921 + struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
4922 + unsigned int mtu, mtu_reserved;
4923
4924 mtu_reserved = nf_bridge_mtu_reduction(skb);
4925 + mtu = skb->dev->mtu;
4926 +
4927 + if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
4928 + mtu = nf_bridge->frag_max_size;
4929
4930 - if (skb_is_gso(skb) || skb->len + mtu_reserved <= skb->dev->mtu) {
4931 + if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) {
4932 nf_bridge_info_free(skb);
4933 return br_dev_queue_push_xmit(net, sk, skb);
4934 }
4935
4936 - nf_bridge = nf_bridge_info_get(skb);
4937 -
4938 /* This is wrong! We should preserve the original fragment
4939 * boundaries by preserving frag_list rather than refragmenting.
4940 */
4941 diff --git a/net/core/dev.c b/net/core/dev.c
4942 index c37891828e4e..09007a71c8dd 100644
4943 --- a/net/core/dev.c
4944 +++ b/net/core/dev.c
4945 @@ -1304,6 +1304,7 @@ void netdev_notify_peers(struct net_device *dev)
4946 {
4947 rtnl_lock();
4948 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
4949 + call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
4950 rtnl_unlock();
4951 }
4952 EXPORT_SYMBOL(netdev_notify_peers);
4953 diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
4954 index 48734ee6293f..31f17f0bbd1c 100644
4955 --- a/net/ipv4/icmp.c
4956 +++ b/net/ipv4/icmp.c
4957 @@ -766,7 +766,7 @@ static bool icmp_tag_validation(int proto)
4958 }
4959
4960 /*
4961 - * Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEED, ICMP_QUENCH, and
4962 + * Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEEDED, ICMP_QUENCH, and
4963 * ICMP_PARAMETERPROB.
4964 */
4965
4966 @@ -794,7 +794,8 @@ static bool icmp_unreach(struct sk_buff *skb)
4967 if (iph->ihl < 5) /* Mangled header, drop. */
4968 goto out_err;
4969
4970 - if (icmph->type == ICMP_DEST_UNREACH) {
4971 + switch (icmph->type) {
4972 + case ICMP_DEST_UNREACH:
4973 switch (icmph->code & 15) {
4974 case ICMP_NET_UNREACH:
4975 case ICMP_HOST_UNREACH:
4976 @@ -830,8 +831,16 @@ static bool icmp_unreach(struct sk_buff *skb)
4977 }
4978 if (icmph->code > NR_ICMP_UNREACH)
4979 goto out;
4980 - } else if (icmph->type == ICMP_PARAMETERPROB)
4981 + break;
4982 + case ICMP_PARAMETERPROB:
4983 info = ntohl(icmph->un.gateway) >> 24;
4984 + break;
4985 + case ICMP_TIME_EXCEEDED:
4986 + __ICMP_INC_STATS(net, ICMP_MIB_INTIMEEXCDS);
4987 + if (icmph->code == ICMP_EXC_FRAGTIME)
4988 + goto out;
4989 + break;
4990 + }
4991
4992 /*
4993 * Throw it at our lower layers
4994 diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
4995 index b06acd0f400d..cfc4dd8997e5 100644
4996 --- a/net/l2tp/l2tp_core.c
4997 +++ b/net/l2tp/l2tp_core.c
4998 @@ -1944,7 +1944,7 @@ static __net_exit void l2tp_exit_net(struct net *net)
4999
5000 rcu_read_lock_bh();
5001 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
5002 - (void)l2tp_tunnel_delete(tunnel);
5003 + l2tp_tunnel_delete(tunnel);
5004 }
5005 rcu_read_unlock_bh();
5006
5007 diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
5008 index 1ccd310d01a5..ee03bc866d1b 100644
5009 --- a/net/l2tp/l2tp_netlink.c
5010 +++ b/net/l2tp/l2tp_netlink.c
5011 @@ -287,7 +287,7 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info
5012 l2tp_tunnel_notify(&l2tp_nl_family, info,
5013 tunnel, L2TP_CMD_TUNNEL_DELETE);
5014
5015 - (void) l2tp_tunnel_delete(tunnel);
5016 + l2tp_tunnel_delete(tunnel);
5017
5018 out:
5019 return ret;
5020 diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
5021 index 5c67a696e046..b4b3fe078868 100644
5022 --- a/net/mac80211/mesh.c
5023 +++ b/net/mac80211/mesh.c
5024 @@ -279,8 +279,6 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata,
5025 /* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */
5026 *pos |= ifmsh->ps_peers_deep_sleep ?
5027 IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00;
5028 - *pos++ = 0x00;
5029 -
5030 return 0;
5031 }
5032
5033 diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
5034 index 1309e2c34764..c5a5a6959c1b 100644
5035 --- a/net/mpls/af_mpls.c
5036 +++ b/net/mpls/af_mpls.c
5037 @@ -937,6 +937,8 @@ static void mpls_ifdown(struct net_device *dev, int event)
5038 {
5039 struct mpls_route __rcu **platform_label;
5040 struct net *net = dev_net(dev);
5041 + unsigned int nh_flags = RTNH_F_DEAD | RTNH_F_LINKDOWN;
5042 + unsigned int alive;
5043 unsigned index;
5044
5045 platform_label = rtnl_dereference(net->mpls.platform_label);
5046 @@ -946,9 +948,11 @@ static void mpls_ifdown(struct net_device *dev, int event)
5047 if (!rt)
5048 continue;
5049
5050 + alive = 0;
5051 change_nexthops(rt) {
5052 if (rtnl_dereference(nh->nh_dev) != dev)
5053 - continue;
5054 + goto next;
5055 +
5056 switch (event) {
5057 case NETDEV_DOWN:
5058 case NETDEV_UNREGISTER:
5059 @@ -956,13 +960,16 @@ static void mpls_ifdown(struct net_device *dev, int event)
5060 /* fall through */
5061 case NETDEV_CHANGE:
5062 nh->nh_flags |= RTNH_F_LINKDOWN;
5063 - if (event != NETDEV_UNREGISTER)
5064 - ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
5065 break;
5066 }
5067 if (event == NETDEV_UNREGISTER)
5068 RCU_INIT_POINTER(nh->nh_dev, NULL);
5069 +next:
5070 + if (!(nh->nh_flags & nh_flags))
5071 + alive++;
5072 } endfor_nexthops(rt);
5073 +
5074 + WRITE_ONCE(rt->rt_nhn_alive, alive);
5075 }
5076 }
5077
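
Rather than decrementing rt_nhn_alive on each event, which could count the same nexthop twice once it is both dead and linkdown, the loop now recounts from scratch: a nexthop stays alive only if neither RTNH_F_DEAD nor RTNH_F_LINKDOWN is set, and the total is published once. A sketch of the recount pattern (flag values as in uapi/linux/rtnetlink.h):

#include <stdio.h>

#define RTNH_F_DEAD     1
#define RTNH_F_LINKDOWN 16

int main(void)
{
	/* A hypothetical route with three nexthops in different states. */
	unsigned int nh_flags[] = {
		0,                             /* healthy             */
		RTNH_F_LINKDOWN,               /* carrier lost        */
		RTNH_F_DEAD | RTNH_F_LINKDOWN, /* device unregistered */
	};
	unsigned int dead = RTNH_F_DEAD | RTNH_F_LINKDOWN;
	unsigned int alive = 0;

	for (int i = 0; i < 3; i++)
		if (!(nh_flags[i] & dead))
			alive++;

	printf("rt_nhn_alive = %u\n", alive); /* 1 */
	return 0;
}
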
5078 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
5079 index a6e44ef2ec9a..2155c2498aed 100644
5080 --- a/net/netfilter/ipvs/ip_vs_ctl.c
5081 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
5082 @@ -2040,12 +2040,16 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
5083 seq_puts(seq,
5084 " -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n");
5085 } else {
5086 + struct net *net = seq_file_net(seq);
5087 + struct netns_ipvs *ipvs = net_ipvs(net);
5088 const struct ip_vs_service *svc = v;
5089 const struct ip_vs_iter *iter = seq->private;
5090 const struct ip_vs_dest *dest;
5091 struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
5092 char *sched_name = sched ? sched->name : "none";
5093
5094 + if (svc->ipvs != ipvs)
5095 + return 0;
5096 if (iter->table == ip_vs_svc_table) {
5097 #ifdef CONFIG_IP_VS_IPV6
5098 if (svc->af == AF_INET6)
5099 diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
5100 index 3f9d8d7ec632..b099b64366f3 100644
5101 --- a/net/rxrpc/conn_event.c
5102 +++ b/net/rxrpc/conn_event.c
5103 @@ -275,6 +275,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
5104 rxrpc_conn_retransmit_call(conn, skb);
5105 return 0;
5106
5107 + case RXRPC_PACKET_TYPE_BUSY:
5108 + /* Just ignore BUSY packets for now. */
5109 + return 0;
5110 +
5111 case RXRPC_PACKET_TYPE_ABORT:
5112 if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
5113 &wtmp, sizeof(wtmp)) < 0)
5114 diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
5115 index 44fb8d893c7d..1060d14d4e6a 100644
5116 --- a/net/rxrpc/input.c
5117 +++ b/net/rxrpc/input.c
5118 @@ -649,6 +649,7 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
5119 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
5120 struct rxrpc_peer *peer;
5121 unsigned int mtu;
5122 + bool wake = false;
5123 u32 rwind = ntohl(ackinfo->rwind);
5124
5125 _proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
5126 @@ -656,9 +657,14 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
5127 ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
5128 rwind, ntohl(ackinfo->jumbo_max));
5129
5130 - if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
5131 - rwind = RXRPC_RXTX_BUFF_SIZE - 1;
5132 - call->tx_winsize = rwind;
5133 + if (call->tx_winsize != rwind) {
5134 + if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
5135 + rwind = RXRPC_RXTX_BUFF_SIZE - 1;
5136 + if (rwind > call->tx_winsize)
5137 + wake = true;
5138 + call->tx_winsize = rwind;
5139 + }
5140 +
5141 if (call->cong_ssthresh > rwind)
5142 call->cong_ssthresh = rwind;
5143
5144 @@ -672,6 +678,9 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
5145 spin_unlock_bh(&peer->lock);
5146 _net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
5147 }
5148 +
5149 + if (wake)
5150 + wake_up(&call->waitq);
5151 }
5152
5153 /*
5154 diff --git a/net/socket.c b/net/socket.c
5155 index 6bbccf05854f..05f13b24572c 100644
5156 --- a/net/socket.c
5157 +++ b/net/socket.c
5158 @@ -1702,6 +1702,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
5159 /* We assume all kernel code knows the size of sockaddr_storage */
5160 msg.msg_namelen = 0;
5161 msg.msg_iocb = NULL;
5162 + msg.msg_flags = 0;
5163 if (sock->file->f_flags & O_NONBLOCK)
5164 flags |= MSG_DONTWAIT;
5165 err = sock_recvmsg(sock, &msg, flags);
5166 diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
5167 index ea162fbf68e5..d5adc04bb724 100644
5168 --- a/sound/soc/intel/skylake/skl-sst-utils.c
5169 +++ b/sound/soc/intel/skylake/skl-sst-utils.c
5170 @@ -295,6 +295,7 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
5171 struct uuid_module *module;
5172 struct firmware stripped_fw;
5173 unsigned int safe_file;
5174 + int ret = 0;
5175
5176 /* Get the FW pointer to derive ADSP header */
5177 stripped_fw.data = fw->data;
5178 @@ -343,8 +344,10 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
5179
5180 for (i = 0; i < num_entry; i++, mod_entry++) {
5181 module = kzalloc(sizeof(*module), GFP_KERNEL);
5182 - if (!module)
5183 - return -ENOMEM;
5184 + if (!module) {
5185 + ret = -ENOMEM;
5186 + goto free_uuid_list;
5187 + }
5188
5189 uuid_bin = (uuid_le *)mod_entry->uuid.id;
5190 memcpy(&module->uuid, uuid_bin, sizeof(module->uuid));
5191 @@ -355,8 +358,8 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
5192 size = sizeof(int) * mod_entry->instance_max_count;
5193 module->instance_id = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
5194 if (!module->instance_id) {
5195 - kfree(module);
5196 - return -ENOMEM;
5197 + ret = -ENOMEM;
5198 + goto free_uuid_list;
5199 }
5200
5201 list_add_tail(&module->list, &skl->uuid_list);
5202 @@ -367,6 +370,10 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
5203 }
5204
5205 return 0;
5206 +
5207 +free_uuid_list:
5208 + skl_freeup_uuid_list(skl);
5209 + return ret;
5210 }
5211
5212 void skl_freeup_uuid_list(struct skl_sst *ctx)
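
The fix turns early returns that leaked every previously allocated uuid_module into a single unwind path, the usual kernel goto-cleanup idiom. A generic, self-contained shape of that pattern (hypothetical helper, not the driver code):

#include <stdlib.h>

/* Skeleton of the goto-unwind idiom: any failure after the first
 * allocation jumps to one label that releases everything built so far. */
static int build_list(int n)
{
	void **items = calloc(n, sizeof(*items));
	int ret = 0, i;

	if (!items)
		return -1;

	for (i = 0; i < n; i++) {
		items[i] = malloc(64);
		if (!items[i]) {
			ret = -1;
			goto free_items; /* unwind instead of returning */
		}
	}

free_items:
	while (i--)          /* frees items[i-1] .. items[0];          */
		free(items[i]); /* in this demo the success path tears   */
	free(items);            /* the list down too before returning 0. */
	return ret;
}

int main(void)
{
	return build_list(4) ? 1 : 0;
}
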
5213 diff --git a/sound/soc/sh/rcar/cmd.c b/sound/soc/sh/rcar/cmd.c
5214 index abb5eaac854a..7d92a24b7cfa 100644
5215 --- a/sound/soc/sh/rcar/cmd.c
5216 +++ b/sound/soc/sh/rcar/cmd.c
5217 @@ -31,23 +31,24 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
5218 struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
5219 struct device *dev = rsnd_priv_to_dev(priv);
5220 u32 data;
5221 + u32 path[] = {
5222 + [1] = 1 << 0,
5223 + [5] = 1 << 8,
5224 + [6] = 1 << 12,
5225 + [9] = 1 << 15,
5226 + };
5227
5228 if (!mix && !dvc)
5229 return 0;
5230
5231 + if (ARRAY_SIZE(path) < rsnd_mod_id(mod) + 1)
5232 + return -ENXIO;
5233 +
5234 if (mix) {
5235 struct rsnd_dai *rdai;
5236 struct rsnd_mod *src;
5237 struct rsnd_dai_stream *tio;
5238 int i;
5239 - u32 path[] = {
5240 - [0] = 0,
5241 - [1] = 1 << 0,
5242 - [2] = 0,
5243 - [3] = 0,
5244 - [4] = 0,
5245 - [5] = 1 << 8
5246 - };
5247
5248 /*
5249 * it is assumed that the integrator is well informed about
5250 @@ -70,16 +71,19 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
5251 } else {
5252 struct rsnd_mod *src = rsnd_io_to_mod_src(io);
5253
5254 - u32 path[] = {
5255 - [0] = 0x30000,
5256 - [1] = 0x30001,
5257 - [2] = 0x40000,
5258 - [3] = 0x10000,
5259 - [4] = 0x20000,
5260 - [5] = 0x40100
5261 + u8 cmd_case[] = {
5262 + [0] = 0x3,
5263 + [1] = 0x3,
5264 + [2] = 0x4,
5265 + [3] = 0x1,
5266 + [4] = 0x2,
5267 + [5] = 0x4,
5268 + [6] = 0x1,
5269 + [9] = 0x2,
5270 };
5271
5272 - data = path[rsnd_mod_id(src)];
5273 + data = path[rsnd_mod_id(src)] |
5274 + cmd_case[rsnd_mod_id(src)] << 16;
5275 }
5276
5277 dev_dbg(dev, "ctu/mix path = 0x%08x", data);
5278 diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
5279 index 6bc93cbb3049..edeb74a13c0f 100644
5280 --- a/sound/soc/sh/rcar/dma.c
5281 +++ b/sound/soc/sh/rcar/dma.c
5282 @@ -361,6 +361,20 @@ static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
5283 return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
5284 }
5285
5286 +static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
5287 +{
5288 + struct rsnd_mod *mod = rsnd_mod_get(dma);
5289 + struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
5290 + struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
5291 + volatile void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
5292 + u32 val = ioread32(addr);
5293 +
5294 + val &= ~mask;
5295 + val |= (data & mask);
5296 +
5297 + iowrite32(val, addr);
5298 +}
5299 +
5300 static int rsnd_dmapp_stop(struct rsnd_mod *mod,
5301 struct rsnd_dai_stream *io,
5302 struct rsnd_priv *priv)
5303 @@ -368,10 +382,10 @@ static int rsnd_dmapp_stop(struct rsnd_mod *mod,
5304 struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
5305 int i;
5306
5307 - rsnd_dmapp_write(dma, 0, PDMACHCR);
5308 + rsnd_dmapp_bset(dma, 0, PDMACHCR_DE, PDMACHCR);
5309
5310 for (i = 0; i < 1024; i++) {
5311 - if (0 == rsnd_dmapp_read(dma, PDMACHCR))
5312 + if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
5313 return 0;
5314 udelay(1);
5315 }
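
Clearing only PDMACHCR_DE, instead of zeroing the whole register, is a masked read-modify-write: read, clear the masked bits, OR in the new value, write back. The same shape on a plain variable standing in for the MMIO register (the bit position below is assumed, not taken from the datasheet):

#include <stdio.h>
#include <stdint.h>

/* Masked read-modify-write: update only the bits in 'mask' and
 * preserve the rest of the register's contents. */
static void bset(volatile uint32_t *reg, uint32_t data, uint32_t mask)
{
	uint32_t val = *reg;

	val &= ~mask;
	val |= data & mask;
	*reg = val;
}

#define PDMACHCR_DE 0x1 /* hypothetical enable-bit position */

int main(void)
{
	volatile uint32_t chcr = 0x12345679; /* DE set plus config bits */

	bset(&chcr, 0, PDMACHCR_DE); /* clear DE, keep the configuration */
	printf("chcr = 0x%08x, DE = %u\n",
	       (unsigned)chcr, (unsigned)(chcr & PDMACHCR_DE));
	return 0;
}
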
5316 diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
5317 index 6cb6db005fc4..560cf4b51a99 100644
5318 --- a/sound/soc/sh/rcar/ssi.c
5319 +++ b/sound/soc/sh/rcar/ssi.c
5320 @@ -172,10 +172,15 @@ static u32 rsnd_ssi_run_mods(struct rsnd_dai_stream *io)
5321 {
5322 struct rsnd_mod *ssi_mod = rsnd_io_to_mod_ssi(io);
5323 struct rsnd_mod *ssi_parent_mod = rsnd_io_to_mod_ssip(io);
5324 + u32 mods;
5325
5326 - return rsnd_ssi_multi_slaves_runtime(io) |
5327 - 1 << rsnd_mod_id(ssi_mod) |
5328 - 1 << rsnd_mod_id(ssi_parent_mod);
5329 + mods = rsnd_ssi_multi_slaves_runtime(io) |
5330 + 1 << rsnd_mod_id(ssi_mod);
5331 +
5332 + if (ssi_parent_mod)
5333 + mods |= 1 << rsnd_mod_id(ssi_parent_mod);
5334 +
5335 + return mods;
5336 }
5337
5338 u32 rsnd_ssi_multi_slaves_runtime(struct rsnd_dai_stream *io)
5339 diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
5340 index f7b35e178582..f199d5b11d76 100644
5341 --- a/tools/perf/util/symbol.c
5342 +++ b/tools/perf/util/symbol.c
5343 @@ -202,7 +202,7 @@ void symbols__fixup_end(struct rb_root *symbols)
5344
5345 /* Last entry */
5346 if (curr->end == curr->start)
5347 - curr->end = roundup(curr->start, 4096);
5348 + curr->end = roundup(curr->start, 4096) + 4096;
5349 }
5350
5351 void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
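
The old code set end = roundup(start, 4096), which equals start whenever start is already page aligned, leaving the last symbol with zero size; adding 4096 guarantees a non-empty range. A tiny check of the difference:

#include <stdio.h>

#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned long start = 0x401000; /* already 4 KiB aligned */

	unsigned long old_end = roundup(start, 4096);        /* == start */
	unsigned long new_end = roundup(start, 4096) + 4096; /* start + 4096 */

	printf("old size = %lu, new size = %lu\n",
	       old_end - start, new_end - start);
	return 0;
}
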
5352 diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
5353 index bbab7f4664ac..d116a19477a7 100644
5354 --- a/tools/testing/selftests/vm/Makefile
5355 +++ b/tools/testing/selftests/vm/Makefile
5356 @@ -1,5 +1,9 @@
5357 # Makefile for vm selftests
5358
5359 +ifndef OUTPUT
5360 + OUTPUT := $(shell pwd)
5361 +endif
5362 +
5363 CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS)
5364 BINARIES = compaction_test
5365 BINARIES += hugepage-mmap