Magellan Linux

Contents of /trunk/kernel-alx/patches-4.4/0122-4.4.23-all-fixes.patch



Revision 2834
Thu Oct 13 09:54:05 2016 UTC by niro
File size: 94867 bytes
-linux-4.4.23
1 diff --git a/Makefile b/Makefile
2 index a6512f4eec9f..95421b688f23 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 4
8 -SUBLEVEL = 22
9 +SUBLEVEL = 23
10 EXTRAVERSION =
11 NAME = Blurry Fish Butt
12
13 @@ -128,6 +128,10 @@ _all:
14 # Cancel implicit rules on top Makefile
15 $(CURDIR)/Makefile Makefile: ;
16
17 +ifneq ($(words $(subst :, ,$(CURDIR))), 1)
18 + $(error main directory cannot contain spaces nor colons)
19 +endif
20 +
21 ifneq ($(KBUILD_OUTPUT),)
22 # Invoke a second make in the output directory, passing relevant variables
23 # check that the output directory actually exists
24 @@ -495,6 +499,12 @@ ifeq ($(KBUILD_EXTMOD),)
25 endif
26 endif
27 endif
28 +# install and module_install need also be processed one by one
29 +ifneq ($(filter install,$(MAKECMDGOALS)),)
30 + ifneq ($(filter modules_install,$(MAKECMDGOALS)),)
31 + mixed-targets := 1
32 + endif
33 +endif
34
35 ifeq ($(mixed-targets),1)
36 # ===========================================================================
37 @@ -606,11 +616,16 @@ ARCH_CFLAGS :=
38 include arch/$(SRCARCH)/Makefile
39
40 KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
41 +KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
42
43 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
44 -KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
45 +KBUILD_CFLAGS += -Os
46 else
47 +ifdef CONFIG_PROFILE_ALL_BRANCHES
48 KBUILD_CFLAGS += -O2
49 +else
50 +KBUILD_CFLAGS += -O2
51 +endif
52 endif
53
54 # Tell gcc to never replace conditional load with a non-conditional one
55 @@ -1260,7 +1275,7 @@ help:
56 @echo ' firmware_install- Install all firmware to INSTALL_FW_PATH'
57 @echo ' (default: $$(INSTALL_MOD_PATH)/lib/firmware)'
58 @echo ' dir/ - Build all files in dir and below'
59 - @echo ' dir/file.[oisS] - Build specified target only'
60 + @echo ' dir/file.[ois] - Build specified target only'
61 @echo ' dir/file.lst - Build specified mixed source/assembly target only'
62 @echo ' (requires a recent binutils and recent build (System.map))'
63 @echo ' dir/file.ko - Build module including final link'
64 @@ -1500,11 +1515,11 @@ image_name:
65 # Clear a bunch of variables before executing the submake
66 tools/: FORCE
67 $(Q)mkdir -p $(objtree)/tools
68 - $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(O) subdir=tools -C $(src)/tools/
69 + $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/
70
71 tools/%: FORCE
72 $(Q)mkdir -p $(objtree)/tools
73 - $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(O) subdir=tools -C $(src)/tools/ $*
74 + $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/ $*
75
76 # Single targets
77 # ---------------------------------------------------------------------------
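
The first Makefile hunk above makes the build fail early when the source path contains a space or a colon: $(subst :, ,$(CURDIR)) turns every colon into a space, and $(words ...) then counts more than one word if either character is present. A minimal userspace C sketch of the same condition (the function name is hypothetical, not from the kernel tree):

#include <stdio.h>
#include <string.h>

/* Returns 1 when the path is usable as a GNU make build directory,
 * i.e. it contains neither spaces nor colons - the same condition
 * the $(words $(subst :, ,$(CURDIR))) test above enforces. */
static int build_path_ok(const char *path)
{
	return !strchr(path, ' ') && !strchr(path, ':');
}

int main(void)
{
	printf("%d\n", build_path_ok("/usr/src/linux"));    /* 1 */
	printf("%d\n", build_path_ok("/usr/src/my linux")); /* 0 */
	printf("%d\n", build_path_ok("/usr/src:linux"));    /* 0 */
	return 0;
}
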
78 diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
79 index b445a5d56f43..593da7ffb449 100644
80 --- a/arch/arm/crypto/aes-ce-glue.c
81 +++ b/arch/arm/crypto/aes-ce-glue.c
82 @@ -279,7 +279,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
83 err = blkcipher_walk_done(desc, &walk,
84 walk.nbytes % AES_BLOCK_SIZE);
85 }
86 - if (nbytes) {
87 + if (walk.nbytes % AES_BLOCK_SIZE) {
88 u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
89 u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
90 u8 __aligned(8) tail[AES_BLOCK_SIZE];
91 diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c
92 index f6d02e4cbcda..5c87dff5d46e 100644
93 --- a/arch/arm/mach-pxa/idp.c
94 +++ b/arch/arm/mach-pxa/idp.c
95 @@ -83,7 +83,8 @@ static struct resource smc91x_resources[] = {
96 };
97
98 static struct smc91x_platdata smc91x_platdata = {
99 - .flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT,
100 + .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
101 + SMC91X_USE_DMA | SMC91X_NOWAIT,
102 };
103
104 static struct platform_device smc91x_device = {
105 diff --git a/arch/arm/mach-pxa/xcep.c b/arch/arm/mach-pxa/xcep.c
106 index 13b1d4586d7d..9001312710f7 100644
107 --- a/arch/arm/mach-pxa/xcep.c
108 +++ b/arch/arm/mach-pxa/xcep.c
109 @@ -120,7 +120,8 @@ static struct resource smc91x_resources[] = {
110 };
111
112 static struct smc91x_platdata xcep_smc91x_info = {
113 - .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT | SMC91X_USE_DMA,
114 + .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
115 + SMC91X_NOWAIT | SMC91X_USE_DMA,
116 };
117
118 static struct platform_device smc91x_device = {
119 diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
120 index 44575edc44b1..cf0a7c2359f0 100644
121 --- a/arch/arm/mach-realview/core.c
122 +++ b/arch/arm/mach-realview/core.c
123 @@ -95,7 +95,8 @@ static struct smsc911x_platform_config smsc911x_config = {
124 };
125
126 static struct smc91x_platdata smc91x_platdata = {
127 - .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
128 + .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
129 + SMC91X_NOWAIT,
130 };
131
132 static struct platform_device realview_eth_device = {
133 diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c
134 index 1525d7b5f1b7..88149f85bc49 100644
135 --- a/arch/arm/mach-sa1100/pleb.c
136 +++ b/arch/arm/mach-sa1100/pleb.c
137 @@ -45,7 +45,7 @@ static struct resource smc91x_resources[] = {
138 };
139
140 static struct smc91x_platdata smc91x_platdata = {
141 - .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
142 + .flags = SMC91X_USE_16BIT | SMC91X_USE_8BIT | SMC91X_NOWAIT,
143 };
144
145 static struct platform_device smc91x_device = {
146 diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
147 index 05d9e16c0dfd..6a51dfccfe71 100644
148 --- a/arch/arm64/crypto/aes-glue.c
149 +++ b/arch/arm64/crypto/aes-glue.c
150 @@ -211,7 +211,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
151 err = blkcipher_walk_done(desc, &walk,
152 walk.nbytes % AES_BLOCK_SIZE);
153 }
154 - if (nbytes) {
155 + if (walk.nbytes % AES_BLOCK_SIZE) {
156 u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
157 u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
158 u8 __aligned(8) tail[AES_BLOCK_SIZE];
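
Both ctr_encrypt() fixes above (arm and arm64) replace a test on a byte count that can be stale at that point (nbytes) with the walk's actual residue, walk.nbytes % AES_BLOCK_SIZE, so the final partial block is processed exactly when one exists. A self-contained sketch of the pattern, with a toy keystream standing in for AES:

#include <stdint.h>
#include <string.h>

#define BLOCK_SIZE 16

/* Toy keystream generator; purely illustrative, not AES. */
static void keystream_block(uint8_t out[BLOCK_SIZE], uint64_t ctr)
{
	memset(out, (uint8_t)(ctr + 1), BLOCK_SIZE);
}

static void ctr_encrypt(uint8_t *dst, const uint8_t *src, size_t len)
{
	uint64_t ctr = 0;
	size_t blocks = len / BLOCK_SIZE;
	size_t tail = len % BLOCK_SIZE;	/* the corrected test */
	uint8_t ks[BLOCK_SIZE];
	size_t i, j;

	for (i = 0; i < blocks; i++, ctr++) {
		keystream_block(ks, ctr);
		for (j = 0; j < BLOCK_SIZE; j++)
			dst[i * BLOCK_SIZE + j] = src[i * BLOCK_SIZE + j] ^ ks[j];
	}
	/* Encrypt the partial final block only if the length is not
	 * block-aligned; testing a leftover variable instead could
	 * XOR garbage or skip real data. */
	if (tail) {
		keystream_block(ks, ctr);
		for (j = 0; j < tail; j++)
			dst[blocks * BLOCK_SIZE + j] = src[blocks * BLOCK_SIZE + j] ^ ks[j];
	}
}

int main(void)
{
	uint8_t in[20] = "0123456789abcdefghi", out[20];

	ctr_encrypt(out, in, 20);	/* one full block plus a 4-byte tail */
	return 0;
}
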
159 diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
160 index c6db52ba3a06..10c57771822d 100644
161 --- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
162 +++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
163 @@ -146,7 +146,8 @@ static struct platform_device hitachi_fb_device = {
164 #include <linux/smc91x.h>
165
166 static struct smc91x_platdata smc91x_info = {
167 - .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
168 + .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
169 + SMC91X_NOWAIT,
170 .leda = RPC_LED_100_10,
171 .ledb = RPC_LED_TX_RX,
172 };
173 diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
174 index 2de71e8c104b..93c22468cc14 100644
175 --- a/arch/blackfin/mach-bf561/boards/ezkit.c
176 +++ b/arch/blackfin/mach-bf561/boards/ezkit.c
177 @@ -134,7 +134,8 @@ static struct platform_device net2272_bfin_device = {
178 #include <linux/smc91x.h>
179
180 static struct smc91x_platdata smc91x_info = {
181 - .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
182 + .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
183 + SMC91X_NOWAIT,
184 .leda = RPC_LED_100_10,
185 .ledb = RPC_LED_TX_RX,
186 };
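
Each board file above now declares every bus width its hardware can actually service. The reason shows up later in this same patch: smc_drv_probe() starts rejecting configurations that offer neither 8-bit nor 16-bit access, because most SMC91x registers are 16 bits wide. A sketch of that probe-time rule, with flag values assumed to mirror linux/smc91x.h:

#include <stdio.h>

#define SMC91X_USE_8BIT		(1 << 0)	/* assumed values */
#define SMC91X_USE_16BIT	(1 << 1)
#define SMC91X_USE_32BIT	(1 << 2)

/* 32-bit-only configurations are no longer enough: at least one of
 * 8-bit or 16-bit access must be usable. */
static int smc91x_widths_ok(unsigned int flags)
{
	return (flags & (SMC91X_USE_8BIT | SMC91X_USE_16BIT)) != 0;
}

int main(void)
{
	printf("%d\n", smc91x_widths_ok(SMC91X_USE_32BIT));			/* 0: rejected */
	printf("%d\n", smc91x_widths_ok(SMC91X_USE_16BIT | SMC91X_USE_32BIT));	/* 1 */
	return 0;
}
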
187 diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
188 index f0e314ceb8ba..7f975b20b20c 100644
189 --- a/arch/mips/Kconfig.debug
190 +++ b/arch/mips/Kconfig.debug
191 @@ -113,42 +113,6 @@ config SPINLOCK_TEST
192 help
193 Add several files to the debugfs to test spinlock speed.
194
195 -if CPU_MIPSR6
196 -
197 -choice
198 - prompt "Compact branch policy"
199 - default MIPS_COMPACT_BRANCHES_OPTIMAL
200 -
201 -config MIPS_COMPACT_BRANCHES_NEVER
202 - bool "Never (force delay slot branches)"
203 - help
204 - Pass the -mcompact-branches=never flag to the compiler in order to
205 - force it to always emit branches with delay slots, and make no use
206 - of the compact branch instructions introduced by MIPSr6. This is
207 - useful if you suspect there may be an issue with compact branches in
208 - either the compiler or the CPU.
209 -
210 -config MIPS_COMPACT_BRANCHES_OPTIMAL
211 - bool "Optimal (use where beneficial)"
212 - help
213 - Pass the -mcompact-branches=optimal flag to the compiler in order for
214 - it to make use of compact branch instructions where it deems them
215 - beneficial, and use branches with delay slots elsewhere. This is the
216 - default compiler behaviour, and should be used unless you have a
217 - reason to choose otherwise.
218 -
219 -config MIPS_COMPACT_BRANCHES_ALWAYS
220 - bool "Always (force compact branches)"
221 - help
222 - Pass the -mcompact-branches=always flag to the compiler in order to
223 - force it to always emit compact branches, making no use of branch
224 - instructions with delay slots. This can result in more compact code
225 - which may be beneficial in some scenarios.
226 -
227 -endchoice
228 -
229 -endif # CPU_MIPSR6
230 -
231 config SCACHE_DEBUGFS
232 bool "L2 cache debugfs entries"
233 depends on DEBUG_FS
234 diff --git a/arch/mips/Makefile b/arch/mips/Makefile
235 index 3f70ba54ae21..252e347958f3 100644
236 --- a/arch/mips/Makefile
237 +++ b/arch/mips/Makefile
238 @@ -204,10 +204,6 @@ toolchain-msa := $(call cc-option-yn,$(mips-cflags) -mhard-float -mfp64 -Wa$(
239 cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA
240 endif
241
242 -cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_NEVER) += -mcompact-branches=never
243 -cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_OPTIMAL) += -mcompact-branches=optimal
244 -cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_ALWAYS) += -mcompact-branches=always
245 -
246 #
247 # Firmware support
248 #
249 diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
250 index e689b894353c..8dedee1def83 100644
251 --- a/arch/mips/include/asm/asmmacro.h
252 +++ b/arch/mips/include/asm/asmmacro.h
253 @@ -135,6 +135,7 @@
254 ldc1 $f28, THREAD_FPR28(\thread)
255 ldc1 $f30, THREAD_FPR30(\thread)
256 ctc1 \tmp, fcr31
257 + .set pop
258 .endm
259
260 .macro fpu_restore_16odd thread
261 diff --git a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
262 index 2f82bfa3a773..c9f5769dfc8f 100644
263 --- a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
264 +++ b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
265 @@ -11,11 +11,13 @@
266 #define CP0_EBASE $15, 1
267
268 .macro kernel_entry_setup
269 +#ifdef CONFIG_SMP
270 mfc0 t0, CP0_EBASE
271 andi t0, t0, 0x3ff # CPUNum
272 beqz t0, 1f
273 # CPUs other than zero goto smp_bootstrap
274 j smp_bootstrap
275 +#endif /* CONFIG_SMP */
276
277 1:
278 .endm
279 diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
280 index 4674a74a08b5..af27334d6809 100644
281 --- a/arch/mips/kernel/mips-r2-to-r6-emul.c
282 +++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
283 @@ -1164,7 +1164,9 @@ fpu_emul:
284 regs->regs[31] = r31;
285 regs->cp0_epc = epc;
286 if (!used_math()) { /* First time FPU user. */
287 + preempt_disable();
288 err = init_fpu();
289 + preempt_enable();
290 set_used_math();
291 }
292 lose_fpu(1); /* Save FPU state for the emulator. */
293 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
294 index 89847bee2b53..44a6f25e902e 100644
295 --- a/arch/mips/kernel/process.c
296 +++ b/arch/mips/kernel/process.c
297 @@ -593,14 +593,14 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
298 return -EOPNOTSUPP;
299
300 /* Avoid inadvertently triggering emulation */
301 - if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
302 - !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
303 + if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
304 + !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
305 return -EOPNOTSUPP;
306 - if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
307 + if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
308 return -EOPNOTSUPP;
309
310 /* FR = 0 not supported in MIPS R6 */
311 - if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
312 + if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
313 return -EOPNOTSUPP;
314
315 /* Proceed with the mode switch */
316 diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
317 index 2b521e07b860..7fef02a9eb85 100644
318 --- a/arch/mips/kernel/smp.c
319 +++ b/arch/mips/kernel/smp.c
320 @@ -174,6 +174,9 @@ asmlinkage void start_secondary(void)
321 cpumask_set_cpu(cpu, &cpu_coherent_mask);
322 notify_cpu_starting(cpu);
323
324 + cpumask_set_cpu(cpu, &cpu_callin_map);
325 + synchronise_count_slave(cpu);
326 +
327 set_cpu_online(cpu, true);
328
329 set_cpu_sibling_map(cpu);
330 @@ -181,10 +184,6 @@ asmlinkage void start_secondary(void)
331
332 calculate_cpu_foreign_map();
333
334 - cpumask_set_cpu(cpu, &cpu_callin_map);
335 -
336 - synchronise_count_slave(cpu);
337 -
338 /*
339 * irq will be enabled in ->smp_finish(), enabling it too early
340 * is dangerous.
341 diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
342 index 975e99759bab..5649a9e429e0 100644
343 --- a/arch/mips/kernel/vdso.c
344 +++ b/arch/mips/kernel/vdso.c
345 @@ -39,16 +39,16 @@ static struct vm_special_mapping vdso_vvar_mapping = {
346 static void __init init_vdso_image(struct mips_vdso_image *image)
347 {
348 unsigned long num_pages, i;
349 + unsigned long data_pfn;
350
351 BUG_ON(!PAGE_ALIGNED(image->data));
352 BUG_ON(!PAGE_ALIGNED(image->size));
353
354 num_pages = image->size / PAGE_SIZE;
355
356 - for (i = 0; i < num_pages; i++) {
357 - image->mapping.pages[i] =
358 - virt_to_page(image->data + (i * PAGE_SIZE));
359 - }
360 + data_pfn = __phys_to_pfn(__pa_symbol(image->data));
361 + for (i = 0; i < num_pages; i++)
362 + image->mapping.pages[i] = pfn_to_page(data_pfn + i);
363 }
364
365 static int __init init_vdso(void)
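
The vdso fix above swaps virt_to_page() for explicit pfn arithmetic: image->data is a kernel-image symbol, and virt_to_page() is only valid for linear-map addresses, so the page array has to be built from the symbol's physical frame number instead. A userspace sketch of the arithmetic, assuming 4 KiB pages and a made-up load address:

#include <stdio.h>

#define PAGE_SHIFT 12			/* 4 KiB pages, assumed */

/* Given the physical address of a page-aligned blob, the i-th page's
 * frame number is just the base pfn plus i - the replacement for
 * calling virt_to_page() on an address it was never valid for. */
static unsigned long pfn_of_page(unsigned long phys_base, unsigned long i)
{
	return (phys_base >> PAGE_SHIFT) + i;
}

int main(void)
{
	unsigned long base = 0x80200000UL;	/* hypothetical load address */
	unsigned long i;

	for (i = 0; i < 3; i++)
		printf("page %lu -> pfn %#lx\n", i, pfn_of_page(base, i));
	return 0;
}
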
366 diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
367 index 8cc1622b2ee0..dca7bc87dad9 100644
368 --- a/crypto/blkcipher.c
369 +++ b/crypto/blkcipher.c
370 @@ -234,6 +234,8 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
371 return blkcipher_walk_done(desc, walk, -EINVAL);
372 }
373
374 + bsize = min(walk->walk_blocksize, n);
375 +
376 walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
377 BLKCIPHER_WALK_DIFF);
378 if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
379 @@ -246,7 +248,6 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
380 }
381 }
382
383 - bsize = min(walk->walk_blocksize, n);
384 n = scatterwalk_clamp(&walk->in, n);
385 n = scatterwalk_clamp(&walk->out, n);
386
387 diff --git a/crypto/echainiv.c b/crypto/echainiv.c
388 index b96a84560b67..343a74e96e2a 100644
389 --- a/crypto/echainiv.c
390 +++ b/crypto/echainiv.c
391 @@ -1,8 +1,8 @@
392 /*
393 * echainiv: Encrypted Chain IV Generator
394 *
395 - * This generator generates an IV based on a sequence number by xoring it
396 - * with a salt and then encrypting it with the same key as used to encrypt
397 + * This generator generates an IV based on a sequence number by multiplying
398 + * it with a salt and then encrypting it with the same key as used to encrypt
399 * the plain text. This algorithm requires that the block size be equal
400 * to the IV size. It is mainly useful for CBC.
401 *
402 @@ -23,81 +23,17 @@
403 #include <linux/err.h>
404 #include <linux/init.h>
405 #include <linux/kernel.h>
406 -#include <linux/mm.h>
407 #include <linux/module.h>
408 -#include <linux/percpu.h>
409 -#include <linux/spinlock.h>
410 +#include <linux/slab.h>
411 #include <linux/string.h>
412
413 -#define MAX_IV_SIZE 16
414 -
415 -static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
416 -
417 -/* We don't care if we get preempted and read/write IVs from the next CPU. */
418 -static void echainiv_read_iv(u8 *dst, unsigned size)
419 -{
420 - u32 *a = (u32 *)dst;
421 - u32 __percpu *b = echainiv_iv;
422 -
423 - for (; size >= 4; size -= 4) {
424 - *a++ = this_cpu_read(*b);
425 - b++;
426 - }
427 -}
428 -
429 -static void echainiv_write_iv(const u8 *src, unsigned size)
430 -{
431 - const u32 *a = (const u32 *)src;
432 - u32 __percpu *b = echainiv_iv;
433 -
434 - for (; size >= 4; size -= 4) {
435 - this_cpu_write(*b, *a);
436 - a++;
437 - b++;
438 - }
439 -}
440 -
441 -static void echainiv_encrypt_complete2(struct aead_request *req, int err)
442 -{
443 - struct aead_request *subreq = aead_request_ctx(req);
444 - struct crypto_aead *geniv;
445 - unsigned int ivsize;
446 -
447 - if (err == -EINPROGRESS)
448 - return;
449 -
450 - if (err)
451 - goto out;
452 -
453 - geniv = crypto_aead_reqtfm(req);
454 - ivsize = crypto_aead_ivsize(geniv);
455 -
456 - echainiv_write_iv(subreq->iv, ivsize);
457 -
458 - if (req->iv != subreq->iv)
459 - memcpy(req->iv, subreq->iv, ivsize);
460 -
461 -out:
462 - if (req->iv != subreq->iv)
463 - kzfree(subreq->iv);
464 -}
465 -
466 -static void echainiv_encrypt_complete(struct crypto_async_request *base,
467 - int err)
468 -{
469 - struct aead_request *req = base->data;
470 -
471 - echainiv_encrypt_complete2(req, err);
472 - aead_request_complete(req, err);
473 -}
474 -
475 static int echainiv_encrypt(struct aead_request *req)
476 {
477 struct crypto_aead *geniv = crypto_aead_reqtfm(req);
478 struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
479 struct aead_request *subreq = aead_request_ctx(req);
480 - crypto_completion_t compl;
481 - void *data;
482 + __be64 nseqno;
483 + u64 seqno;
484 u8 *info;
485 unsigned int ivsize = crypto_aead_ivsize(geniv);
486 int err;
487 @@ -107,8 +43,6 @@ static int echainiv_encrypt(struct aead_request *req)
488
489 aead_request_set_tfm(subreq, ctx->child);
490
491 - compl = echainiv_encrypt_complete;
492 - data = req;
493 info = req->iv;
494
495 if (req->src != req->dst) {
496 @@ -123,29 +57,30 @@ static int echainiv_encrypt(struct aead_request *req)
497 return err;
498 }
499
500 - if (unlikely(!IS_ALIGNED((unsigned long)info,
501 - crypto_aead_alignmask(geniv) + 1))) {
502 - info = kmalloc(ivsize, req->base.flags &
503 - CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
504 - GFP_ATOMIC);
505 - if (!info)
506 - return -ENOMEM;
507 -
508 - memcpy(info, req->iv, ivsize);
509 - }
510 -
511 - aead_request_set_callback(subreq, req->base.flags, compl, data);
512 + aead_request_set_callback(subreq, req->base.flags,
513 + req->base.complete, req->base.data);
514 aead_request_set_crypt(subreq, req->dst, req->dst,
515 req->cryptlen, info);
516 aead_request_set_ad(subreq, req->assoclen);
517
518 - crypto_xor(info, ctx->salt, ivsize);
519 + memcpy(&nseqno, info + ivsize - 8, 8);
520 + seqno = be64_to_cpu(nseqno);
521 + memset(info, 0, ivsize);
522 +
523 scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
524 - echainiv_read_iv(info, ivsize);
525
526 - err = crypto_aead_encrypt(subreq);
527 - echainiv_encrypt_complete2(req, err);
528 - return err;
529 + do {
530 + u64 a;
531 +
532 + memcpy(&a, ctx->salt + ivsize - 8, 8);
533 +
534 + a |= 1;
535 + a *= seqno;
536 +
537 + memcpy(info + ivsize - 8, &a, 8);
538 + } while ((ivsize -= 8));
539 +
540 + return crypto_aead_encrypt(subreq);
541 }
542
543 static int echainiv_decrypt(struct aead_request *req)
544 @@ -192,8 +127,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
545 alg = crypto_spawn_aead_alg(spawn);
546
547 err = -EINVAL;
548 - if (inst->alg.ivsize & (sizeof(u32) - 1) ||
549 - inst->alg.ivsize > MAX_IV_SIZE)
550 + if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
551 goto free_inst;
552
553 inst->alg.encrypt = echainiv_encrypt;
554 @@ -202,7 +136,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
555 inst->alg.init = aead_init_geniv;
556 inst->alg.exit = aead_exit_geniv;
557
558 - inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
559 inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
560 inst->alg.base.cra_ctxsize += inst->alg.ivsize;
561
562 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
563 index 4bef72a9d106..3fda594700e0 100644
564 --- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
565 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
566 @@ -59,9 +59,11 @@ static void
567 nv40_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
568 {
569 struct nvkm_device *device = pm->engine.subdev.device;
570 - if (pm->sequence != pm->sequence) {
571 + struct nv40_pm *nv40pm = container_of(pm, struct nv40_pm, base);
572 +
573 + if (nv40pm->sequence != pm->sequence) {
574 nvkm_wr32(device, 0x400084, 0x00000020);
575 - pm->sequence = pm->sequence;
576 + nv40pm->sequence = pm->sequence;
577 }
578 }
579
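
The nv40 fix above cures a copy-paste bug: pm->sequence != pm->sequence can never be true, so the counter-resync write never ran. The repair reaches the wrapping nv40_pm through container_of(), which recovers the outer structure from a pointer to an embedded member. A minimal userspace rendition of the idiom:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct nvkm_pm { int sequence; };

struct nv40_pm {
	struct nvkm_pm base;	/* embedded member */
	int sequence;		/* the field the bug was meant to compare */
};

int main(void)
{
	struct nv40_pm chip = { .base = { .sequence = 7 }, .sequence = 3 };
	struct nvkm_pm *pm = &chip.base;

	/* Recover the outer object from the embedded pointer. */
	struct nv40_pm *nv40pm = container_of(pm, struct nv40_pm, base);

	if (nv40pm->sequence != pm->sequence)	/* now a meaningful test */
		nv40pm->sequence = pm->sequence;
	printf("%d\n", nv40pm->sequence);	/* 7 */
	return 0;
}
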
580 diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
581 index 56e1d633875e..6e6c76080d6a 100644
582 --- a/drivers/gpu/drm/qxl/qxl_draw.c
583 +++ b/drivers/gpu/drm/qxl/qxl_draw.c
584 @@ -136,6 +136,8 @@ static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
585 * correctly globaly, since that would require
586 * tracking all of our palettes. */
587 ret = qxl_bo_kmap(palette_bo, (void **)&pal);
588 + if (ret)
589 + return ret;
590 pal->num_ents = 2;
591 pal->unique = unique++;
592 if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
593 diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
594 index 76e699f9ed97..eef3aa6007f1 100644
595 --- a/drivers/i2c/busses/i2c-eg20t.c
596 +++ b/drivers/i2c/busses/i2c-eg20t.c
597 @@ -773,13 +773,6 @@ static int pch_i2c_probe(struct pci_dev *pdev,
598 /* Set the number of I2C channel instance */
599 adap_info->ch_num = id->driver_data;
600
601 - ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
602 - KBUILD_MODNAME, adap_info);
603 - if (ret) {
604 - pch_pci_err(pdev, "request_irq FAILED\n");
605 - goto err_request_irq;
606 - }
607 -
608 for (i = 0; i < adap_info->ch_num; i++) {
609 pch_adap = &adap_info->pch_data[i].pch_adapter;
610 adap_info->pch_i2c_suspended = false;
611 @@ -796,6 +789,17 @@ static int pch_i2c_probe(struct pci_dev *pdev,
612 adap_info->pch_data[i].pch_base_address = base_addr + 0x100 * i;
613
614 pch_adap->dev.parent = &pdev->dev;
615 + }
616 +
617 + ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
618 + KBUILD_MODNAME, adap_info);
619 + if (ret) {
620 + pch_pci_err(pdev, "request_irq FAILED\n");
621 + goto err_request_irq;
622 + }
623 +
624 + for (i = 0; i < adap_info->ch_num; i++) {
625 + pch_adap = &adap_info->pch_data[i].pch_adapter;
626
627 pch_i2c_init(&adap_info->pch_data[i]);
628
629 diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
630 index fdcbdab808e9..33b11563cde7 100644
631 --- a/drivers/i2c/busses/i2c-qup.c
632 +++ b/drivers/i2c/busses/i2c-qup.c
633 @@ -727,7 +727,8 @@ static int qup_i2c_pm_resume_runtime(struct device *device)
634 #ifdef CONFIG_PM_SLEEP
635 static int qup_i2c_suspend(struct device *device)
636 {
637 - qup_i2c_pm_suspend_runtime(device);
638 + if (!pm_runtime_suspended(device))
639 + return qup_i2c_pm_suspend_runtime(device);
640 return 0;
641 }
642
643 diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
644 index 7ede941e9301..131b434af994 100644
645 --- a/drivers/iio/industrialio-core.c
646 +++ b/drivers/iio/industrialio-core.c
647 @@ -433,16 +433,15 @@ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
648 scale_db = true;
649 case IIO_VAL_INT_PLUS_MICRO:
650 if (vals[1] < 0)
651 - return sprintf(buf, "-%ld.%06u%s\n", abs(vals[0]),
652 - -vals[1],
653 - scale_db ? " dB" : "");
654 + return sprintf(buf, "-%d.%06u%s\n", abs(vals[0]),
655 + -vals[1], scale_db ? " dB" : "");
656 else
657 return sprintf(buf, "%d.%06u%s\n", vals[0], vals[1],
658 scale_db ? " dB" : "");
659 case IIO_VAL_INT_PLUS_NANO:
660 if (vals[1] < 0)
661 - return sprintf(buf, "-%ld.%09u\n", abs(vals[0]),
662 - -vals[1]);
663 + return sprintf(buf, "-%d.%09u\n", abs(vals[0]),
664 + -vals[1]);
665 else
666 return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
667 case IIO_VAL_FRACTIONAL:
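
The iio change above is a format-string fix: abs() applied to an int yields an int, so the matching printf conversion is %d; the old %ld reads a long from the varargs and is undefined behavior wherever long is wider than int. A two-line demonstration of the corrected form:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int val = -3, micro = -141593;

	/* abs(int) yields int, so %d is the matching conversion;
	 * %ld (as in the old code) mismatches the promoted argument. */
	printf("-%d.%06u\n", abs(val), -micro);	/* prints -3.141593 */
	return 0;
}
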
668 diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
669 index 3821c4786662..565bb2c140ed 100644
670 --- a/drivers/iommu/dmar.c
671 +++ b/drivers/iommu/dmar.c
672 @@ -1858,10 +1858,11 @@ static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
673 /*
674 * All PCI devices managed by this unit should have been destroyed.
675 */
676 - if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt)
677 + if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
678 for_each_active_dev_scope(dmaru->devices,
679 dmaru->devices_cnt, i, dev)
680 return -EBUSY;
681 + }
682
683 ret = dmar_ir_hotplug(dmaru, false);
684 if (ret == 0)
685 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
686 index 24d81308a1a6..b7f852d824a3 100644
687 --- a/drivers/iommu/intel-iommu.c
688 +++ b/drivers/iommu/intel-iommu.c
689 @@ -4182,10 +4182,11 @@ int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
690 if (!atsru)
691 return 0;
692
693 - if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
694 + if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
695 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
696 i, dev)
697 return -EBUSY;
698 + }
699
700 return 0;
701 }
702 diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
703 index f0480d687f17..ba780c45f645 100644
704 --- a/drivers/media/platform/am437x/am437x-vpfe.c
705 +++ b/drivers/media/platform/am437x/am437x-vpfe.c
706 @@ -1706,7 +1706,7 @@ static int vpfe_get_app_input_index(struct vpfe_device *vpfe,
707 sdinfo = &cfg->sub_devs[i];
708 client = v4l2_get_subdevdata(sdinfo->sd);
709 if (client->addr == curr_client->addr &&
710 - client->adapter->nr == client->adapter->nr) {
711 + client->adapter->nr == curr_client->adapter->nr) {
712 if (vpfe->current_input >= 1)
713 return -1;
714 *app_input_index = j + vpfe->current_input;
715 diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
716 index 744ca5cacc9b..f9fa3fad728e 100644
717 --- a/drivers/mtd/maps/pmcmsp-flash.c
718 +++ b/drivers/mtd/maps/pmcmsp-flash.c
719 @@ -75,15 +75,15 @@ static int __init init_msp_flash(void)
720
721 printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt);
722
723 - msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL);
724 + msp_flash = kcalloc(fcnt, sizeof(*msp_flash), GFP_KERNEL);
725 if (!msp_flash)
726 return -ENOMEM;
727
728 - msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL);
729 + msp_parts = kcalloc(fcnt, sizeof(*msp_parts), GFP_KERNEL);
730 if (!msp_parts)
731 goto free_msp_flash;
732
733 - msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL);
734 + msp_maps = kcalloc(fcnt, sizeof(*msp_maps), GFP_KERNEL);
735 if (!msp_maps)
736 goto free_msp_parts;
737
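
The pmcmsp-flash change above swaps kmalloc(n * size) for kcalloc(n, sizeof(*ptr)): the multiplication is overflow-checked, the memory arrives zeroed, and sizeof(*ptr) keeps the element size in sync with the pointer's type even if that type later changes. The userspace equivalent of the idiom is calloc():

#include <stdio.h>
#include <stdlib.h>

struct map_info { void *virt; size_t size; };

int main(void)
{
	size_t fcnt = 4;

	/* calloc(n, size) checks n * size for overflow and zeroes the
	 * block; sizeof(*maps) tracks the element type automatically. */
	struct map_info **maps = calloc(fcnt, sizeof(*maps));

	if (!maps)
		return 1;
	printf("allocated %zu pointers, maps[0]=%p\n", fcnt, (void *)maps[0]);
	free(maps);
	return 0;
}
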
738 diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
739 index 142fc3d79463..784c6e1a0391 100644
740 --- a/drivers/mtd/maps/sa1100-flash.c
741 +++ b/drivers/mtd/maps/sa1100-flash.c
742 @@ -230,8 +230,10 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
743
744 info->mtd = mtd_concat_create(cdev, info->num_subdev,
745 plat->name);
746 - if (info->mtd == NULL)
747 + if (info->mtd == NULL) {
748 ret = -ENXIO;
749 + goto err;
750 + }
751 }
752 info->mtd->dev.parent = &pdev->dev;
753
754 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
755 index b3d70a7a5262..5dca77e0ffed 100644
756 --- a/drivers/net/bonding/bond_main.c
757 +++ b/drivers/net/bonding/bond_main.c
758 @@ -1317,9 +1317,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
759 slave_dev->name);
760 }
761
762 - /* already enslaved */
763 - if (slave_dev->flags & IFF_SLAVE) {
764 - netdev_dbg(bond_dev, "Error: Device was already enslaved\n");
765 + /* already in-use? */
766 + if (netdev_is_rx_handler_busy(slave_dev)) {
767 + netdev_err(bond_dev,
768 + "Error: Device is in use and cannot be enslaved\n");
769 return -EBUSY;
770 }
771
772 diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
773 index 41c0fc9f3b14..16f7cadda5c3 100644
774 --- a/drivers/net/can/flexcan.c
775 +++ b/drivers/net/can/flexcan.c
776 @@ -1268,11 +1268,10 @@ static int __maybe_unused flexcan_suspend(struct device *device)
777 struct flexcan_priv *priv = netdev_priv(dev);
778 int err;
779
780 - err = flexcan_chip_disable(priv);
781 - if (err)
782 - return err;
783 -
784 if (netif_running(dev)) {
785 + err = flexcan_chip_disable(priv);
786 + if (err)
787 + return err;
788 netif_stop_queue(dev);
789 netif_device_detach(dev);
790 }
791 @@ -1285,13 +1284,17 @@ static int __maybe_unused flexcan_resume(struct device *device)
792 {
793 struct net_device *dev = dev_get_drvdata(device);
794 struct flexcan_priv *priv = netdev_priv(dev);
795 + int err;
796
797 priv->can.state = CAN_STATE_ERROR_ACTIVE;
798 if (netif_running(dev)) {
799 netif_device_attach(dev);
800 netif_start_queue(dev);
801 + err = flexcan_chip_enable(priv);
802 + if (err)
803 + return err;
804 }
805 - return flexcan_chip_enable(priv);
806 + return 0;
807 }
808
809 static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume);
810 diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
811 index 6bba1c98d764..c7994e372284 100644
812 --- a/drivers/net/dsa/bcm_sf2.h
813 +++ b/drivers/net/dsa/bcm_sf2.h
814 @@ -187,8 +187,8 @@ static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \
815 static inline void intrl2_##which##_mask_clear(struct bcm_sf2_priv *priv, \
816 u32 mask) \
817 { \
818 - intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
819 priv->irq##which##_mask &= ~(mask); \
820 + intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
821 } \
822 static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \
823 u32 mask) \
824 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
825 index 037fc4cdf5af..cc199063612a 100644
826 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
827 +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
828 @@ -143,13 +143,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
829 return cmd->cmd_buf + (idx << cmd->log_stride);
830 }
831
832 -static u8 xor8_buf(void *buf, int len)
833 +static u8 xor8_buf(void *buf, size_t offset, int len)
834 {
835 u8 *ptr = buf;
836 u8 sum = 0;
837 int i;
838 + int end = len + offset;
839
840 - for (i = 0; i < len; i++)
841 + for (i = offset; i < end; i++)
842 sum ^= ptr[i];
843
844 return sum;
845 @@ -157,41 +158,49 @@ static u8 xor8_buf(void *buf, int len)
846
847 static int verify_block_sig(struct mlx5_cmd_prot_block *block)
848 {
849 - if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
850 + size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
851 + int xor_len = sizeof(*block) - sizeof(block->data) - 1;
852 +
853 + if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
854 return -EINVAL;
855
856 - if (xor8_buf(block, sizeof(*block)) != 0xff)
857 + if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
858 return -EINVAL;
859
860 return 0;
861 }
862
863 -static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
864 - int csum)
865 +static void calc_block_sig(struct mlx5_cmd_prot_block *block)
866 {
867 - block->token = token;
868 - if (csum) {
869 - block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
870 - sizeof(block->data) - 2);
871 - block->sig = ~xor8_buf(block, sizeof(*block) - 1);
872 - }
873 + int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
874 + size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
875 +
876 + block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
877 + block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
878 }
879
880 -static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
881 +static void calc_chain_sig(struct mlx5_cmd_msg *msg)
882 {
883 struct mlx5_cmd_mailbox *next = msg->next;
884 -
885 - while (next) {
886 - calc_block_sig(next->buf, token, csum);
887 + int size = msg->len;
888 + int blen = size - min_t(int, sizeof(msg->first.data), size);
889 + int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
890 + / MLX5_CMD_DATA_BLOCK_SIZE;
891 + int i = 0;
892 +
893 + for (i = 0; i < n && next; i++) {
894 + calc_block_sig(next->buf);
895 next = next->next;
896 }
897 }
898
899 static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
900 {
901 - ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
902 - calc_chain_sig(ent->in, ent->token, csum);
903 - calc_chain_sig(ent->out, ent->token, csum);
904 + ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
905 + if (csum) {
906 + calc_chain_sig(ent->in);
907 + calc_chain_sig(ent->out);
908 + }
909 }
910
911 static void poll_timeout(struct mlx5_cmd_work_ent *ent)
912 @@ -222,12 +231,17 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
913 struct mlx5_cmd_mailbox *next = ent->out->next;
914 int err;
915 u8 sig;
916 + int size = ent->out->len;
917 + int blen = size - min_t(int, sizeof(ent->out->first.data), size);
918 + int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
919 + / MLX5_CMD_DATA_BLOCK_SIZE;
920 + int i = 0;
921
922 - sig = xor8_buf(ent->lay, sizeof(*ent->lay));
923 + sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
924 if (sig != 0xff)
925 return -EINVAL;
926
927 - while (next) {
928 + for (i = 0; i < n && next; i++) {
929 err = verify_block_sig(next->buf);
930 if (err)
931 return err;
932 @@ -641,7 +655,6 @@ static void cmd_work_handler(struct work_struct *work)
933 spin_unlock_irqrestore(&cmd->alloc_lock, flags);
934 }
935
936 - ent->token = alloc_token(cmd);
937 cmd->ent_arr[ent->idx] = ent;
938 lay = get_inst(cmd, ent->idx);
939 ent->lay = lay;
940 @@ -755,7 +768,8 @@ static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
941 static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
942 struct mlx5_cmd_msg *out, void *uout, int uout_size,
943 mlx5_cmd_cbk_t callback,
944 - void *context, int page_queue, u8 *status)
945 + void *context, int page_queue, u8 *status,
946 + u8 token)
947 {
948 struct mlx5_cmd *cmd = &dev->cmd;
949 struct mlx5_cmd_work_ent *ent;
950 @@ -772,6 +786,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
951 if (IS_ERR(ent))
952 return PTR_ERR(ent);
953
954 + ent->token = token;
955 +
956 if (!callback)
957 init_completion(&ent->done);
958
959 @@ -844,7 +860,8 @@ static const struct file_operations fops = {
960 .write = dbg_write,
961 };
962
963 -static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
964 +static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
965 + u8 token)
966 {
967 struct mlx5_cmd_prot_block *block;
968 struct mlx5_cmd_mailbox *next;
969 @@ -870,6 +887,7 @@ static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
970 memcpy(block->data, from, copy);
971 from += copy;
972 size -= copy;
973 + block->token = token;
974 next = next->next;
975 }
976
977 @@ -939,7 +957,8 @@ static void free_cmd_box(struct mlx5_core_dev *dev,
978 }
979
980 static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
981 - gfp_t flags, int size)
982 + gfp_t flags, int size,
983 + u8 token)
984 {
985 struct mlx5_cmd_mailbox *tmp, *head = NULL;
986 struct mlx5_cmd_prot_block *block;
987 @@ -968,6 +987,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
988 tmp->next = head;
989 block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
990 block->block_num = cpu_to_be32(n - i - 1);
991 + block->token = token;
992 head = tmp;
993 }
994 msg->next = head;
995 @@ -1351,7 +1371,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
996 }
997
998 if (IS_ERR(msg))
999 - msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);
1000 + msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
1001
1002 return msg;
1003 }
1004 @@ -1376,6 +1396,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
1005 int err;
1006 u8 status = 0;
1007 u32 drv_synd;
1008 + u8 token;
1009
1010 if (pci_channel_offline(dev->pdev) ||
1011 dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
1012 @@ -1394,20 +1415,22 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
1013 return err;
1014 }
1015
1016 - err = mlx5_copy_to_msg(inb, in, in_size);
1017 + token = alloc_token(&dev->cmd);
1018 +
1019 + err = mlx5_copy_to_msg(inb, in, in_size, token);
1020 if (err) {
1021 mlx5_core_warn(dev, "err %d\n", err);
1022 goto out_in;
1023 }
1024
1025 - outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
1026 + outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
1027 if (IS_ERR(outb)) {
1028 err = PTR_ERR(outb);
1029 goto out_in;
1030 }
1031
1032 err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
1033 - pages_queue, &status);
1034 + pages_queue, &status, token);
1035 if (err)
1036 goto out_out;
1037
1038 @@ -1475,7 +1498,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
1039 INIT_LIST_HEAD(&cmd->cache.med.head);
1040
1041 for (i = 0; i < NUM_LONG_LISTS; i++) {
1042 - msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
1043 + msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
1044 if (IS_ERR(msg)) {
1045 err = PTR_ERR(msg);
1046 goto ex_err;
1047 @@ -1485,7 +1508,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
1048 }
1049
1050 for (i = 0; i < NUM_MED_LISTS; i++) {
1051 - msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
1052 + msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
1053 if (IS_ERR(msg)) {
1054 err = PTR_ERR(msg);
1055 goto ex_err;
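
The mlx5 rework above threads an offset through xor8_buf() so a signature can be computed over a sub-range of a block without forging a pointer into its middle, and moves token assignment to message-build time so every mailbox block carries the token its signature covers. The checksum itself is a plain XOR fold, sketched here standalone:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* XOR all bytes in buf[offset .. offset+len). A block whose final
 * byte is set to the complement of the XOR of the rest folds to 0xff
 * overall, which is what verify_block_sig() checks for. */
static uint8_t xor8_buf(const void *buf, size_t offset, size_t len)
{
	const uint8_t *ptr = buf;
	uint8_t sum = 0;
	size_t i;

	for (i = offset; i < offset + len; i++)
		sum ^= ptr[i];
	return sum;
}

int main(void)
{
	uint8_t block[8];

	memcpy(block, "mailbox", 7);
	block[7] = ~xor8_buf(block, 0, 7);		/* sign */
	printf("verify: %#x\n", xor8_buf(block, 0, 8));	/* 0xff */
	return 0;
}
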
1056 diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
1057 index 0e2fc1a844ab..8c44cf6ff7a2 100644
1058 --- a/drivers/net/ethernet/smsc/smc91x.c
1059 +++ b/drivers/net/ethernet/smsc/smc91x.c
1060 @@ -2269,6 +2269,13 @@ static int smc_drv_probe(struct platform_device *pdev)
1061 if (pd) {
1062 memcpy(&lp->cfg, pd, sizeof(lp->cfg));
1063 lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
1064 +
1065 + if (!SMC_8BIT(lp) && !SMC_16BIT(lp)) {
1066 + dev_err(&pdev->dev,
1067 + "at least one of 8-bit or 16-bit access support is required.\n");
1068 + ret = -ENXIO;
1069 + goto out_free_netdev;
1070 + }
1071 }
1072
1073 #if IS_BUILTIN(CONFIG_OF)
1074 diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
1075 index a3c129e1e40a..29df0465daf4 100644
1076 --- a/drivers/net/ethernet/smsc/smc91x.h
1077 +++ b/drivers/net/ethernet/smsc/smc91x.h
1078 @@ -37,6 +37,27 @@
1079 #include <linux/smc91x.h>
1080
1081 /*
1082 + * Any 16-bit access is performed with two 8-bit accesses if the hardware
1083 + * can't do it directly. Most registers are 16-bit so those are mandatory.
1084 + */
1085 +#define SMC_outw_b(x, a, r) \
1086 + do { \
1087 + unsigned int __val16 = (x); \
1088 + unsigned int __reg = (r); \
1089 + SMC_outb(__val16, a, __reg); \
1090 + SMC_outb(__val16 >> 8, a, __reg + (1 << SMC_IO_SHIFT)); \
1091 + } while (0)
1092 +
1093 +#define SMC_inw_b(a, r) \
1094 + ({ \
1095 + unsigned int __val16; \
1096 + unsigned int __reg = r; \
1097 + __val16 = SMC_inb(a, __reg); \
1098 + __val16 |= SMC_inb(a, __reg + (1 << SMC_IO_SHIFT)) << 8; \
1099 + __val16; \
1100 + })
1101 +
1102 +/*
1103 * Define your architecture specific bus configuration parameters here.
1104 */
1105
1106 @@ -55,10 +76,30 @@
1107 #define SMC_IO_SHIFT (lp->io_shift)
1108
1109 #define SMC_inb(a, r) readb((a) + (r))
1110 -#define SMC_inw(a, r) readw((a) + (r))
1111 +#define SMC_inw(a, r) \
1112 + ({ \
1113 + unsigned int __smc_r = r; \
1114 + SMC_16BIT(lp) ? readw((a) + __smc_r) : \
1115 + SMC_8BIT(lp) ? SMC_inw_b(a, __smc_r) : \
1116 + ({ BUG(); 0; }); \
1117 + })
1118 +
1119 #define SMC_inl(a, r) readl((a) + (r))
1120 #define SMC_outb(v, a, r) writeb(v, (a) + (r))
1121 +#define SMC_outw(v, a, r) \
1122 + do { \
1123 + unsigned int __v = v, __smc_r = r; \
1124 + if (SMC_16BIT(lp)) \
1125 + __SMC_outw(__v, a, __smc_r); \
1126 + else if (SMC_8BIT(lp)) \
1127 + SMC_outw_b(__v, a, __smc_r); \
1128 + else \
1129 + BUG(); \
1130 + } while (0)
1131 +
1132 #define SMC_outl(v, a, r) writel(v, (a) + (r))
1133 +#define SMC_insb(a, r, p, l) readsb((a) + (r), p, l)
1134 +#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, l)
1135 #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
1136 #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
1137 #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
1138 @@ -66,7 +107,7 @@
1139 #define SMC_IRQ_FLAGS (-1) /* from resource */
1140
1141 /* We actually can't write halfwords properly if not word aligned */
1142 -static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
1143 +static inline void __SMC_outw(u16 val, void __iomem *ioaddr, int reg)
1144 {
1145 if ((machine_is_mainstone() || machine_is_stargate2() ||
1146 machine_is_pxa_idp()) && reg & 2) {
1147 @@ -405,24 +446,8 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
1148
1149 #if ! SMC_CAN_USE_16BIT
1150
1151 -/*
1152 - * Any 16-bit access is performed with two 8-bit accesses if the hardware
1153 - * can't do it directly. Most registers are 16-bit so those are mandatory.
1154 - */
1155 -#define SMC_outw(x, ioaddr, reg) \
1156 - do { \
1157 - unsigned int __val16 = (x); \
1158 - SMC_outb( __val16, ioaddr, reg ); \
1159 - SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
1160 - } while (0)
1161 -#define SMC_inw(ioaddr, reg) \
1162 - ({ \
1163 - unsigned int __val16; \
1164 - __val16 = SMC_inb( ioaddr, reg ); \
1165 - __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
1166 - __val16; \
1167 - })
1168 -
1169 +#define SMC_outw(x, ioaddr, reg) SMC_outw_b(x, ioaddr, reg)
1170 +#define SMC_inw(ioaddr, reg) SMC_inw_b(ioaddr, reg)
1171 #define SMC_insw(a, r, p, l) BUG()
1172 #define SMC_outsw(a, r, p, l) BUG()
1173
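
SMC_outw_b()/SMC_inw_b() are now defined unconditionally, so a 16-bit register access can always be synthesized from two 8-bit cycles, the second byte at the next register offset (scaled by SMC_IO_SHIFT in the driver). Stripped of the kernel's I/O accessor macros, the composition looks like this sketch against a fake byte-wide bus:

#include <stdint.h>
#include <stdio.h>

/* Emulated 8-bit bus: two byte-wide 'registers'. */
static uint8_t regs[2];

static void outb(uint8_t v, unsigned int r) { regs[r] = v; }
static uint8_t inb(unsigned int r)          { return regs[r]; }

/* A 16-bit write as two 8-bit writes: low byte first, then the high
 * byte at the following offset - the SMC_outw_b() pattern. */
static void outw_b(uint16_t v, unsigned int r)
{
	outb(v & 0xff, r);
	outb(v >> 8, r + 1);
}

static uint16_t inw_b(unsigned int r)
{
	return (uint16_t)(inb(r) | (inb(r + 1) << 8));
}

int main(void)
{
	outw_b(0xbeef, 0);
	printf("%#x\n", inw_b(0));	/* 0xbeef */
	return 0;
}
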
1174 diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
1175 index 47cd306dbb3c..bba0ca786aaa 100644
1176 --- a/drivers/net/phy/phy.c
1177 +++ b/drivers/net/phy/phy.c
1178 @@ -640,8 +640,10 @@ phy_err:
1179 int phy_start_interrupts(struct phy_device *phydev)
1180 {
1181 atomic_set(&phydev->irq_disable, 0);
1182 - if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
1183 - phydev) < 0) {
1184 + if (request_irq(phydev->irq, phy_interrupt,
1185 + IRQF_SHARED,
1186 + "phy_interrupt",
1187 + phydev) < 0) {
1188 pr_warn("%s: Can't get IRQ %d (PHY)\n",
1189 phydev->bus->name, phydev->irq);
1190 phydev->irq = PHY_POLL;
1191 diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
1192 index 1bdeacf7b257..bc70ce62bc03 100644
1193 --- a/drivers/net/wireless/ath/ath9k/init.c
1194 +++ b/drivers/net/wireless/ath/ath9k/init.c
1195 @@ -869,8 +869,8 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
1196 hw->wiphy->interface_modes |=
1197 BIT(NL80211_IFTYPE_P2P_DEVICE);
1198
1199 - hw->wiphy->iface_combinations = if_comb;
1200 - hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
1201 + hw->wiphy->iface_combinations = if_comb;
1202 + hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
1203 }
1204
1205 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
1206 diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
1207 index 93bdf684babe..ae047ab7a4df 100644
1208 --- a/drivers/net/wireless/iwlegacy/3945.c
1209 +++ b/drivers/net/wireless/iwlegacy/3945.c
1210 @@ -1019,12 +1019,13 @@ il3945_hw_txq_ctx_free(struct il_priv *il)
1211 int txq_id;
1212
1213 /* Tx queues */
1214 - if (il->txq)
1215 + if (il->txq) {
1216 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
1217 if (txq_id == IL39_CMD_QUEUE_NUM)
1218 il_cmd_queue_free(il);
1219 else
1220 il_tx_queue_free(il, txq_id);
1221 + }
1222
1223 /* free tx queue structure */
1224 il_free_txq_mem(il);
1225 diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
1226 index 20e6aa910700..c148085742a0 100644
1227 --- a/drivers/net/wireless/iwlwifi/dvm/calib.c
1228 +++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
1229 @@ -901,7 +901,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
1230 /* bound gain by 2 bits value max, 3rd bit is sign */
1231 data->delta_gain_code[i] =
1232 min(abs(delta_g),
1233 - (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
1234 + (s32) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
1235
1236 if (delta_g < 0)
1237 /*
1238 diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
1239 index 9c65f134d447..da7a75f82489 100644
1240 --- a/drivers/power/max17042_battery.c
1241 +++ b/drivers/power/max17042_battery.c
1242 @@ -457,13 +457,16 @@ static inline void max17042_write_model_data(struct max17042_chip *chip,
1243 }
1244
1245 static inline void max17042_read_model_data(struct max17042_chip *chip,
1246 - u8 addr, u32 *data, int size)
1247 + u8 addr, u16 *data, int size)
1248 {
1249 struct regmap *map = chip->regmap;
1250 int i;
1251 + u32 tmp;
1252
1253 - for (i = 0; i < size; i++)
1254 - regmap_read(map, addr + i, &data[i]);
1255 + for (i = 0; i < size; i++) {
1256 + regmap_read(map, addr + i, &tmp);
1257 + data[i] = (u16)tmp;
1258 + }
1259 }
1260
1261 static inline int max17042_model_data_compare(struct max17042_chip *chip,
1262 @@ -486,7 +489,7 @@ static int max17042_init_model(struct max17042_chip *chip)
1263 {
1264 int ret;
1265 int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
1266 - u32 *temp_data;
1267 + u16 *temp_data;
1268
1269 temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
1270 if (!temp_data)
1271 @@ -501,7 +504,7 @@ static int max17042_init_model(struct max17042_chip *chip)
1272 ret = max17042_model_data_compare(
1273 chip,
1274 chip->pdata->config_data->cell_char_tbl,
1275 - (u16 *)temp_data,
1276 + temp_data,
1277 table_size);
1278
1279 max10742_lock_model(chip);
1280 @@ -514,7 +517,7 @@ static int max17042_verify_model_lock(struct max17042_chip *chip)
1281 {
1282 int i;
1283 int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
1284 - u32 *temp_data;
1285 + u16 *temp_data;
1286 int ret = 0;
1287
1288 temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
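
The max17042 change above stops reading model data into a u32 array that was later reinterpreted through a (u16 *) cast - a layout mismatch, since each 16-bit register value occupied four bytes. Because regmap_read() always yields a 32-bit value, the fix reads through a u32 temporary and narrows explicitly. A userspace sketch with a stubbed-out regmap_read():

#include <stdint.h>
#include <stdio.h>

/* Stand-in for regmap_read(): the API always yields a 32-bit value. */
static void regmap_read(unsigned int reg, uint32_t *val)
{
	*val = 0x1000 + reg;	/* fake register contents */
}

/* Read 16-bit model data through the 32-bit API: go through a u32
 * temporary and narrow explicitly, instead of reinterpreting a u32
 * array as u16 pairs later (the bug this hunk removes). */
static void read_model_data(unsigned int addr, uint16_t *data, int size)
{
	uint32_t tmp;
	int i;

	for (i = 0; i < size; i++) {
		regmap_read(addr + i, &tmp);
		data[i] = (uint16_t)tmp;
	}
}

int main(void)
{
	uint16_t model[4];

	read_model_data(0x80, model, 4);
	printf("%#x %#x\n", model[0], model[3]);	/* 0x1080 0x1083 */
	return 0;
}
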
1289 diff --git a/drivers/power/reset/hisi-reboot.c b/drivers/power/reset/hisi-reboot.c
1290 index 9ab7f562a83b..f69387e12c1e 100644
1291 --- a/drivers/power/reset/hisi-reboot.c
1292 +++ b/drivers/power/reset/hisi-reboot.c
1293 @@ -53,13 +53,16 @@ static int hisi_reboot_probe(struct platform_device *pdev)
1294
1295 if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) {
1296 pr_err("failed to find reboot-offset property\n");
1297 + iounmap(base);
1298 return -EINVAL;
1299 }
1300
1301 err = register_restart_handler(&hisi_restart_nb);
1302 - if (err)
1303 + if (err) {
1304 dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n",
1305 err);
1306 + iounmap(base);
1307 + }
1308
1309 return err;
1310 }
1311 diff --git a/drivers/power/tps65217_charger.c b/drivers/power/tps65217_charger.c
1312 index d9f56730c735..040a40b4b173 100644
1313 --- a/drivers/power/tps65217_charger.c
1314 +++ b/drivers/power/tps65217_charger.c
1315 @@ -205,6 +205,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
1316 if (!charger)
1317 return -ENOMEM;
1318
1319 + platform_set_drvdata(pdev, charger);
1320 charger->tps = tps;
1321 charger->dev = &pdev->dev;
1322
1323 diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
1324 index d24ca5f281b4..7831bc6b51dd 100644
1325 --- a/drivers/pwm/core.c
1326 +++ b/drivers/pwm/core.c
1327 @@ -889,7 +889,7 @@ EXPORT_SYMBOL_GPL(devm_pwm_put);
1328 */
1329 bool pwm_can_sleep(struct pwm_device *pwm)
1330 {
1331 - return pwm->chip->can_sleep;
1332 + return true;
1333 }
1334 EXPORT_SYMBOL_GPL(pwm_can_sleep);
1335
1336 diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
1337 index 3f8d357b1bac..278e10cd771f 100644
1338 --- a/drivers/scsi/megaraid/megaraid_sas_base.c
1339 +++ b/drivers/scsi/megaraid/megaraid_sas_base.c
1340 @@ -5941,11 +5941,11 @@ static void megasas_detach_one(struct pci_dev *pdev)
1341 if (fusion->ld_drv_map[i])
1342 free_pages((ulong)fusion->ld_drv_map[i],
1343 fusion->drv_map_pages);
1344 - if (fusion->pd_seq_sync)
1345 - dma_free_coherent(&instance->pdev->dev,
1346 - pd_seq_map_sz,
1347 - fusion->pd_seq_sync[i],
1348 - fusion->pd_seq_phys[i]);
1349 + if (fusion->pd_seq_sync[i])
1350 + dma_free_coherent(&instance->pdev->dev,
1351 + pd_seq_map_sz,
1352 + fusion->pd_seq_sync[i],
1353 + fusion->pd_seq_phys[i]);
1354 }
1355 free_pages((ulong)instance->ctrl_context,
1356 instance->ctrl_context_pages);
1357 diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
1358 index bb40f3728742..20314ff08be0 100644
1359 --- a/drivers/staging/iio/adc/ad7192.c
1360 +++ b/drivers/staging/iio/adc/ad7192.c
1361 @@ -236,7 +236,7 @@ static int ad7192_setup(struct ad7192_state *st,
1362 st->mclk = pdata->ext_clk_hz;
1363 else
1364 st->mclk = AD7192_INT_FREQ_MHZ;
1365 - break;
1366 + break;
1367 default:
1368 ret = -EINVAL;
1369 goto out;
1370 diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
1371 index c37149b929be..502d3892d8a4 100644
1372 --- a/fs/autofs4/autofs_i.h
1373 +++ b/fs/autofs4/autofs_i.h
1374 @@ -79,9 +79,13 @@ struct autofs_info {
1375 };
1376
1377 #define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */
1378 -#define AUTOFS_INF_NO_RCU (1<<1) /* the dentry is being considered
1379 +#define AUTOFS_INF_WANT_EXPIRE (1<<1) /* the dentry is being considered
1380 * for expiry, so RCU_walk is
1381 - * not permitted
1382 + * not permitted. If it progresses to
1383 + * actual expiry attempt, the flag is
1384 + * not cleared when EXPIRING is set -
1385 + * in that case it gets cleared only
1386 + * when it comes to clearing EXPIRING.
1387 */
1388 #define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */
1389
1390 diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
1391 index 1cebc3c52fa5..7a5a598a2d94 100644
1392 --- a/fs/autofs4/expire.c
1393 +++ b/fs/autofs4/expire.c
1394 @@ -315,19 +315,17 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
1395 if (ino->flags & AUTOFS_INF_PENDING)
1396 goto out;
1397 if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
1398 - ino->flags |= AUTOFS_INF_NO_RCU;
1399 + ino->flags |= AUTOFS_INF_WANT_EXPIRE;
1400 spin_unlock(&sbi->fs_lock);
1401 synchronize_rcu();
1402 spin_lock(&sbi->fs_lock);
1403 if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
1404 ino->flags |= AUTOFS_INF_EXPIRING;
1405 - smp_mb();
1406 - ino->flags &= ~AUTOFS_INF_NO_RCU;
1407 init_completion(&ino->expire_complete);
1408 spin_unlock(&sbi->fs_lock);
1409 return root;
1410 }
1411 - ino->flags &= ~AUTOFS_INF_NO_RCU;
1412 + ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
1413 }
1414 out:
1415 spin_unlock(&sbi->fs_lock);
1416 @@ -417,6 +415,7 @@ static struct dentry *should_expire(struct dentry *dentry,
1417 }
1418 return NULL;
1419 }
1420 +
1421 /*
1422 * Find an eligible tree to time-out
1423 * A tree is eligible if :-
1424 @@ -432,6 +431,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
1425 struct dentry *root = sb->s_root;
1426 struct dentry *dentry;
1427 struct dentry *expired;
1428 + struct dentry *found;
1429 struct autofs_info *ino;
1430
1431 if (!root)
1432 @@ -442,48 +442,54 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
1433
1434 dentry = NULL;
1435 while ((dentry = get_next_positive_subdir(dentry, root))) {
1436 + int flags = how;
1437 +
1438 spin_lock(&sbi->fs_lock);
1439 ino = autofs4_dentry_ino(dentry);
1440 - if (ino->flags & AUTOFS_INF_NO_RCU)
1441 - expired = NULL;
1442 - else
1443 - expired = should_expire(dentry, mnt, timeout, how);
1444 - if (!expired) {
1445 + if (ino->flags & AUTOFS_INF_WANT_EXPIRE) {
1446 spin_unlock(&sbi->fs_lock);
1447 continue;
1448 }
1449 + spin_unlock(&sbi->fs_lock);
1450 +
1451 + expired = should_expire(dentry, mnt, timeout, flags);
1452 + if (!expired)
1453 + continue;
1454 +
1455 + spin_lock(&sbi->fs_lock);
1456 ino = autofs4_dentry_ino(expired);
1457 - ino->flags |= AUTOFS_INF_NO_RCU;
1458 + ino->flags |= AUTOFS_INF_WANT_EXPIRE;
1459 spin_unlock(&sbi->fs_lock);
1460 synchronize_rcu();
1461 - spin_lock(&sbi->fs_lock);
1462 - if (should_expire(expired, mnt, timeout, how)) {
1463 - if (expired != dentry)
1464 - dput(dentry);
1465 - goto found;
1466 - }
1467
1468 - ino->flags &= ~AUTOFS_INF_NO_RCU;
1469 + /* Make sure a reference is not taken on found if
1470 + * things have changed.
1471 + */
1472 + flags &= ~AUTOFS_EXP_LEAVES;
1473 + found = should_expire(expired, mnt, timeout, how);
1474 + if (!found || found != expired)
1475 + /* Something has changed, continue */
1476 + goto next;
1477 +
1478 if (expired != dentry)
1479 - dput(expired);
1480 + dput(dentry);
1481 +
1482 + spin_lock(&sbi->fs_lock);
1483 + goto found;
1484 +next:
1485 + spin_lock(&sbi->fs_lock);
1486 + ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
1487 spin_unlock(&sbi->fs_lock);
1488 + if (expired != dentry)
1489 + dput(expired);
1490 }
1491 return NULL;
1492
1493 found:
1494 DPRINTK("returning %p %pd", expired, expired);
1495 ino->flags |= AUTOFS_INF_EXPIRING;
1496 - smp_mb();
1497 - ino->flags &= ~AUTOFS_INF_NO_RCU;
1498 init_completion(&ino->expire_complete);
1499 spin_unlock(&sbi->fs_lock);
1500 - spin_lock(&sbi->lookup_lock);
1501 - spin_lock(&expired->d_parent->d_lock);
1502 - spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
1503 - list_move(&expired->d_parent->d_subdirs, &expired->d_child);
1504 - spin_unlock(&expired->d_lock);
1505 - spin_unlock(&expired->d_parent->d_lock);
1506 - spin_unlock(&sbi->lookup_lock);
1507 return expired;
1508 }
1509
1510 @@ -492,15 +498,27 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk)
1511 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
1512 struct autofs_info *ino = autofs4_dentry_ino(dentry);
1513 int status;
1514 + int state;
1515
1516 /* Block on any pending expire */
1517 - if (!(ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU)))
1518 + if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE))
1519 return 0;
1520 if (rcu_walk)
1521 return -ECHILD;
1522
1523 +retry:
1524 spin_lock(&sbi->fs_lock);
1525 - if (ino->flags & AUTOFS_INF_EXPIRING) {
1526 + state = ino->flags & (AUTOFS_INF_WANT_EXPIRE | AUTOFS_INF_EXPIRING);
1527 + if (state == AUTOFS_INF_WANT_EXPIRE) {
1528 + spin_unlock(&sbi->fs_lock);
1529 + /*
1530 + * Possibly being selected for expire, wait until
1531 + * it's selected or not.
1532 + */
1533 + schedule_timeout_uninterruptible(HZ/10);
1534 + goto retry;
1535 + }
1536 + if (state & AUTOFS_INF_EXPIRING) {
1537 spin_unlock(&sbi->fs_lock);
1538
1539 DPRINTK("waiting for expire %p name=%pd", dentry, dentry);
1540 @@ -551,7 +569,7 @@ int autofs4_expire_run(struct super_block *sb,
1541 ino = autofs4_dentry_ino(dentry);
1542 /* avoid rapid-fire expire attempts if expiry fails */
1543 ino->last_used = now;
1544 - ino->flags &= ~AUTOFS_INF_EXPIRING;
1545 + ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
1546 complete_all(&ino->expire_complete);
1547 spin_unlock(&sbi->fs_lock);
1548
1549 @@ -579,7 +597,7 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
1550 spin_lock(&sbi->fs_lock);
1551 /* avoid rapid-fire expire attempts if expiry fails */
1552 ino->last_used = now;
1553 - ino->flags &= ~AUTOFS_INF_EXPIRING;
1554 + ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
1555 complete_all(&ino->expire_complete);
1556 spin_unlock(&sbi->fs_lock);
1557 dput(dentry);
1558 diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
1559 index c6d7d3dbd52a..7a54c6a867c8 100644
1560 --- a/fs/autofs4/root.c
1561 +++ b/fs/autofs4/root.c
1562 @@ -455,7 +455,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
1563 * a mount-trap.
1564 */
1565 struct inode *inode;
1566 - if (ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU))
1567 + if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
1568 return 0;
1569 if (d_mountpoint(dentry))
1570 return 0;
1571 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
1572 index 65f30b3b04f9..a7e18dbadf74 100644
1573 --- a/fs/btrfs/ioctl.c
1574 +++ b/fs/btrfs/ioctl.c
1575 @@ -1619,6 +1619,9 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
1576 int namelen;
1577 int ret = 0;
1578
1579 + if (!S_ISDIR(file_inode(file)->i_mode))
1580 + return -ENOTDIR;
1581 +
1582 ret = mnt_want_write_file(file);
1583 if (ret)
1584 goto out;
1585 @@ -1676,6 +1679,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
1586 struct btrfs_ioctl_vol_args *vol_args;
1587 int ret;
1588
1589 + if (!S_ISDIR(file_inode(file)->i_mode))
1590 + return -ENOTDIR;
1591 +
1592 vol_args = memdup_user(arg, sizeof(*vol_args));
1593 if (IS_ERR(vol_args))
1594 return PTR_ERR(vol_args);
1595 @@ -1699,6 +1705,9 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
1596 bool readonly = false;
1597 struct btrfs_qgroup_inherit *inherit = NULL;
1598
1599 + if (!S_ISDIR(file_inode(file)->i_mode))
1600 + return -ENOTDIR;
1601 +
1602 vol_args = memdup_user(arg, sizeof(*vol_args));
1603 if (IS_ERR(vol_args))
1604 return PTR_ERR(vol_args);
1605 @@ -2345,6 +2354,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
1606 int ret;
1607 int err = 0;
1608
1609 + if (!S_ISDIR(dir->i_mode))
1610 + return -ENOTDIR;
1611 +
1612 vol_args = memdup_user(arg, sizeof(*vol_args));
1613 if (IS_ERR(vol_args))
1614 return PTR_ERR(vol_args);
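
All four btrfs snapshot/subvolume ioctls above gain the same guard: return -ENOTDIR before taking any references if the descriptor the ioctl arrived on is not a directory. The shape of the check as a self-contained sketch (struct stat stands in for file_inode(file); the ioctl body is hypothetical):

#include <errno.h>
#include <sys/stat.h>

/* Sketch of the guard each btrfs snapshot ioctl now starts with. */
static int snap_ioctl(const struct stat *st)
{
    if (!S_ISDIR(st->st_mode))
        return -ENOTDIR;  /* reject before memdup_user()/mnt_want_write_file() */

    /* ... normal ioctl processing would follow ... */
    return 0;
}
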
1615 diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
1616 index 5a7b3229b956..f34d6f5a5aca 100644
1617 --- a/fs/hostfs/hostfs_kern.c
1618 +++ b/fs/hostfs/hostfs_kern.c
1619 @@ -959,10 +959,11 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
1620
1621 if (S_ISLNK(root_inode->i_mode)) {
1622 char *name = follow_link(host_root_path);
1623 - if (IS_ERR(name))
1624 + if (IS_ERR(name)) {
1625 err = PTR_ERR(name);
1626 - else
1627 - err = read_name(root_inode, name);
1628 + goto out_put;
1629 + }
1630 + err = read_name(root_inode, name);
1631 kfree(name);
1632 if (err)
1633 goto out_put;
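
The hostfs change is a classic IS_ERR() error-path fix: when follow_link() fails it returns an encoded error pointer, and the old code fell through and handed that ERR_PTR to kfree(). A sketch of the corrected shape, using user-space stand-ins for the kernel's IS_ERR/PTR_ERR:

#include <stdlib.h>

#define MAX_ERRNO   4095
#define IS_ERR(p)   ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(p)  ((long)(p))

static int fill_root(char *(*follow_link)(const char *), const char *path)
{
    char *name = follow_link(path);
    int err;

    if (IS_ERR(name))
        return (int)PTR_ERR(name);  /* bail out before the free() below */

    err = 0;    /* read_name(root_inode, name) would go here */
    free(name); /* only ever frees a real pointer now */
    return err;
}
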
1634 diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
1635 index d2f97ecca6a5..e0e5f7c3c99f 100644
1636 --- a/fs/notify/fanotify/fanotify.c
1637 +++ b/fs/notify/fanotify/fanotify.c
1638 @@ -67,18 +67,7 @@ static int fanotify_get_response(struct fsnotify_group *group,
1639
1640 pr_debug("%s: group=%p event=%p\n", __func__, group, event);
1641
1642 - wait_event(group->fanotify_data.access_waitq, event->response ||
1643 - atomic_read(&group->fanotify_data.bypass_perm));
1644 -
1645 - if (!event->response) { /* bypass_perm set */
1646 - /*
1647 - * Event was canceled because group is being destroyed. Remove
1648 - * it from group's event list because we are responsible for
1649 - * freeing the permission event.
1650 - */
1651 - fsnotify_remove_event(group, &event->fae.fse);
1652 - return 0;
1653 - }
1654 + wait_event(group->fanotify_data.access_waitq, event->response);
1655
1656 /* userspace responded, convert to something usable */
1657 switch (event->response) {
1658 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
1659 index 8e8e6bcd1d43..a64313868d3a 100644
1660 --- a/fs/notify/fanotify/fanotify_user.c
1661 +++ b/fs/notify/fanotify/fanotify_user.c
1662 @@ -358,16 +358,20 @@ static int fanotify_release(struct inode *ignored, struct file *file)
1663
1664 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
1665 struct fanotify_perm_event_info *event, *next;
1666 + struct fsnotify_event *fsn_event;
1667
1668 /*
1669 - * There may be still new events arriving in the notification queue
1670 - * but since userspace cannot use fanotify fd anymore, no event can
1671 - * enter or leave access_list by now.
1672 + * Stop new events from arriving in the notification queue. Since
1673 + * userspace cannot use the fanotify fd anymore, no event can enter or
1674 + * leave access_list by now either.
1675 */
1676 - spin_lock(&group->fanotify_data.access_lock);
1677 -
1678 - atomic_inc(&group->fanotify_data.bypass_perm);
1679 + fsnotify_group_stop_queueing(group);
1680
1681 + /*
1682 + * Process all permission events on access_list and notification queue
1683 + * and simulate reply from userspace.
1684 + */
1685 + spin_lock(&group->fanotify_data.access_lock);
1686 list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
1687 fae.fse.list) {
1688 pr_debug("%s: found group=%p event=%p\n", __func__, group,
1689 @@ -379,12 +383,21 @@ static int fanotify_release(struct inode *ignored, struct file *file)
1690 spin_unlock(&group->fanotify_data.access_lock);
1691
1692 /*
1693 - * Since bypass_perm is set, newly queued events will not wait for
1694 - * access response. Wake up the already sleeping ones now.
1695 - * synchronize_srcu() in fsnotify_destroy_group() will wait for all
1696 - * processes sleeping in fanotify_handle_event() waiting for access
1697 - * response and thus also for all permission events to be freed.
1698 + * Destroy all non-permission events. For permission events just
1699 + * dequeue them and set the response. They will be freed once the
1700 + * response is consumed and fanotify_get_response() returns.
1701 */
1702 + mutex_lock(&group->notification_mutex);
1703 + while (!fsnotify_notify_queue_is_empty(group)) {
1704 + fsn_event = fsnotify_remove_first_event(group);
1705 + if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS))
1706 + fsnotify_destroy_event(group, fsn_event);
1707 + else
1708 + FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
1709 + }
1710 + mutex_unlock(&group->notification_mutex);
1711 +
1712 + /* Response for all permission events is set, wake up waiters */
1713 wake_up(&group->fanotify_data.access_waitq);
1714 #endif
1715
1716 @@ -755,7 +768,6 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
1717 spin_lock_init(&group->fanotify_data.access_lock);
1718 init_waitqueue_head(&group->fanotify_data.access_waitq);
1719 INIT_LIST_HEAD(&group->fanotify_data.access_list);
1720 - atomic_set(&group->fanotify_data.bypass_perm, 0);
1721 #endif
1722 switch (flags & FAN_ALL_CLASS_BITS) {
1723 case FAN_CLASS_NOTIF:
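
fanotify_release() now shuts the queue first and then walks both the access list and the notification queue, destroying ordinary events and answering each pending permission event with FAN_ALLOW so its waiter can finish and free it. A user-space sketch of that drain loop over a singly linked queue, with hypothetical event and flag names:

#include <stdlib.h>

#define EV_PERM   0x1   /* stands in for FAN_ALL_PERM_EVENTS */
#define EV_ALLOW  1     /* stands in for FAN_ALLOW */

struct event {
    struct event *next;
    unsigned int mask;
    int response;       /* consumed by the waiter, as in fanotify */
};

/* Mirrors the while (!queue_is_empty) loop added to fanotify_release(). */
static void drain_queue(struct event **head)
{
    while (*head) {
        struct event *ev = *head;
        *head = ev->next;

        if (!(ev->mask & EV_PERM))
            free(ev);                 /* fsnotify_destroy_event() */
        else
            ev->response = EV_ALLOW;  /* waiter frees it after waking */
    }
    /* wake_up(&access_waitq) would follow here */
}
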
1724 diff --git a/fs/notify/group.c b/fs/notify/group.c
1725 index d16b62cb2854..18eb30c6bd8f 100644
1726 --- a/fs/notify/group.c
1727 +++ b/fs/notify/group.c
1728 @@ -40,6 +40,17 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
1729 }
1730
1731 /*
1732 + * Stop queueing new events for this group. Once this function returns
1733 + * fsnotify_add_event() will not add any new events to the group's queue.
1734 + */
1735 +void fsnotify_group_stop_queueing(struct fsnotify_group *group)
1736 +{
1737 + mutex_lock(&group->notification_mutex);
1738 + group->shutdown = true;
1739 + mutex_unlock(&group->notification_mutex);
1740 +}
1741 +
1742 +/*
1743 * Trying to get rid of a group. Remove all marks, flush all events and release
1744 * the group reference.
1745 * Note that another thread calling fsnotify_clear_marks_by_group() may still
1746 @@ -47,6 +58,14 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
1747 */
1748 void fsnotify_destroy_group(struct fsnotify_group *group)
1749 {
1750 + /*
1751 + * Stop queueing new events. The code below is careful enough to not
1752 + * require this but fanotify needs to stop queuing events even before
1753 + * fsnotify_destroy_group() is called, and this makes the other callers
1754 + * of fsnotify_destroy_group() see the same behavior.
1755 + */
1756 + fsnotify_group_stop_queueing(group);
1757 +
1758 /* clear all inode marks for this group */
1759 fsnotify_clear_marks_by_group(group);
1760
1761 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
1762 index a95d8e037aeb..e455e83ceeeb 100644
1763 --- a/fs/notify/notification.c
1764 +++ b/fs/notify/notification.c
1765 @@ -82,7 +82,8 @@ void fsnotify_destroy_event(struct fsnotify_group *group,
1766 * Add an event to the group notification queue. The group can later pull this
1767 * event off the queue to deal with. The function returns 0 if the event was
1768 * added to the queue, 1 if the event was merged with some other queued event,
1769 - * 2 if the queue of events has overflown.
1770 + * 2 if the event was not queued - either the queue of events has overflowed
1771 + * or the group is shutting down.
1772 */
1773 int fsnotify_add_event(struct fsnotify_group *group,
1774 struct fsnotify_event *event,
1775 @@ -96,6 +97,11 @@ int fsnotify_add_event(struct fsnotify_group *group,
1776
1777 mutex_lock(&group->notification_mutex);
1778
1779 + if (group->shutdown) {
1780 + mutex_unlock(&group->notification_mutex);
1781 + return 2;
1782 + }
1783 +
1784 if (group->q_len >= group->max_events) {
1785 ret = 2;
1786 /* Queue overflow event only if it isn't already queued */
1787 @@ -126,21 +132,6 @@ queue:
1788 }
1789
1790 /*
1791 - * Remove @event from group's notification queue. It is the responsibility of
1792 - * the caller to destroy the event.
1793 - */
1794 -void fsnotify_remove_event(struct fsnotify_group *group,
1795 - struct fsnotify_event *event)
1796 -{
1797 - mutex_lock(&group->notification_mutex);
1798 - if (!list_empty(&event->list)) {
1799 - list_del_init(&event->list);
1800 - group->q_len--;
1801 - }
1802 - mutex_unlock(&group->notification_mutex);
1803 -}
1804 -
1805 -/*
1806 * Remove and return the first event from the notification list. It is the
1807 * responsibility of the caller to destroy the obtained event
1808 */
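
Taken together, fsnotify_group_stop_queueing() and the new check in fsnotify_add_event() form a mutex-protected shutdown flag: once the flag is set under notification_mutex, no producer that takes the same mutex can enqueue again, so no memory barriers are needed. A compact pthread sketch of the pattern:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t notification_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool shutdown_flag;    /* group->shutdown in the patch */
static int q_len;

void stop_queueing(void)
{
    pthread_mutex_lock(&notification_mutex);
    shutdown_flag = true;
    pthread_mutex_unlock(&notification_mutex);
}

/* Returns 0 if queued, 2 if dropped - mirroring fsnotify_add_event(). */
int add_event(void)
{
    pthread_mutex_lock(&notification_mutex);
    if (shutdown_flag) {
        pthread_mutex_unlock(&notification_mutex);
        return 2;
    }
    q_len++;    /* the real enqueue would happen here */
    pthread_mutex_unlock(&notification_mutex);
    return 0;
}

Once stop_queueing() returns, every subsequent add_event() can only take the early-exit path, which is exactly the guarantee fanotify_release() relies on before draining.
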
1809 diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
1810 index f90931335c6b..2e11658676eb 100644
1811 --- a/fs/ocfs2/dlm/dlmconvert.c
1812 +++ b/fs/ocfs2/dlm/dlmconvert.c
1813 @@ -262,7 +262,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
1814 struct dlm_lock *lock, int flags, int type)
1815 {
1816 enum dlm_status status;
1817 - u8 old_owner = res->owner;
1818
1819 mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
1820 lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
1821 @@ -329,7 +328,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
1822
1823 spin_lock(&res->spinlock);
1824 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
1825 - lock->convert_pending = 0;
1826 /* if it failed, move it back to granted queue.
1827 * if master returns DLM_NORMAL and then down before sending ast,
1828 * it may have already been moved to granted queue, reset to
1829 @@ -338,12 +336,14 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
1830 if (status != DLM_NOTQUEUED)
1831 dlm_error(status);
1832 dlm_revert_pending_convert(res, lock);
1833 - } else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
1834 - (old_owner != res->owner)) {
1835 - mlog(0, "res %.*s is in recovering or has been recovered.\n",
1836 - res->lockname.len, res->lockname.name);
1837 + } else if (!lock->convert_pending) {
1838 + mlog(0, "%s: res %.*s, owner died and lock has been moved back "
1839 + "to granted list, retry convert.\n",
1840 + dlm->name, res->lockname.len, res->lockname.name);
1841 status = DLM_RECOVERING;
1842 }
1843 +
1844 + lock->convert_pending = 0;
1845 bail:
1846 spin_unlock(&res->spinlock);
1847
1848 diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
1849 index 77d30cbd944d..56dd3957cc91 100644
1850 --- a/fs/ocfs2/file.c
1851 +++ b/fs/ocfs2/file.c
1852 @@ -1536,7 +1536,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
1853 u64 start, u64 len)
1854 {
1855 int ret = 0;
1856 - u64 tmpend, end = start + len;
1857 + u64 tmpend = 0;
1858 + u64 end = start + len;
1859 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1860 unsigned int csize = osb->s_clustersize;
1861 handle_t *handle;
1862 @@ -1568,18 +1569,31 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
1863 }
1864
1865 /*
1866 - * We want to get the byte offset of the end of the 1st cluster.
1867 + * If start is on a cluster boundary and end is somewhere in another
1868 + * cluster, we have not COWed the cluster starting at start, unless
1869 + * end is also within the same cluster. So, in this case, we skip this
1870 + * first call to ocfs2_zero_range_for_truncate() and move on
1871 + * to the next one.
1872 */
1873 - tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
1874 - if (tmpend > end)
1875 - tmpend = end;
1876 + if ((start & (csize - 1)) != 0) {
1877 + /*
1878 + * We want to get the byte offset of the end of the 1st
1879 + * cluster.
1880 + */
1881 + tmpend = (u64)osb->s_clustersize +
1882 + (start & ~(osb->s_clustersize - 1));
1883 + if (tmpend > end)
1884 + tmpend = end;
1885
1886 - trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
1887 - (unsigned long long)tmpend);
1888 + trace_ocfs2_zero_partial_clusters_range1(
1889 + (unsigned long long)start,
1890 + (unsigned long long)tmpend);
1891
1892 - ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
1893 - if (ret)
1894 - mlog_errno(ret);
1895 + ret = ocfs2_zero_range_for_truncate(inode, handle, start,
1896 + tmpend);
1897 + if (ret)
1898 + mlog_errno(ret);
1899 + }
1900
1901 if (tmpend < end) {
1902 /*
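
The rewritten ocfs2 block only zeroes the head of the range when start is not cluster-aligned; tmpend is the byte offset of the end of start's cluster, obtained by masking start down to its cluster boundary and adding one cluster size. A tiny sketch of the arithmetic with a hypothetical 64 KiB cluster:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t csize = 65536;              /* hypothetical s_clustersize */
    uint64_t start = 70000, end = 200000;

    if ((start & (csize - 1)) != 0) {    /* start mid-cluster? */
        uint64_t tmpend = csize + (start & ~(csize - 1));
        if (tmpend > end)
            tmpend = end;
        /* zero [start, tmpend): here 70000..131072 */
        printf("zero head: %llu..%llu\n",
               (unsigned long long)start, (unsigned long long)tmpend);
    }
    return 0;
}

When start is aligned, the cluster has not been COWed yet, so skipping the head call avoids zeroing a cluster that the later tail pass handles.
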
1903 diff --git a/fs/reiserfs/ibalance.c b/fs/reiserfs/ibalance.c
1904 index b751eea32e20..5db6f45b3fed 100644
1905 --- a/fs/reiserfs/ibalance.c
1906 +++ b/fs/reiserfs/ibalance.c
1907 @@ -1153,8 +1153,9 @@ int balance_internal(struct tree_balance *tb,
1908 insert_ptr);
1909 }
1910
1911 - memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE);
1912 insert_ptr[0] = new_insert_ptr;
1913 + if (new_insert_ptr)
1914 + memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE);
1915
1916 return order;
1917 }
1918 diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
1919 index 39090fc56f09..eb1b8c8acfcb 100644
1920 --- a/fs/xfs/xfs_buf.c
1921 +++ b/fs/xfs/xfs_buf.c
1922 @@ -1535,7 +1535,7 @@ xfs_wait_buftarg(
1923 * ensure here that all reference counts have been dropped before we
1924 * start walking the LRU list.
1925 */
1926 - drain_workqueue(btp->bt_mount->m_buf_workqueue);
1927 + flush_workqueue(btp->bt_mount->m_buf_workqueue);
1928
1929 /* loop until there is nothing left on the lru list. */
1930 while (list_lru_count(&btp->bt_lru)) {
1931 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
1932 index 533c4408529a..850d8822e8ff 100644
1933 --- a/include/linux/fsnotify_backend.h
1934 +++ b/include/linux/fsnotify_backend.h
1935 @@ -148,6 +148,7 @@ struct fsnotify_group {
1936 #define FS_PRIO_1 1 /* fanotify content based access control */
1937 #define FS_PRIO_2 2 /* fanotify pre-content access */
1938 unsigned int priority;
1939 + bool shutdown; /* group is being shut down, don't queue more events */
1940
1941 /* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
1942 struct mutex mark_mutex; /* protect marks_list */
1943 @@ -179,7 +180,6 @@ struct fsnotify_group {
1944 spinlock_t access_lock;
1945 struct list_head access_list;
1946 wait_queue_head_t access_waitq;
1947 - atomic_t bypass_perm;
1948 #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
1949 int f_flags;
1950 unsigned int max_marks;
1951 @@ -308,6 +308,8 @@ extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *op
1952 extern void fsnotify_get_group(struct fsnotify_group *group);
1953 /* drop reference on a group from fsnotify_alloc_group */
1954 extern void fsnotify_put_group(struct fsnotify_group *group);
1955 +/* group destruction begins, stop queuing new events */
1956 +extern void fsnotify_group_stop_queueing(struct fsnotify_group *group);
1957 /* destroy group */
1958 extern void fsnotify_destroy_group(struct fsnotify_group *group);
1959 /* fasync handler function */
1960 @@ -320,8 +322,6 @@ extern int fsnotify_add_event(struct fsnotify_group *group,
1961 struct fsnotify_event *event,
1962 int (*merge)(struct list_head *,
1963 struct fsnotify_event *));
1964 -/* Remove passed event from groups notification queue */
1965 -extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
1966 /* true if the group notification queue is empty */
1967 extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
1968 /* return, but do not dequeue the first event on the notification queue */
1969 diff --git a/include/linux/kernel.h b/include/linux/kernel.h
1970 index 924853d33a13..e571e592e53a 100644
1971 --- a/include/linux/kernel.h
1972 +++ b/include/linux/kernel.h
1973 @@ -202,26 +202,26 @@ extern int _cond_resched(void);
1974
1975 /**
1976 * abs - return absolute value of an argument
1977 - * @x: the value. If it is unsigned type, it is converted to signed type first
1978 - * (s64, long or int depending on its size).
1979 + * @x: the value. If it is an unsigned type, it is converted to a signed type first.
1980 + * char is treated as if it was signed (regardless of whether it really is)
1981 + * but the macro's return type is preserved as char.
1982 *
1983 - * Return: an absolute value of x. If x is 64-bit, macro's return type is s64,
1984 - * otherwise it is signed long.
1985 + * Return: an absolute value of x.
1986 */
1987 -#define abs(x) __builtin_choose_expr(sizeof(x) == sizeof(s64), ({ \
1988 - s64 __x = (x); \
1989 - (__x < 0) ? -__x : __x; \
1990 - }), ({ \
1991 - long ret; \
1992 - if (sizeof(x) == sizeof(long)) { \
1993 - long __x = (x); \
1994 - ret = (__x < 0) ? -__x : __x; \
1995 - } else { \
1996 - int __x = (x); \
1997 - ret = (__x < 0) ? -__x : __x; \
1998 - } \
1999 - ret; \
2000 - }))
2001 +#define abs(x) __abs_choose_expr(x, long long, \
2002 + __abs_choose_expr(x, long, \
2003 + __abs_choose_expr(x, int, \
2004 + __abs_choose_expr(x, short, \
2005 + __abs_choose_expr(x, char, \
2006 + __builtin_choose_expr( \
2007 + __builtin_types_compatible_p(typeof(x), char), \
2008 + (char)({ signed char __x = (x); __x<0?-__x:__x; }), \
2009 + ((void)0)))))))
2010 +
2011 +#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \
2012 + __builtin_types_compatible_p(typeof(x), signed type) || \
2013 + __builtin_types_compatible_p(typeof(x), unsigned type), \
2014 + ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
2015
2016 /**
2017 * reciprocal_scale - "scale" a value into range [0, ep_ro)
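
The new abs() walks the integer types with __builtin_types_compatible_p, picks the first matching signed/unsigned pair, and evaluates x exactly once inside a statement expression; plain char gets its own leg so the result type stays char. A user-space demonstration of the same dispatch (GNU C extensions, so gcc or clang):

#include <stdio.h>

#define __abs_choose_expr(x, type, other) __builtin_choose_expr(       \
    __builtin_types_compatible_p(typeof(x), signed type) ||            \
    __builtin_types_compatible_p(typeof(x), unsigned type),            \
    ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)

#define abs(x) __abs_choose_expr(x, long long,                          \
               __abs_choose_expr(x, long,                               \
               __abs_choose_expr(x, int,                                \
               __abs_choose_expr(x, short,                              \
               __abs_choose_expr(x, char,                               \
               __builtin_choose_expr(                                   \
                   __builtin_types_compatible_p(typeof(x), char),       \
                   (char)({ signed char __x = (x); __x < 0 ? -__x : __x; }), \
                   ((void)0)))))))

int main(void)
{
    short s = -3;
    unsigned int u = 7;          /* matches the int leg, converted to signed */
    long long ll = -(1LL << 40);

    /* Each result keeps the argument's width: short, int, long long. */
    printf("%d %d %lld\n", abs(s), abs(u), abs(ll));
    return 0;
}

An argument of an unsupported type falls through to ((void)0) and fails to compile, which replaces the old macro's silent truncation with a build error.
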
2018 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
2019 index b97d6823ef3c..4e9c75226f07 100644
2020 --- a/include/linux/netdevice.h
2021 +++ b/include/linux/netdevice.h
2022 @@ -3036,6 +3036,7 @@ static inline void napi_free_frags(struct napi_struct *napi)
2023 napi->skb = NULL;
2024 }
2025
2026 +bool netdev_is_rx_handler_busy(struct net_device *dev);
2027 int netdev_rx_handler_register(struct net_device *dev,
2028 rx_handler_func_t *rx_handler,
2029 void *rx_handler_data);
2030 diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
2031 index 26eabf5ec718..fbfadba81c5a 100644
2032 --- a/include/linux/pagemap.h
2033 +++ b/include/linux/pagemap.h
2034 @@ -601,56 +601,56 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
2035 */
2036 static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
2037 {
2038 - int ret = 0;
2039 char __user *end = uaddr + size - 1;
2040
2041 if (unlikely(size == 0))
2042 - return ret;
2043 + return 0;
2044
2045 + if (unlikely(uaddr > end))
2046 + return -EFAULT;
2047 /*
2048 * Writing zeroes into userspace here is OK, because we know that if
2049 * the zero gets there, we'll be overwriting it.
2050 */
2051 - while (uaddr <= end) {
2052 - ret = __put_user(0, uaddr);
2053 - if (ret != 0)
2054 - return ret;
2055 + do {
2056 + if (unlikely(__put_user(0, uaddr) != 0))
2057 + return -EFAULT;
2058 uaddr += PAGE_SIZE;
2059 - }
2060 + } while (uaddr <= end);
2061
2062 /* Check whether the range spilled into the next page. */
2063 if (((unsigned long)uaddr & PAGE_MASK) ==
2064 ((unsigned long)end & PAGE_MASK))
2065 - ret = __put_user(0, end);
2066 + return __put_user(0, end);
2067
2068 - return ret;
2069 + return 0;
2070 }
2071
2072 static inline int fault_in_multipages_readable(const char __user *uaddr,
2073 int size)
2074 {
2075 volatile char c;
2076 - int ret = 0;
2077 const char __user *end = uaddr + size - 1;
2078
2079 if (unlikely(size == 0))
2080 - return ret;
2081 + return 0;
2082
2083 - while (uaddr <= end) {
2084 - ret = __get_user(c, uaddr);
2085 - if (ret != 0)
2086 - return ret;
2087 + if (unlikely(uaddr > end))
2088 + return -EFAULT;
2089 +
2090 + do {
2091 + if (unlikely(__get_user(c, uaddr) != 0))
2092 + return -EFAULT;
2093 uaddr += PAGE_SIZE;
2094 - }
2095 + } while (uaddr <= end);
2096
2097 /* Check whether the range spilled into the next page. */
2098 if (((unsigned long)uaddr & PAGE_MASK) ==
2099 ((unsigned long)end & PAGE_MASK)) {
2100 - ret = __get_user(c, end);
2101 - (void)c;
2102 + return __get_user(c, end);
2103 }
2104
2105 - return ret;
2106 + return 0;
2107 }
2108
2109 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
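
Both fault-in helpers above now compute end = uaddr + size - 1 up front and immediately reject the range if the addition wrapped (uaddr > end); the old while loop would quietly do nothing on overflow and report success. A sketch of the guard using uintptr_t arithmetic, since wrapping raw pointers is undefined in portable C:

#include <errno.h>
#include <stdint.h>

/* Mirrors the new guard in fault_in_multipages_{readable,writeable}(). */
static int check_user_range(uintptr_t uaddr, int size)
{
    uintptr_t end = uaddr + (uintptr_t)size - 1;

    if (size == 0)
        return 0;
    if (uaddr > end)    /* uaddr + size - 1 wrapped past the address top */
        return -EFAULT;

    /* the page-by-page __get_user()/__put_user() probe loop goes here */
    return 0;
}
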
2110 diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h
2111 index 76199b75d584..e302c447e057 100644
2112 --- a/include/linux/smc91x.h
2113 +++ b/include/linux/smc91x.h
2114 @@ -1,6 +1,16 @@
2115 #ifndef __SMC91X_H__
2116 #define __SMC91X_H__
2117
2118 +/*
2119 + * These bits define which access sizes a platform can support, rather
2120 + * than the maximal access size. So, if your platform can do 16-bit
2121 + * and 32-bit accesses to the SMC91x device, but not 8-bit, set both
2122 + * SMC91X_USE_16BIT and SMC91X_USE_32BIT.
2123 + *
2124 + * The SMC91x driver requires at least one of SMC91X_USE_8BIT or
2125 + * SMC91X_USE_16BIT to be supported - just setting SMC91X_USE_32BIT is
2126 + * an invalid configuration.
2127 + */
2128 #define SMC91X_USE_8BIT (1 << 0)
2129 #define SMC91X_USE_16BIT (1 << 1)
2130 #define SMC91X_USE_32BIT (1 << 2)
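
The new comment pins down the semantics: the SMC91X_USE_* bits advertise every access width the platform can do, not just the widest, and 32-bit-only is an invalid configuration. A validity check one might write against those rules (hypothetical helper, not part of the driver):

#include <stdbool.h>

#define SMC91X_USE_8BIT  (1 << 0)
#define SMC91X_USE_16BIT (1 << 1)
#define SMC91X_USE_32BIT (1 << 2)

/* Valid iff at least one of the 8-bit or 16-bit widths is advertised. */
static bool smc91x_cfg_valid(unsigned int caps)
{
    return (caps & (SMC91X_USE_8BIT | SMC91X_USE_16BIT)) != 0;
}
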
2131 diff --git a/include/net/af_unix.h b/include/net/af_unix.h
2132 index 9b4c418bebd8..fd60eccb59a6 100644
2133 --- a/include/net/af_unix.h
2134 +++ b/include/net/af_unix.h
2135 @@ -52,7 +52,7 @@ struct unix_sock {
2136 struct sock sk;
2137 struct unix_address *addr;
2138 struct path path;
2139 - struct mutex readlock;
2140 + struct mutex iolock, bindlock;
2141 struct sock *peer;
2142 struct list_head link;
2143 atomic_long_t inflight;
2144 diff --git a/include/net/tcp.h b/include/net/tcp.h
2145 index 414d822bc1db..9c3ab544d3a8 100644
2146 --- a/include/net/tcp.h
2147 +++ b/include/net/tcp.h
2148 @@ -1510,6 +1510,8 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
2149 {
2150 if (sk->sk_send_head == skb_unlinked)
2151 sk->sk_send_head = NULL;
2152 + if (tcp_sk(sk)->highest_sack == skb_unlinked)
2153 + tcp_sk(sk)->highest_sack = NULL;
2154 }
2155
2156 static inline void tcp_init_send_head(struct sock *sk)
2157 diff --git a/kernel/cpuset.c b/kernel/cpuset.c
2158 index e120bd983ad0..b9279a2844d8 100644
2159 --- a/kernel/cpuset.c
2160 +++ b/kernel/cpuset.c
2161 @@ -2079,7 +2079,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
2162 * which could have been changed by cpuset just after it inherits the
2163 * state from the parent and before it sits on the cgroup's task list.
2164 */
2165 -void cpuset_fork(struct task_struct *task)
2166 +void cpuset_fork(struct task_struct *task, void *priv)
2167 {
2168 if (task_css_is_root(task, cpuset_cgrp_id))
2169 return;
2170 diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
2171 index b7dd5718836e..3124cebaec31 100644
2172 --- a/kernel/power/hibernate.c
2173 +++ b/kernel/power/hibernate.c
2174 @@ -299,12 +299,12 @@ static int create_image(int platform_mode)
2175 save_processor_state();
2176 trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true);
2177 error = swsusp_arch_suspend();
2178 + /* Restore control flow magically appears here */
2179 + restore_processor_state();
2180 trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
2181 if (error)
2182 printk(KERN_ERR "PM: Error %d creating hibernation image\n",
2183 error);
2184 - /* Restore control flow magically appears here */
2185 - restore_processor_state();
2186 if (!in_suspend)
2187 events_check_enabled = false;
2188
2189 diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
2190 index 3a970604308f..f155c62f1f2c 100644
2191 --- a/kernel/power/snapshot.c
2192 +++ b/kernel/power/snapshot.c
2193 @@ -765,9 +765,9 @@ static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
2194 */
2195 static bool rtree_next_node(struct memory_bitmap *bm)
2196 {
2197 - bm->cur.node = list_entry(bm->cur.node->list.next,
2198 - struct rtree_node, list);
2199 - if (&bm->cur.node->list != &bm->cur.zone->leaves) {
2200 + if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
2201 + bm->cur.node = list_entry(bm->cur.node->list.next,
2202 + struct rtree_node, list);
2203 bm->cur.node_pfn += BM_BITS_PER_BLOCK;
2204 bm->cur.node_bit = 0;
2205 touch_softlockup_watchdog();
2206 @@ -775,9 +775,9 @@ static bool rtree_next_node(struct memory_bitmap *bm)
2207 }
2208
2209 /* No more nodes, goto next zone */
2210 - bm->cur.zone = list_entry(bm->cur.zone->list.next,
2211 + if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
2212 + bm->cur.zone = list_entry(bm->cur.zone->list.next,
2213 struct mem_zone_bm_rtree, list);
2214 - if (&bm->cur.zone->list != &bm->zones) {
2215 bm->cur.node = list_entry(bm->cur.zone->leaves.next,
2216 struct rtree_node, list);
2217 bm->cur.node_pfn = 0;
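
The snapshot fix is the standard "check before you advance" list idiom: test list_is_last() on the current entry first, and only then step cur.node (or cur.zone) forward; the old code stepped first, so on the final entry it briefly treated the list head itself as an rtree_node. A sketch with a minimal circular list and a hand-rolled container_of:

#include <stdbool.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static bool list_is_last(const struct list_head *entry,
                         const struct list_head *head)
{
    return entry->next == head;
}

struct rnode { struct list_head list; int pfn; };

/* Advance only after proving there is a next real entry. */
static struct rnode *next_node(struct rnode *cur, struct list_head *leaves)
{
    if (!list_is_last(&cur->list, leaves))
        return (struct rnode *)((char *)cur->list.next -
                                offsetof(struct rnode, list));
    return NULL;    /* would fall through to the next zone here */
}
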
2218 diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
2219 index 9b1044e936a6..05ea5167e6bb 100644
2220 --- a/kernel/trace/Makefile
2221 +++ b/kernel/trace/Makefile
2222 @@ -1,4 +1,8 @@
2223
2224 +# We are fully aware of the dangers of __builtin_return_address()
2225 +FRAME_CFLAGS := $(call cc-disable-warning,frame-address)
2226 +KBUILD_CFLAGS += $(FRAME_CFLAGS)
2227 +
2228 # Do not instrument the tracer itself:
2229
2230 ifdef CONFIG_FUNCTION_TRACER
2231 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2232 index 8305cbb2d5a2..059233abcfcf 100644
2233 --- a/kernel/trace/trace.c
2234 +++ b/kernel/trace/trace.c
2235 @@ -4727,19 +4727,20 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
2236 struct trace_iterator *iter = filp->private_data;
2237 ssize_t sret;
2238
2239 - /* return any leftover data */
2240 - sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2241 - if (sret != -EBUSY)
2242 - return sret;
2243 -
2244 - trace_seq_init(&iter->seq);
2245 -
2246 /*
2247 * Avoid more than one consumer on a single file descriptor
2248 * This is just a matter of traces coherency, the ring buffer itself
2249 * is protected.
2250 */
2251 mutex_lock(&iter->mutex);
2252 +
2253 + /* return any leftover data */
2254 + sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2255 + if (sret != -EBUSY)
2256 + goto out;
2257 +
2258 + trace_seq_init(&iter->seq);
2259 +
2260 if (iter->trace->read) {
2261 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
2262 if (sret)
2263 @@ -5766,9 +5767,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
2264 return -EBUSY;
2265 #endif
2266
2267 - if (splice_grow_spd(pipe, &spd))
2268 - return -ENOMEM;
2269 -
2270 if (*ppos & (PAGE_SIZE - 1))
2271 return -EINVAL;
2272
2273 @@ -5778,6 +5776,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
2274 len &= PAGE_MASK;
2275 }
2276
2277 + if (splice_grow_spd(pipe, &spd))
2278 + return -ENOMEM;
2279 +
2280 again:
2281 trace_access_lock(iter->cpu_file);
2282 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
2283 @@ -5835,19 +5836,21 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
2284 /* did we read anything? */
2285 if (!spd.nr_pages) {
2286 if (ret)
2287 - return ret;
2288 + goto out;
2289
2290 + ret = -EAGAIN;
2291 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
2292 - return -EAGAIN;
2293 + goto out;
2294
2295 ret = wait_on_pipe(iter, true);
2296 if (ret)
2297 - return ret;
2298 + goto out;
2299
2300 goto again;
2301 }
2302
2303 ret = splice_to_pipe(pipe, &spd);
2304 +out:
2305 splice_shrink_spd(&spd);
2306
2307 return ret;
2308 diff --git a/mm/vmscan.c b/mm/vmscan.c
2309 index 0c114e2b01d3..0838e9f02b11 100644
2310 --- a/mm/vmscan.c
2311 +++ b/mm/vmscan.c
2312 @@ -2159,23 +2159,6 @@ out:
2313 }
2314 }
2315
2316 -#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
2317 -static void init_tlb_ubc(void)
2318 -{
2319 - /*
2320 - * This deliberately does not clear the cpumask as it's expensive
2321 - * and unnecessary. If there happens to be data in there then the
2322 - * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and
2323 - * then will be cleared.
2324 - */
2325 - current->tlb_ubc.flush_required = false;
2326 -}
2327 -#else
2328 -static inline void init_tlb_ubc(void)
2329 -{
2330 -}
2331 -#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
2332 -
2333 /*
2334 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
2335 */
2336 @@ -2210,8 +2193,6 @@ static void shrink_lruvec(struct lruvec *lruvec, int swappiness,
2337 scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
2338 sc->priority == DEF_PRIORITY);
2339
2340 - init_tlb_ubc();
2341 -
2342 blk_start_plug(&plug);
2343 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
2344 nr[LRU_INACTIVE_FILE]) {
2345 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
2346 index 7173a685309a..9542e84a9455 100644
2347 --- a/net/bridge/br_multicast.c
2348 +++ b/net/bridge/br_multicast.c
2349 @@ -1113,7 +1113,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
2350 } else {
2351 err = br_ip6_multicast_add_group(br, port,
2352 &grec->grec_mca, vid);
2353 - if (!err)
2354 + if (err)
2355 break;
2356 }
2357 }
2358 diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
2359 index f6c3b2137eea..59ce1fcc220c 100644
2360 --- a/net/caif/cfpkt_skbuff.c
2361 +++ b/net/caif/cfpkt_skbuff.c
2362 @@ -286,7 +286,7 @@ int cfpkt_setlen(struct cfpkt *pkt, u16 len)
2363 else
2364 skb_trim(skb, len);
2365
2366 - return cfpkt_getlen(pkt);
2367 + return cfpkt_getlen(pkt);
2368 }
2369
2370 /* Need to expand SKB */
2371 diff --git a/net/core/dev.c b/net/core/dev.c
2372 index 9efbdb3ff78a..de4ed2b5a221 100644
2373 --- a/net/core/dev.c
2374 +++ b/net/core/dev.c
2375 @@ -3722,6 +3722,22 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2376 }
2377
2378 /**
2379 + * netdev_is_rx_handler_busy - check if receive handler is registered
2380 + * @dev: device to check
2381 + *
2382 + * Check if a receive handler is already registered for a given device.
2383 + * Return true if there is one.
2384 + *
2385 + * The caller must hold the rtnl_mutex.
2386 + */
2387 +bool netdev_is_rx_handler_busy(struct net_device *dev)
2388 +{
2389 + ASSERT_RTNL();
2390 + return dev && rtnl_dereference(dev->rx_handler);
2391 +}
2392 +EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
2393 +
2394 +/**
2395 * netdev_rx_handler_register - register receive handler
2396 * @dev: device to register a handler for
2397 * @rx_handler: receive handler to register
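
netdev_is_rx_handler_busy() lets callers probe for an existing rx_handler before committing to register their own; like the register path itself it must run under RTNL, which is what the ASSERT_RTNL() documents. A user-space sketch of the probe-then-register pattern, with a pthread mutex standing in for the RTNL lock and a hypothetical enslave-style caller:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

typedef int (*rx_handler_t)(void *pkt);

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER; /* stands in for RTNL */
static rx_handler_t rx_handler;

/* Caller must hold the lock - the role ASSERT_RTNL() plays in the patch. */
static bool rx_handler_busy(void)
{
    return rx_handler != NULL;
}

static int rx_handler_register(rx_handler_t h)
{
    if (rx_handler_busy())
        return -EBUSY;  /* same contract as netdev_rx_handler_register() */
    rx_handler = h;
    return 0;
}

/* Hypothetical caller probing before it commits to any setup work. */
static int try_enslave(rx_handler_t h)
{
    int err;

    pthread_mutex_lock(&rtnl);
    err = rx_handler_busy() ? -EBUSY : rx_handler_register(h);
    pthread_mutex_unlock(&rtnl);
    return err;
}

Holding the lock across both the probe and the register call is what makes the check meaningful; dropping it in between would reopen the race.
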
2398 diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
2399 index 744e5936c10d..e5a3ff210fec 100644
2400 --- a/net/ipv4/fib_trie.c
2401 +++ b/net/ipv4/fib_trie.c
2402 @@ -2453,9 +2453,7 @@ struct fib_route_iter {
2403 static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
2404 loff_t pos)
2405 {
2406 - struct fib_table *tb = iter->main_tb;
2407 struct key_vector *l, **tp = &iter->tnode;
2408 - struct trie *t;
2409 t_key key;
2410
2411 /* use cache location of next-to-find key */
2412 @@ -2463,8 +2461,6 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
2413 pos -= iter->pos;
2414 key = iter->key;
2415 } else {
2416 - t = (struct trie *)tb->tb_data;
2417 - iter->tnode = t->kv;
2418 iter->pos = 0;
2419 key = 0;
2420 }
2421 @@ -2505,12 +2501,12 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
2422 return NULL;
2423
2424 iter->main_tb = tb;
2425 + t = (struct trie *)tb->tb_data;
2426 + iter->tnode = t->kv;
2427
2428 if (*pos != 0)
2429 return fib_route_get_idx(iter, *pos);
2430
2431 - t = (struct trie *)tb->tb_data;
2432 - iter->tnode = t->kv;
2433 iter->pos = 0;
2434 iter->key = 0;
2435
2436 diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
2437 index 4d8f0b698777..65036891e080 100644
2438 --- a/net/ipv4/ip_vti.c
2439 +++ b/net/ipv4/ip_vti.c
2440 @@ -540,6 +540,33 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
2441 .get_link_net = ip_tunnel_get_link_net,
2442 };
2443
2444 +static bool is_vti_tunnel(const struct net_device *dev)
2445 +{
2446 + return dev->netdev_ops == &vti_netdev_ops;
2447 +}
2448 +
2449 +static int vti_device_event(struct notifier_block *unused,
2450 + unsigned long event, void *ptr)
2451 +{
2452 + struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2453 + struct ip_tunnel *tunnel = netdev_priv(dev);
2454 +
2455 + if (!is_vti_tunnel(dev))
2456 + return NOTIFY_DONE;
2457 +
2458 + switch (event) {
2459 + case NETDEV_DOWN:
2460 + if (!net_eq(tunnel->net, dev_net(dev)))
2461 + xfrm_garbage_collect(tunnel->net);
2462 + break;
2463 + }
2464 + return NOTIFY_DONE;
2465 +}
2466 +
2467 +static struct notifier_block vti_notifier_block __read_mostly = {
2468 + .notifier_call = vti_device_event,
2469 +};
2470 +
2471 static int __init vti_init(void)
2472 {
2473 const char *msg;
2474 @@ -547,6 +574,8 @@ static int __init vti_init(void)
2475
2476 pr_info("IPv4 over IPsec tunneling driver\n");
2477
2478 + register_netdevice_notifier(&vti_notifier_block);
2479 +
2480 msg = "tunnel device";
2481 err = register_pernet_device(&vti_net_ops);
2482 if (err < 0)
2483 @@ -579,6 +608,7 @@ xfrm_proto_ah_failed:
2484 xfrm_proto_esp_failed:
2485 unregister_pernet_device(&vti_net_ops);
2486 pernet_dev_failed:
2487 + unregister_netdevice_notifier(&vti_notifier_block);
2488 pr_err("vti init: failed to register %s\n", msg);
2489 return err;
2490 }
2491 @@ -590,6 +620,7 @@ static void __exit vti_fini(void)
2492 xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
2493 xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
2494 unregister_pernet_device(&vti_net_ops);
2495 + unregister_netdevice_notifier(&vti_notifier_block);
2496 }
2497
2498 module_init(vti_init);
2499 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
2500 index 048418b049d8..b5853cac3269 100644
2501 --- a/net/ipv4/tcp_ipv4.c
2502 +++ b/net/ipv4/tcp_ipv4.c
2503 @@ -808,8 +808,14 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
2504 u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
2505 tcp_sk(sk)->snd_nxt;
2506
2507 + /* RFC 7323 2.3
2508 + * The window field (SEG.WND) of every outgoing segment, with the
2509 + * exception of <SYN> segments, MUST be right-shifted by
2510 + * Rcv.Wind.Shift bits:
2511 + */
2512 tcp_v4_send_ack(sock_net(sk), skb, seq,
2513 - tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
2514 + tcp_rsk(req)->rcv_nxt,
2515 + req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
2516 tcp_time_stamp,
2517 req->ts_recent,
2518 0,
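
Per the RFC 7323 rule quoted in the hunk, the 16-bit window field carries the real receive window right-shifted by the negotiated shift, and the peer scales it back up on receipt; sending the unshifted rsk_rcv_wnd therefore overstated the window by a factor of 2^rcv_wscale. A worked example (the same change is applied to the IPv6 path further below):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t rcv_wnd = 262144;   /* 256 KiB real receive window */
    uint8_t  wscale  = 7;        /* negotiated Rcv.Wind.Shift */

    uint16_t seg_wnd = (uint16_t)(rcv_wnd >> wscale);   /* 2048 on the wire */
    uint32_t peer_view = (uint32_t)seg_wnd << wscale;   /* 262144 again */

    printf("on wire: %u, reconstructed: %u\n", seg_wnd, peer_view);
    return 0;
}
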
2519 diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
2520 index 3e6a472e6b88..92ab5bc91592 100644
2521 --- a/net/ipv4/tcp_yeah.c
2522 +++ b/net/ipv4/tcp_yeah.c
2523 @@ -75,7 +75,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
2524 if (!tcp_is_cwnd_limited(sk))
2525 return;
2526
2527 - if (tp->snd_cwnd <= tp->snd_ssthresh)
2528 + if (tcp_in_slow_start(tp))
2529 tcp_slow_start(tp, acked);
2530
2531 else if (!yeah->doing_reno_now) {
2532 diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
2533 index 263a5164a6f5..3e55447b63a4 100644
2534 --- a/net/ipv6/ping.c
2535 +++ b/net/ipv6/ping.c
2536 @@ -150,8 +150,10 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
2537 rt = (struct rt6_info *) dst;
2538
2539 np = inet6_sk(sk);
2540 - if (!np)
2541 - return -EBADF;
2542 + if (!np) {
2543 + err = -EBADF;
2544 + goto dst_err_out;
2545 + }
2546
2547 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
2548 fl6.flowi6_oif = np->mcast_oif;
2549 @@ -186,6 +188,9 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
2550 }
2551 release_sock(sk);
2552
2553 +dst_err_out:
2554 + dst_release(dst);
2555 +
2556 if (err)
2557 return err;
2558
2559 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
2560 index 1a1cd3938fd0..2d81e2f33ef2 100644
2561 --- a/net/ipv6/tcp_ipv6.c
2562 +++ b/net/ipv6/tcp_ipv6.c
2563 @@ -932,9 +932,15 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
2564 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
2565 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
2566 */
2567 + /* RFC 7323 2.3
2568 + * The window field (SEG.WND) of every outgoing segment, with the
2569 + * exception of <SYN> segments, MUST be right-shifted by
2570 + * Rcv.Wind.Shift bits:
2571 + */
2572 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
2573 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
2574 - tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
2575 + tcp_rsk(req)->rcv_nxt,
2576 + req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
2577 tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
2578 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
2579 0, 0);
2580 diff --git a/net/irda/iriap.c b/net/irda/iriap.c
2581 index 4a7ae32afa09..1138eaf5c682 100644
2582 --- a/net/irda/iriap.c
2583 +++ b/net/irda/iriap.c
2584 @@ -185,8 +185,12 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
2585
2586 self->magic = IAS_MAGIC;
2587 self->mode = mode;
2588 - if (mode == IAS_CLIENT)
2589 - iriap_register_lsap(self, slsap_sel, mode);
2590 + if (mode == IAS_CLIENT) {
2591 + if (iriap_register_lsap(self, slsap_sel, mode)) {
2592 + kfree(self);
2593 + return NULL;
2594 + }
2595 + }
2596
2597 self->confirm = callback;
2598 self->priv = priv;
2599 diff --git a/net/tipc/socket.c b/net/tipc/socket.c
2600 index 9b713e0ce00d..b26b7a127773 100644
2601 --- a/net/tipc/socket.c
2602 +++ b/net/tipc/socket.c
2603 @@ -2111,7 +2111,8 @@ restart:
2604 TIPC_CONN_MSG, SHORT_H_SIZE,
2605 0, dnode, onode, dport, oport,
2606 TIPC_CONN_SHUTDOWN);
2607 - tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
2608 + if (skb)
2609 + tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
2610 }
2611 tsk->connected = 0;
2612 sock->state = SS_DISCONNECTING;
2613 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
2614 index 6579fd6e7459..824cc1e160bc 100644
2615 --- a/net/unix/af_unix.c
2616 +++ b/net/unix/af_unix.c
2617 @@ -661,11 +661,11 @@ static int unix_set_peek_off(struct sock *sk, int val)
2618 {
2619 struct unix_sock *u = unix_sk(sk);
2620
2621 - if (mutex_lock_interruptible(&u->readlock))
2622 + if (mutex_lock_interruptible(&u->iolock))
2623 return -EINTR;
2624
2625 sk->sk_peek_off = val;
2626 - mutex_unlock(&u->readlock);
2627 + mutex_unlock(&u->iolock);
2628
2629 return 0;
2630 }
2631 @@ -778,7 +778,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
2632 spin_lock_init(&u->lock);
2633 atomic_long_set(&u->inflight, 0);
2634 INIT_LIST_HEAD(&u->link);
2635 - mutex_init(&u->readlock); /* single task reading lock */
2636 + mutex_init(&u->iolock); /* single task reading lock */
2637 + mutex_init(&u->bindlock); /* single task binding lock */
2638 init_waitqueue_head(&u->peer_wait);
2639 init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
2640 unix_insert_socket(unix_sockets_unbound(sk), sk);
2641 @@ -847,7 +848,7 @@ static int unix_autobind(struct socket *sock)
2642 int err;
2643 unsigned int retries = 0;
2644
2645 - err = mutex_lock_interruptible(&u->readlock);
2646 + err = mutex_lock_interruptible(&u->bindlock);
2647 if (err)
2648 return err;
2649
2650 @@ -894,7 +895,7 @@ retry:
2651 spin_unlock(&unix_table_lock);
2652 err = 0;
2653
2654 -out: mutex_unlock(&u->readlock);
2655 +out: mutex_unlock(&u->bindlock);
2656 return err;
2657 }
2658
2659 @@ -953,20 +954,32 @@ fail:
2660 return NULL;
2661 }
2662
2663 -static int unix_mknod(struct dentry *dentry, struct path *path, umode_t mode,
2664 - struct path *res)
2665 +static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
2666 {
2667 - int err;
2668 + struct dentry *dentry;
2669 + struct path path;
2670 + int err = 0;
2671 + /*
2672 + * Get the parent directory, calculate the hash for last
2673 + * component.
2674 + */
2675 + dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
2676 + err = PTR_ERR(dentry);
2677 + if (IS_ERR(dentry))
2678 + return err;
2679
2680 - err = security_path_mknod(path, dentry, mode, 0);
2681 + /*
2682 + * All right, let's create it.
2683 + */
2684 + err = security_path_mknod(&path, dentry, mode, 0);
2685 if (!err) {
2686 - err = vfs_mknod(d_inode(path->dentry), dentry, mode, 0);
2687 + err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
2688 if (!err) {
2689 - res->mnt = mntget(path->mnt);
2690 + res->mnt = mntget(path.mnt);
2691 res->dentry = dget(dentry);
2692 }
2693 }
2694 -
2695 + done_path_create(&path, dentry);
2696 return err;
2697 }
2698
2699 @@ -977,12 +990,10 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2700 struct unix_sock *u = unix_sk(sk);
2701 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
2702 char *sun_path = sunaddr->sun_path;
2703 - int err, name_err;
2704 + int err;
2705 unsigned int hash;
2706 struct unix_address *addr;
2707 struct hlist_head *list;
2708 - struct path path;
2709 - struct dentry *dentry;
2710
2711 err = -EINVAL;
2712 if (sunaddr->sun_family != AF_UNIX)
2713 @@ -998,34 +1009,14 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2714 goto out;
2715 addr_len = err;
2716
2717 - name_err = 0;
2718 - dentry = NULL;
2719 - if (sun_path[0]) {
2720 - /* Get the parent directory, calculate the hash for last
2721 - * component.
2722 - */
2723 - dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
2724 -
2725 - if (IS_ERR(dentry)) {
2726 - /* delay report until after 'already bound' check */
2727 - name_err = PTR_ERR(dentry);
2728 - dentry = NULL;
2729 - }
2730 - }
2731 -
2732 - err = mutex_lock_interruptible(&u->readlock);
2733 + err = mutex_lock_interruptible(&u->bindlock);
2734 if (err)
2735 - goto out_path;
2736 + goto out;
2737
2738 err = -EINVAL;
2739 if (u->addr)
2740 goto out_up;
2741
2742 - if (name_err) {
2743 - err = name_err == -EEXIST ? -EADDRINUSE : name_err;
2744 - goto out_up;
2745 - }
2746 -
2747 err = -ENOMEM;
2748 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
2749 if (!addr)
2750 @@ -1036,11 +1027,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2751 addr->hash = hash ^ sk->sk_type;
2752 atomic_set(&addr->refcnt, 1);
2753
2754 - if (dentry) {
2755 - struct path u_path;
2756 + if (sun_path[0]) {
2757 + struct path path;
2758 umode_t mode = S_IFSOCK |
2759 (SOCK_INODE(sock)->i_mode & ~current_umask());
2760 - err = unix_mknod(dentry, &path, mode, &u_path);
2761 + err = unix_mknod(sun_path, mode, &path);
2762 if (err) {
2763 if (err == -EEXIST)
2764 err = -EADDRINUSE;
2765 @@ -1048,9 +1039,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2766 goto out_up;
2767 }
2768 addr->hash = UNIX_HASH_SIZE;
2769 - hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
2770 + hash = d_real_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
2771 spin_lock(&unix_table_lock);
2772 - u->path = u_path;
2773 + u->path = path;
2774 list = &unix_socket_table[hash];
2775 } else {
2776 spin_lock(&unix_table_lock);
2777 @@ -1072,11 +1063,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2778 out_unlock:
2779 spin_unlock(&unix_table_lock);
2780 out_up:
2781 - mutex_unlock(&u->readlock);
2782 -out_path:
2783 - if (dentry)
2784 - done_path_create(&path, dentry);
2785 -
2786 + mutex_unlock(&u->bindlock);
2787 out:
2788 return err;
2789 }
2790 @@ -1971,17 +1958,17 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
2791 if (false) {
2792 alloc_skb:
2793 unix_state_unlock(other);
2794 - mutex_unlock(&unix_sk(other)->readlock);
2795 + mutex_unlock(&unix_sk(other)->iolock);
2796 newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
2797 &err, 0);
2798 if (!newskb)
2799 goto err;
2800 }
2801
2802 - /* we must acquire readlock as we modify already present
2803 + /* we must acquire iolock as we modify already present
2804 * skbs in the sk_receive_queue and mess with skb->len
2805 */
2806 - err = mutex_lock_interruptible(&unix_sk(other)->readlock);
2807 + err = mutex_lock_interruptible(&unix_sk(other)->iolock);
2808 if (err) {
2809 err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
2810 goto err;
2811 @@ -2048,7 +2035,7 @@ alloc_skb:
2812 }
2813
2814 unix_state_unlock(other);
2815 - mutex_unlock(&unix_sk(other)->readlock);
2816 + mutex_unlock(&unix_sk(other)->iolock);
2817
2818 other->sk_data_ready(other);
2819 scm_destroy(&scm);
2820 @@ -2057,7 +2044,7 @@ alloc_skb:
2821 err_state_unlock:
2822 unix_state_unlock(other);
2823 err_unlock:
2824 - mutex_unlock(&unix_sk(other)->readlock);
2825 + mutex_unlock(&unix_sk(other)->iolock);
2826 err:
2827 kfree_skb(newskb);
2828 if (send_sigpipe && !(flags & MSG_NOSIGNAL))
2829 @@ -2122,7 +2109,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
2830 if (flags&MSG_OOB)
2831 goto out;
2832
2833 - err = mutex_lock_interruptible(&u->readlock);
2834 + err = mutex_lock_interruptible(&u->iolock);
2835 if (unlikely(err)) {
2836 /* recvmsg() in non blocking mode is supposed to return -EAGAIN
2837 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
2838 @@ -2198,7 +2185,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
2839 out_free:
2840 skb_free_datagram(sk, skb);
2841 out_unlock:
2842 - mutex_unlock(&u->readlock);
2843 + mutex_unlock(&u->iolock);
2844 out:
2845 return err;
2846 }
2847 @@ -2293,7 +2280,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
2848 /* Lock the socket to prevent queue disordering
2849 * while sleeps in memcpy_tomsg
2850 */
2851 - mutex_lock(&u->readlock);
2852 + mutex_lock(&u->iolock);
2853
2854 if (flags & MSG_PEEK)
2855 skip = sk_peek_offset(sk, flags);
2856 @@ -2334,7 +2321,7 @@ again:
2857 break;
2858 }
2859
2860 - mutex_unlock(&u->readlock);
2861 + mutex_unlock(&u->iolock);
2862
2863 timeo = unix_stream_data_wait(sk, timeo, last,
2864 last_len);
2865 @@ -2345,7 +2332,7 @@ again:
2866 goto out;
2867 }
2868
2869 - mutex_lock(&u->readlock);
2870 + mutex_lock(&u->iolock);
2871 continue;
2872 unlock:
2873 unix_state_unlock(sk);
2874 @@ -2448,7 +2435,7 @@ unlock:
2875 }
2876 } while (size);
2877
2878 - mutex_unlock(&u->readlock);
2879 + mutex_unlock(&u->iolock);
2880 if (state->msg)
2881 scm_recv(sock, state->msg, &scm, flags);
2882 else
2883 @@ -2489,9 +2476,9 @@ static ssize_t skb_unix_socket_splice(struct sock *sk,
2884 int ret;
2885 struct unix_sock *u = unix_sk(sk);
2886
2887 - mutex_unlock(&u->readlock);
2888 + mutex_unlock(&u->iolock);
2889 ret = splice_to_pipe(pipe, spd);
2890 - mutex_lock(&u->readlock);
2891 + mutex_lock(&u->iolock);
2892
2893 return ret;
2894 }
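
Splitting readlock into iolock and bindlock separates two unrelated jobs: iolock serializes senders and receivers on the socket queue, while bindlock is taken only by bind()/autobind, so the filesystem work now contained in unix_mknod() (kern_path_create, security check, vfs_mknod, done_path_create) no longer nests inside the lock that the read and splice paths contend on. A minimal pthread sketch of the split, under those assumptions:

#include <pthread.h>

/* Disjoint duties, as in the patched struct unix_sock. */
struct usock {
    pthread_mutex_t iolock;    /* serializes senders/receivers on the queue */
    pthread_mutex_t bindlock;  /* taken only by bind()/autobind */
    int bound;
};

static struct usock u0 = {
    .iolock   = PTHREAD_MUTEX_INITIALIZER,
    .bindlock = PTHREAD_MUTEX_INITIALIZER,
};

/* Bind path: the slow unix_mknod()-style work can sleep here without
 * ever blocking a reader or writer that holds only iolock. */
static int sock_bind(struct usock *u)
{
    int err = 0;

    pthread_mutex_lock(&u->bindlock);
    if (u->bound)
        err = -1;    /* -EINVAL in the patch: already bound */
    else
        u->bound = 1; /* mknod + hash-table insertion happen here */
    pthread_mutex_unlock(&u->bindlock);
    return err;
}
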
2895 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
2896 index 5d89f13a98db..bf65f31bd55e 100644
2897 --- a/net/wireless/nl80211.c
2898 +++ b/net/wireless/nl80211.c
2899 @@ -6628,7 +6628,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
2900
2901 params.n_counter_offsets_presp = len / sizeof(u16);
2902 if (rdev->wiphy.max_num_csa_counters &&
2903 - (params.n_counter_offsets_beacon >
2904 + (params.n_counter_offsets_presp >
2905 rdev->wiphy.max_num_csa_counters))
2906 return -EINVAL;
2907