Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0295-4.9.196-all-fixes.patch



Revision 3570
Thu Aug 13 10:21:13 2020 UTC by niro
File size: 45803 bytes
linux-196
1 diff --git a/Makefile b/Makefile
2 index bee0218e3fb5..194c35eff19c 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 195
9 +SUBLEVEL = 196
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
14 index 5ca207ada852..2539c8f9fb3f 100644
15 --- a/arch/arm/mm/fault.c
16 +++ b/arch/arm/mm/fault.c
17 @@ -214,7 +214,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
18 {
19 unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
20
21 - if (fsr & FSR_WRITE)
22 + if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
23 mask = VM_WRITE;
24 if (fsr & FSR_LNX_PF)
25 mask = VM_EXEC;
26 @@ -284,7 +284,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
27
28 if (user_mode(regs))
29 flags |= FAULT_FLAG_USER;
30 - if (fsr & FSR_WRITE)
31 + if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
32 flags |= FAULT_FLAG_WRITE;
33
34 /*
35 diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
36 index afc1f84e763b..9bc272642d55 100644
37 --- a/arch/arm/mm/fault.h
38 +++ b/arch/arm/mm/fault.h
39 @@ -5,6 +5,7 @@
40 * Fault status register encodings. We steal bit 31 for our own purposes.
41 */
42 #define FSR_LNX_PF (1 << 31)
43 +#define FSR_CM (1 << 13)
44 #define FSR_WRITE (1 << 11)
45 #define FSR_FS4 (1 << 10)
46 #define FSR_FS3_0 (15)
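
The arm/mm hunks above stop treating cache-maintenance faults (the new FSR_CM bit, bit 13) as write faults, so such a fault on a read-only mapping is no longer misreported as a write permission error. A minimal userspace sketch of the new bit test, reusing the bit positions from the hunk purely for illustration:

#include <stdbool.h>
#include <stdio.h>

#define FSR_CM    (1u << 13)  /* cache maintenance operation, per the new define */
#define FSR_WRITE (1u << 11)  /* write access */

/* Mirrors the patched condition: a fault counts as a write only when the
 * write bit is set and the cache-maintenance bit is clear. */
static bool fault_is_write(unsigned int fsr)
{
    return (fsr & FSR_WRITE) && !(fsr & FSR_CM);
}

int main(void)
{
    printf("plain write fault    -> %d\n", fault_is_write(FSR_WRITE));
    printf("cache maintenance op -> %d\n", fault_is_write(FSR_WRITE | FSR_CM));
    printf("read fault           -> %d\n", fault_is_write(0));
    return 0;
}
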
47 diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
48 index f7c741358f37..241bf898adf5 100644
49 --- a/arch/arm/mm/mmu.c
50 +++ b/arch/arm/mm/mmu.c
51 @@ -1168,6 +1168,22 @@ void __init adjust_lowmem_bounds(void)
52 */
53 vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
54
55 + /*
56 + * The first usable region must be PMD aligned. Mark its start
57 + * as MEMBLOCK_NOMAP if it isn't
58 + */
59 + for_each_memblock(memory, reg) {
60 + if (!memblock_is_nomap(reg)) {
61 + if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
62 + phys_addr_t len;
63 +
64 + len = round_up(reg->base, PMD_SIZE) - reg->base;
65 + memblock_mark_nomap(reg->base, len);
66 + }
67 + break;
68 + }
69 + }
70 +
71 for_each_memblock(memory, reg) {
72 phys_addr_t block_start = reg->base;
73 phys_addr_t block_end = reg->base + reg->size;
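
The mmu.c hunk above marks the unaligned head of the first usable memblock region as MEMBLOCK_NOMAP by rounding the base up to the next PMD boundary and computing the gap. A standalone sketch of that arithmetic, assuming a 2 MiB PMD size (the common value with 4 KiB pages; the real value comes from the kernel headers):

#include <inttypes.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

#define PMD_SIZE (2ULL * 1024 * 1024)  /* assumed 2 MiB section size */

/* round_up() as used in the hunk: next multiple of align (power of two). */
static phys_addr_t round_up_pow2(phys_addr_t x, phys_addr_t align)
{
    return (x + align - 1) & ~(align - 1);
}

int main(void)
{
    phys_addr_t base = 0x80300000ULL;  /* an unaligned example base */
    phys_addr_t len  = round_up_pow2(base, PMD_SIZE) - base;

    printf("base 0x%" PRIx64 ": mark 0x%" PRIx64 " bytes NOMAP, up to 0x%" PRIx64 "\n",
           base, len, base + len);
    return 0;
}
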
74 diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
75 index 0f2e1ab5e166..9b2e2e2e728a 100644
76 --- a/arch/arm64/include/asm/cmpxchg.h
77 +++ b/arch/arm64/include/asm/cmpxchg.h
78 @@ -73,7 +73,7 @@ __XCHG_CASE( , , mb_8, dmb ish, nop, , a, l, "memory")
79 #undef __XCHG_CASE
80
81 #define __XCHG_GEN(sfx) \
82 -static inline unsigned long __xchg##sfx(unsigned long x, \
83 +static __always_inline unsigned long __xchg##sfx(unsigned long x, \
84 volatile void *ptr, \
85 int size) \
86 { \
87 @@ -115,7 +115,7 @@ __XCHG_GEN(_mb)
88 #define xchg(...) __xchg_wrapper( _mb, __VA_ARGS__)
89
90 #define __CMPXCHG_GEN(sfx) \
91 -static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
92 +static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
93 unsigned long old, \
94 unsigned long new, \
95 int size) \
96 @@ -248,7 +248,7 @@ __CMPWAIT_CASE( , , 8);
97 #undef __CMPWAIT_CASE
98
99 #define __CMPWAIT_GEN(sfx) \
100 -static inline void __cmpwait##sfx(volatile void *ptr, \
101 +static __always_inline void __cmpwait##sfx(volatile void *ptr, \
102 unsigned long val, \
103 int size) \
104 { \
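
The arm64 cmpxchg change above swaps plain inline for __always_inline on the macro-generated wrappers so the switch on the size argument is guaranteed to fold away at every call site, even when the compiler's inlining heuristics would otherwise decline. A hedged userspace sketch of the same size-dispatch pattern using the GCC/Clang attribute directly:

#include <stdio.h>

/* Size-dispatched load; in the kernel one such wrapper is generated per
 * variant by a macro. Forcing inlining makes `size` a compile-time constant
 * at each call site, so the switch collapses to a single load. */
static inline __attribute__((always_inline))
unsigned long load_sized(const volatile void *ptr, int size)
{
    switch (size) {
    case 1: return *(const volatile unsigned char  *)ptr;
    case 2: return *(const volatile unsigned short *)ptr;
    case 4: return *(const volatile unsigned int   *)ptr;
    case 8: return *(const volatile unsigned long  *)ptr;
    default: __builtin_unreachable();
    }
}

#define load(x) load_sized(&(x), sizeof(x))

int main(void)
{
    unsigned short s = 7;
    unsigned long  l = 42;
    printf("%lu %lu\n", load(s), load(l));
    return 0;
}
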
105 diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
106 index 3cc5b2e4263c..47d50197789b 100644
107 --- a/arch/mips/mm/tlbex.c
108 +++ b/arch/mips/mm/tlbex.c
109 @@ -637,7 +637,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
110 return;
111 }
112
113 - if (cpu_has_rixi && _PAGE_NO_EXEC) {
114 + if (cpu_has_rixi && !!_PAGE_NO_EXEC) {
115 if (fill_includes_sw_bits) {
116 UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
117 } else {
118 diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
119 index f4c7467f7465..b73ab8a7ebc3 100644
120 --- a/arch/powerpc/include/asm/futex.h
121 +++ b/arch/powerpc/include/asm/futex.h
122 @@ -60,8 +60,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
123
124 pagefault_enable();
125
126 - if (!ret)
127 - *oval = oldval;
128 + *oval = oldval;
129
130 return ret;
131 }
132 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
133 index 92474227262b..0c8b966e8070 100644
134 --- a/arch/powerpc/kernel/exceptions-64s.S
135 +++ b/arch/powerpc/kernel/exceptions-64s.S
136 @@ -467,6 +467,10 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
137 RFI_TO_USER_OR_KERNEL
138 9:
139 /* Deliver the machine check to host kernel in V mode. */
140 +BEGIN_FTR_SECTION
141 + ld r10,ORIG_GPR3(r1)
142 + mtspr SPRN_CFAR,r10
143 +END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
144 MACHINE_CHECK_HANDLER_WINDUP
145 b machine_check_pSeries
146
147 diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
148 index 6a3e5de544ce..a309a7a29cc6 100644
149 --- a/arch/powerpc/kernel/rtas.c
150 +++ b/arch/powerpc/kernel/rtas.c
151 @@ -874,15 +874,17 @@ static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
152 return 0;
153
154 for_each_cpu(cpu, cpus) {
155 + struct device *dev = get_cpu_device(cpu);
156 +
157 switch (state) {
158 case DOWN:
159 - cpuret = cpu_down(cpu);
160 + cpuret = device_offline(dev);
161 break;
162 case UP:
163 - cpuret = cpu_up(cpu);
164 + cpuret = device_online(dev);
165 break;
166 }
167 - if (cpuret) {
168 + if (cpuret < 0) {
169 pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
170 __func__,
171 ((state == UP) ? "up" : "down"),
172 @@ -971,6 +973,8 @@ int rtas_ibm_suspend_me(u64 handle)
173 data.token = rtas_token("ibm,suspend-me");
174 data.complete = &done;
175
176 + lock_device_hotplug();
177 +
178 /* All present CPUs must be online */
179 cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
180 cpuret = rtas_online_cpus_mask(offline_mask);
181 @@ -1002,6 +1006,7 @@ int rtas_ibm_suspend_me(u64 handle)
182 __func__);
183
184 out:
185 + unlock_device_hotplug();
186 free_cpumask_var(offline_mask);
187 return atomic_read(&data.error);
188 }
189 diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
190 index 3784a7abfcc8..74791e8382d2 100644
191 --- a/arch/powerpc/platforms/pseries/mobility.c
192 +++ b/arch/powerpc/platforms/pseries/mobility.c
193 @@ -11,6 +11,7 @@
194
195 #include <linux/kernel.h>
196 #include <linux/kobject.h>
197 +#include <linux/sched.h>
198 #include <linux/smp.h>
199 #include <linux/stat.h>
200 #include <linux/completion.h>
201 @@ -206,7 +207,11 @@ static int update_dt_node(__be32 phandle, s32 scope)
202
203 prop_data += vd;
204 }
205 +
206 + cond_resched();
207 }
208 +
209 + cond_resched();
210 } while (rtas_rc == 1);
211
212 of_node_put(dn);
213 @@ -282,8 +287,12 @@ int pseries_devicetree_update(s32 scope)
214 add_dt_node(phandle, drc_index);
215 break;
216 }
217 +
218 + cond_resched();
219 }
220 }
221 +
222 + cond_resched();
223 } while (rc == 1);
224
225 kfree(rtas_buf);
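
The mobility.c hunks above drop cond_resched() calls into the RTAS device-tree update loops so a long update cannot hog the CPU on a non-preemptible kernel. The nearest userspace analogue is periodically yielding inside a long loop; a minimal sketch with POSIX sched_yield(), illustrative only, since cond_resched() itself only reschedules when the scheduler has asked for it:

#include <sched.h>
#include <stdio.h>

/* Process a large batch of work items, periodically offering the CPU back
 * to the scheduler so other tasks are not starved. */
static void process_batch(int nitems)
{
    for (int i = 0; i < nitems; i++) {
        /* ... per-item work would go here ... */

        if ((i & 1023) == 0)  /* every 1024 items, give others a turn */
            sched_yield();
    }
}

int main(void)
{
    process_batch(1 << 20);
    puts("done");
    return 0;
}
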
226 diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
227 index adb09ab87f7c..30782859d898 100644
228 --- a/arch/powerpc/platforms/pseries/setup.c
229 +++ b/arch/powerpc/platforms/pseries/setup.c
230 @@ -298,6 +298,9 @@ static void pseries_lpar_idle(void)
231 * low power mode by ceding processor to hypervisor
232 */
233
234 + if (!prep_irq_for_idle())
235 + return;
236 +
237 /* Indicate to hypervisor that we are idle. */
238 get_lppaca()->idle = 1;
239
240 diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
241 index 2a17123130d3..224aeda1e8cc 100644
242 --- a/arch/s390/hypfs/inode.c
243 +++ b/arch/s390/hypfs/inode.c
244 @@ -267,7 +267,7 @@ static int hypfs_show_options(struct seq_file *s, struct dentry *root)
245 static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
246 {
247 struct inode *root_inode;
248 - struct dentry *root_dentry;
249 + struct dentry *root_dentry, *update_file;
250 int rc = 0;
251 struct hypfs_sb_info *sbi;
252
253 @@ -298,9 +298,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
254 rc = hypfs_diag_create_files(root_dentry);
255 if (rc)
256 return rc;
257 - sbi->update_file = hypfs_create_update_file(root_dentry);
258 - if (IS_ERR(sbi->update_file))
259 - return PTR_ERR(sbi->update_file);
260 + update_file = hypfs_create_update_file(root_dentry);
261 + if (IS_ERR(update_file))
262 + return PTR_ERR(update_file);
263 + sbi->update_file = update_file;
264 hypfs_update_update(sb);
265 pr_info("Hypervisor filesystem mounted\n");
266 return 0;
267 diff --git a/drivers/android/binder.c b/drivers/android/binder.c
268 index 8056759073b0..e12288c245b5 100644
269 --- a/drivers/android/binder.c
270 +++ b/drivers/android/binder.c
271 @@ -334,7 +334,8 @@ enum {
272 BINDER_LOOPER_STATE_EXITED = 0x04,
273 BINDER_LOOPER_STATE_INVALID = 0x08,
274 BINDER_LOOPER_STATE_WAITING = 0x10,
275 - BINDER_LOOPER_STATE_NEED_RETURN = 0x20
276 + BINDER_LOOPER_STATE_NEED_RETURN = 0x20,
277 + BINDER_LOOPER_STATE_POLL = 0x40,
278 };
279
280 struct binder_thread {
281 @@ -2628,6 +2629,27 @@ static int binder_free_thread(struct binder_proc *proc,
282 } else
283 BUG();
284 }
285 +
286 + /*
287 + * If this thread used poll, make sure we remove the waitqueue
288 + * from any epoll data structures holding it with POLLFREE.
289 + * waitqueue_active() is safe to use here because we're holding
290 + * the global lock.
291 + */
292 + if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
293 + waitqueue_active(&thread->wait)) {
294 + wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
295 + }
296 +
297 + /*
298 + * This is needed to avoid races between wake_up_poll() above and
299 + * and ep_remove_waitqueue() called for other reasons (eg the epoll file
300 + * descriptor being closed); ep_remove_waitqueue() holds an RCU read
301 + * lock, so we can be sure it's done after calling synchronize_rcu().
302 + */
303 + if (thread->looper & BINDER_LOOPER_STATE_POLL)
304 + synchronize_rcu();
305 +
306 if (send_reply)
307 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
308 binder_release_work(&thread->todo);
309 @@ -2651,6 +2673,8 @@ static unsigned int binder_poll(struct file *filp,
310 return POLLERR;
311 }
312
313 + thread->looper |= BINDER_LOOPER_STATE_POLL;
314 +
315 wait_for_proc_work = thread->transaction_stack == NULL &&
316 list_empty(&thread->todo) && thread->return_error == BR_OK;
317
318 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
319 index e0a53156b782..82af65818444 100644
320 --- a/drivers/char/ipmi/ipmi_si_intf.c
321 +++ b/drivers/char/ipmi/ipmi_si_intf.c
322 @@ -283,6 +283,9 @@ struct smi_info {
323 */
324 bool irq_enable_broken;
325
326 + /* Is the driver in maintenance mode? */
327 + bool in_maintenance_mode;
328 +
329 /*
330 * Did we get an attention that we did not handle?
331 */
332 @@ -1093,11 +1096,20 @@ static int ipmi_thread(void *data)
333 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
334 busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
335 &busy_until);
336 - if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
337 + if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
338 ; /* do nothing */
339 - else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
340 - schedule();
341 - else if (smi_result == SI_SM_IDLE) {
342 + } else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
343 + /*
344 + * In maintenance mode we run as fast as
345 + * possible to allow firmware updates to
346 + * complete as fast as possible, but normally
347 + * don't bang on the scheduler.
348 + */
349 + if (smi_info->in_maintenance_mode)
350 + schedule();
351 + else
352 + usleep_range(100, 200);
353 + } else if (smi_result == SI_SM_IDLE) {
354 if (atomic_read(&smi_info->need_watch)) {
355 schedule_timeout_interruptible(100);
356 } else {
357 @@ -1105,8 +1117,9 @@ static int ipmi_thread(void *data)
358 __set_current_state(TASK_INTERRUPTIBLE);
359 schedule();
360 }
361 - } else
362 + } else {
363 schedule_timeout_interruptible(1);
364 + }
365 }
366 return 0;
367 }
368 @@ -1285,6 +1298,7 @@ static void set_maintenance_mode(void *send_info, bool enable)
369
370 if (!enable)
371 atomic_set(&smi_info->req_events, 0);
372 + smi_info->in_maintenance_mode = enable;
373 }
374
375 static const struct ipmi_smi_handlers handlers = {
376 diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
377 index c813c27f2e58..2f97a843d6d6 100644
378 --- a/drivers/clk/at91/clk-main.c
379 +++ b/drivers/clk/at91/clk-main.c
380 @@ -27,6 +27,10 @@
381
382 #define MOR_KEY_MASK (0xff << 16)
383
384 +#define clk_main_parent_select(s) (((s) & \
385 + (AT91_PMC_MOSCEN | \
386 + AT91_PMC_OSCBYPASS)) ? 1 : 0)
387 +
388 struct clk_main_osc {
389 struct clk_hw hw;
390 struct regmap *regmap;
391 @@ -119,7 +123,7 @@ static int clk_main_osc_is_prepared(struct clk_hw *hw)
392
393 regmap_read(regmap, AT91_PMC_SR, &status);
394
395 - return (status & AT91_PMC_MOSCS) && (tmp & AT91_PMC_MOSCEN);
396 + return (status & AT91_PMC_MOSCS) && clk_main_parent_select(tmp);
397 }
398
399 static const struct clk_ops main_osc_ops = {
400 @@ -530,7 +534,7 @@ static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw)
401
402 regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
403
404 - return status & AT91_PMC_MOSCEN ? 1 : 0;
405 + return clk_main_parent_select(status);
406 }
407
408 static const struct clk_ops sam9x5_main_ops = {
409 @@ -572,7 +576,7 @@ at91_clk_register_sam9x5_main(struct regmap *regmap,
410 clkmain->hw.init = &init;
411 clkmain->regmap = regmap;
412 regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
413 - clkmain->parent = status & AT91_PMC_MOSCEN ? 1 : 0;
414 + clkmain->parent = clk_main_parent_select(status);
415
416 hw = &clkmain->hw;
417 ret = clk_hw_register(NULL, &clkmain->hw);
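
The at91 clk-main fix above replaces the lone AT91_PMC_MOSCEN test with a macro that also honours AT91_PMC_OSCBYPASS, because a bypassed main oscillator selects the external parent without MOSCEN being set. A tiny sketch of the macro's truth table, with stand-in bit positions (the real ones live in the AT91 PMC headers):

#include <stdio.h>

/* Stand-in bit positions, for illustration only. */
#define AT91_PMC_MOSCEN    (1u << 0)
#define AT91_PMC_OSCBYPASS (1u << 1)

/* Parent 1 (the external/crystal path) is selected when either bit is set. */
#define clk_main_parent_select(s) \
    (((s) & (AT91_PMC_MOSCEN | AT91_PMC_OSCBYPASS)) ? 1 : 0)

int main(void)
{
    unsigned int regs[] = { 0, AT91_PMC_MOSCEN, AT91_PMC_OSCBYPASS,
                            AT91_PMC_MOSCEN | AT91_PMC_OSCBYPASS };

    for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
        printf("MOR=0x%x -> parent %d\n", regs[i], clk_main_parent_select(regs[i]));
    return 0;
}
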
418 diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
419 index 80ae2a51452d..cdce49f6476a 100644
420 --- a/drivers/clk/clk-qoriq.c
421 +++ b/drivers/clk/clk-qoriq.c
422 @@ -540,7 +540,7 @@ static const struct clockgen_chipinfo chipinfo[] = {
423 .guts_compat = "fsl,qoriq-device-config-1.0",
424 .init_periph = p5020_init_periph,
425 .cmux_groups = {
426 - &p2041_cmux_grp1, &p2041_cmux_grp2
427 + &p5020_cmux_grp1, &p5020_cmux_grp2
428 },
429 .cmux_to_group = {
430 0, 1, -1
431 diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c
432 index 77e1e2491689..edb7197cc4b4 100644
433 --- a/drivers/clk/sirf/clk-common.c
434 +++ b/drivers/clk/sirf/clk-common.c
435 @@ -298,9 +298,10 @@ static u8 dmn_clk_get_parent(struct clk_hw *hw)
436 {
437 struct clk_dmn *clk = to_dmnclk(hw);
438 u32 cfg = clkc_readl(clk->regofs);
439 + const char *name = clk_hw_get_name(hw);
440
441 /* parent of io domain can only be pll3 */
442 - if (strcmp(hw->init->name, "io") == 0)
443 + if (strcmp(name, "io") == 0)
444 return 4;
445
446 WARN_ON((cfg & (BIT(3) - 1)) > 4);
447 @@ -312,9 +313,10 @@ static int dmn_clk_set_parent(struct clk_hw *hw, u8 parent)
448 {
449 struct clk_dmn *clk = to_dmnclk(hw);
450 u32 cfg = clkc_readl(clk->regofs);
451 + const char *name = clk_hw_get_name(hw);
452
453 /* parent of io domain can only be pll3 */
454 - if (strcmp(hw->init->name, "io") == 0)
455 + if (strcmp(name, "io") == 0)
456 return -EINVAL;
457
458 cfg &= ~(BIT(3) - 1);
459 @@ -354,7 +356,8 @@ static long dmn_clk_round_rate(struct clk_hw *hw, unsigned long rate,
460 {
461 unsigned long fin;
462 unsigned ratio, wait, hold;
463 - unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
464 + const char *name = clk_hw_get_name(hw);
465 + unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
466
467 fin = *parent_rate;
468 ratio = fin / rate;
469 @@ -376,7 +379,8 @@ static int dmn_clk_set_rate(struct clk_hw *hw, unsigned long rate,
470 struct clk_dmn *clk = to_dmnclk(hw);
471 unsigned long fin;
472 unsigned ratio, wait, hold, reg;
473 - unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
474 + const char *name = clk_hw_get_name(hw);
475 + unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
476
477 fin = parent_rate;
478 ratio = fin / rate;
479 diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
480 index 327bdf13e8bc..b0beb5e537bc 100644
481 --- a/drivers/gpu/drm/amd/amdgpu/si.c
482 +++ b/drivers/gpu/drm/amd/amdgpu/si.c
483 @@ -1606,7 +1606,7 @@ static void si_program_aspm(struct amdgpu_device *adev)
484 if (orig != data)
485 si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
486
487 - if ((adev->family != CHIP_OLAND) && (adev->family != CHIP_HAINAN)) {
488 + if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
489 orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
490 data &= ~PLL_RAMP_UP_TIME_0_MASK;
491 if (orig != data)
492 @@ -1655,14 +1655,14 @@ static void si_program_aspm(struct amdgpu_device *adev)
493
494 orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL);
495 data &= ~LS2_EXIT_TIME_MASK;
496 - if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
497 + if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
498 data |= LS2_EXIT_TIME(5);
499 if (orig != data)
500 si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data);
501
502 orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL);
503 data &= ~LS2_EXIT_TIME_MASK;
504 - if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
505 + if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
506 data |= LS2_EXIT_TIME(5);
507 if (orig != data)
508 si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data);
509 diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
510 index 80993a8734e0..8b6f8fac92e8 100644
511 --- a/drivers/gpu/drm/bridge/tc358767.c
512 +++ b/drivers/gpu/drm/bridge/tc358767.c
513 @@ -300,7 +300,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
514 struct drm_dp_aux_msg *msg)
515 {
516 struct tc_data *tc = aux_to_tc(aux);
517 - size_t size = min_t(size_t, 8, msg->size);
518 + size_t size = min_t(size_t, DP_AUX_MAX_PAYLOAD_BYTES - 1, msg->size);
519 u8 request = msg->request & ~DP_AUX_I2C_MOT;
520 u8 *buf = msg->buffer;
521 u32 tmp = 0;
522 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
523 index c5e1aa5f1d8e..efa875120071 100644
524 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
525 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
526 @@ -764,7 +764,7 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
527
528 radeon_encoder->output_csc = val;
529
530 - if (connector->encoder->crtc) {
531 + if (connector->encoder && connector->encoder->crtc) {
532 struct drm_crtc *crtc = connector->encoder->crtc;
533 const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
534 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
535 diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
536 index 30bd4a6a9d46..3ccf5b28b326 100644
537 --- a/drivers/gpu/drm/radeon/radeon_drv.c
538 +++ b/drivers/gpu/drm/radeon/radeon_drv.c
539 @@ -366,11 +366,19 @@ radeon_pci_remove(struct pci_dev *pdev)
540 static void
541 radeon_pci_shutdown(struct pci_dev *pdev)
542 {
543 + struct drm_device *ddev = pci_get_drvdata(pdev);
544 +
545 /* if we are running in a VM, make sure the device
546 * torn down properly on reboot/shutdown
547 */
548 if (radeon_device_is_virtual())
549 radeon_pci_remove(pdev);
550 +
551 + /* Some adapters need to be suspended before a
552 + * shutdown occurs in order to prevent an error
553 + * during kexec.
554 + */
555 + radeon_suspend_kms(ddev, true, true, false);
556 }
557
558 static int radeon_pmops_suspend(struct device *dev)
559 diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
560 index 65a0c79f212e..31c087e1746d 100644
561 --- a/drivers/hid/hid-apple.c
562 +++ b/drivers/hid/hid-apple.c
563 @@ -55,7 +55,6 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\")
564 struct apple_sc {
565 unsigned long quirks;
566 unsigned int fn_on;
567 - DECLARE_BITMAP(pressed_fn, KEY_CNT);
568 DECLARE_BITMAP(pressed_numlock, KEY_CNT);
569 };
570
571 @@ -182,6 +181,8 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
572 {
573 struct apple_sc *asc = hid_get_drvdata(hid);
574 const struct apple_key_translation *trans, *table;
575 + bool do_translate;
576 + u16 code = 0;
577
578 if (usage->code == KEY_FN) {
579 asc->fn_on = !!value;
580 @@ -190,8 +191,6 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
581 }
582
583 if (fnmode) {
584 - int do_translate;
585 -
586 if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
587 hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
588 table = macbookair_fn_keys;
589 @@ -203,25 +202,33 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
590 trans = apple_find_translation (table, usage->code);
591
592 if (trans) {
593 - if (test_bit(usage->code, asc->pressed_fn))
594 - do_translate = 1;
595 - else if (trans->flags & APPLE_FLAG_FKEY)
596 - do_translate = (fnmode == 2 && asc->fn_on) ||
597 - (fnmode == 1 && !asc->fn_on);
598 - else
599 - do_translate = asc->fn_on;
600 -
601 - if (do_translate) {
602 - if (value)
603 - set_bit(usage->code, asc->pressed_fn);
604 - else
605 - clear_bit(usage->code, asc->pressed_fn);
606 -
607 - input_event(input, usage->type, trans->to,
608 - value);
609 -
610 - return 1;
611 + if (test_bit(trans->from, input->key))
612 + code = trans->from;
613 + else if (test_bit(trans->to, input->key))
614 + code = trans->to;
615 +
616 + if (!code) {
617 + if (trans->flags & APPLE_FLAG_FKEY) {
618 + switch (fnmode) {
619 + case 1:
620 + do_translate = !asc->fn_on;
621 + break;
622 + case 2:
623 + do_translate = asc->fn_on;
624 + break;
625 + default:
626 + /* should never happen */
627 + do_translate = false;
628 + }
629 + } else {
630 + do_translate = asc->fn_on;
631 + }
632 +
633 + code = do_translate ? trans->to : trans->from;
634 }
635 +
636 + input_event(input, usage->type, code, value);
637 + return 1;
638 }
639
640 if (asc->quirks & APPLE_NUMLOCK_EMULATION &&
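
The hid-apple rework above drops the driver's private pressed_fn bitmap and instead derives the emitted code from the input device's own key state plus the fnmode setting, keeping press/release pairs consistent even if Fn changes state mid-press. A simplified userspace sketch of that decision logic; the test_bit() lookups are replaced by booleans and the names here are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct translation { int from, to, is_fkey; };

/* Decide which code to emit for a key covered by a translation entry.
 * from_down/to_down stand in for test_bit() on the device's key state. */
static int pick_code(const struct translation *t, bool from_down, bool to_down,
                     int fnmode, bool fn_on)
{
    bool do_translate;

    /* If either variant is already held, keep reporting that one so the
     * eventual release matches the press. */
    if (from_down)
        return t->from;
    if (to_down)
        return t->to;

    if (t->is_fkey) {
        switch (fnmode) {
        case 1:  do_translate = !fn_on; break;  /* media keys by default */
        case 2:  do_translate = fn_on;  break;  /* F-keys by default */
        default: do_translate = false;  break;  /* should never happen */
        }
    } else {
        do_translate = fn_on;
    }

    return do_translate ? t->to : t->from;
}

int main(void)
{
    struct translation f1 = { .from = 59, .to = 224, .is_fkey = 1 };  /* example codes */

    printf("fnmode=1, Fn up         -> %d\n", pick_code(&f1, false, false, 1, false));
    printf("fnmode=1, Fn down       -> %d\n", pick_code(&f1, false, false, 1, true));
    printf("release while 'to' held -> %d\n", pick_code(&f1, false, true, 1, false));
    return 0;
}
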
641 diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
642 index 9ff243970e93..5b41111e62fd 100644
643 --- a/drivers/mfd/intel-lpss-pci.c
644 +++ b/drivers/mfd/intel-lpss-pci.c
645 @@ -39,6 +39,8 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
646 info->mem = &pdev->resource[0];
647 info->irq = pdev->irq;
648
649 + pdev->d3cold_delay = 0;
650 +
651 /* Probably it is enough to set this for iDMA capable devices only */
652 pci_set_master(pdev);
653
654 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
655 index 23d6c44dc459..9ce1ad3d950c 100644
656 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
657 +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
658 @@ -137,13 +137,12 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
659 static int alloc_uld_rxqs(struct adapter *adap,
660 struct sge_uld_rxq_info *rxq_info, bool lro)
661 {
662 - struct sge *s = &adap->sge;
663 unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
664 + int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
665 struct sge_ofld_rxq *q = rxq_info->uldrxq;
666 unsigned short *ids = rxq_info->rspq_id;
667 - unsigned int bmap_idx = 0;
668 + struct sge *s = &adap->sge;
669 unsigned int per_chan;
670 - int i, err, msi_idx, que_idx = 0;
671
672 per_chan = rxq_info->nrxq / adap->params.nports;
673
674 @@ -161,6 +160,10 @@ static int alloc_uld_rxqs(struct adapter *adap,
675
676 if (msi_idx >= 0) {
677 bmap_idx = get_msix_idx_from_bmap(adap);
678 + if (bmap_idx < 0) {
679 + err = -ENOSPC;
680 + goto freeout;
681 + }
682 msi_idx = adap->msix_info_ulds[bmap_idx].idx;
683 }
684 err = t4_sge_alloc_rxq(adap, &q->rspq, false,
685 diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
686 index 355c5fb802cd..c653b97d84d5 100644
687 --- a/drivers/net/ethernet/qlogic/qla3xxx.c
688 +++ b/drivers/net/ethernet/qlogic/qla3xxx.c
689 @@ -2783,6 +2783,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
690 netdev_err(qdev->ndev,
691 "PCI mapping failed with error: %d\n",
692 err);
693 + dev_kfree_skb_irq(skb);
694 ql_free_large_buffers(qdev);
695 return -ENOMEM;
696 }
697 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
698 index 66ae647b712e..27fc699d8be5 100644
699 --- a/drivers/net/usb/hso.c
700 +++ b/drivers/net/usb/hso.c
701 @@ -2635,14 +2635,18 @@ static struct hso_device *hso_create_bulk_serial_device(
702 */
703 if (serial->tiocmget) {
704 tiocmget = serial->tiocmget;
705 + tiocmget->endp = hso_get_ep(interface,
706 + USB_ENDPOINT_XFER_INT,
707 + USB_DIR_IN);
708 + if (!tiocmget->endp) {
709 + dev_err(&interface->dev, "Failed to find INT IN ep\n");
710 + goto exit;
711 + }
712 +
713 tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
714 if (tiocmget->urb) {
715 mutex_init(&tiocmget->mutex);
716 init_waitqueue_head(&tiocmget->waitq);
717 - tiocmget->endp = hso_get_ep(
718 - interface,
719 - USB_ENDPOINT_XFER_INT,
720 - USB_DIR_IN);
721 } else
722 hso_free_tiomget(serial);
723 }
724 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
725 index 05953e14a064..0d48714c3f28 100644
726 --- a/drivers/net/usb/qmi_wwan.c
727 +++ b/drivers/net/usb/qmi_wwan.c
728 @@ -940,6 +940,7 @@ static const struct usb_device_id products[] = {
729 {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
730 {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
731 {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
732 + {QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
733 {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
734 {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
735 {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
736 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
737 index c31c564b8eab..6d391a268469 100644
738 --- a/drivers/net/xen-netfront.c
739 +++ b/drivers/net/xen-netfront.c
740 @@ -888,9 +888,9 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
741 return 0;
742 }
743
744 -static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
745 - struct sk_buff *skb,
746 - struct sk_buff_head *list)
747 +static int xennet_fill_frags(struct netfront_queue *queue,
748 + struct sk_buff *skb,
749 + struct sk_buff_head *list)
750 {
751 RING_IDX cons = queue->rx.rsp_cons;
752 struct sk_buff *nskb;
753 @@ -909,7 +909,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
754 if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
755 queue->rx.rsp_cons = ++cons + skb_queue_len(list);
756 kfree_skb(nskb);
757 - return ~0U;
758 + return -ENOENT;
759 }
760
761 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
762 @@ -920,7 +920,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
763 kfree_skb(nskb);
764 }
765
766 - return cons;
767 + queue->rx.rsp_cons = cons;
768 +
769 + return 0;
770 }
771
772 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
773 @@ -1046,8 +1048,7 @@ err:
774 skb->data_len = rx->status;
775 skb->len += rx->status;
776
777 - i = xennet_fill_frags(queue, skb, &tmpq);
778 - if (unlikely(i == ~0U))
779 + if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
780 goto err;
781
782 if (rx->flags & XEN_NETRXF_csum_blank)
783 @@ -1057,7 +1058,7 @@ err:
784
785 __skb_queue_tail(&rxq, skb);
786
787 - queue->rx.rsp_cons = ++i;
788 + i = ++queue->rx.rsp_cons;
789 work_done++;
790 }
791
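
The xen-netfront change above converts xennet_fill_frags() from returning a ring index with ~0U as an error sentinel to returning 0 or a negative errno, updating rsp_cons inside the function so the caller only has to test for failure. A small sketch of that general pattern (sentinel replaced by a negative error code); the structure and names are simplified stand-ins, not the netfront data types:

#include <errno.h>
#include <stdio.h>

struct queue { unsigned int rsp_cons; };

/* New style, as in the patch: update state internally, return 0 or -errno.
 * The old style returned the new index and ~0U on error. */
static int fill_frags(struct queue *q, int nfrags, int max_frags)
{
    unsigned int cons = q->rsp_cons;

    for (int i = 0; i < nfrags; i++) {
        if (i >= max_frags)
            return -ENOENT;      /* caller just tests for != 0 */
        cons++;                  /* consume one response slot per fragment */
    }

    q->rsp_cons = cons;          /* publish progress on success */
    return 0;
}

int main(void)
{
    struct queue q = { .rsp_cons = 10 };

    if (fill_frags(&q, 3, 17) == 0)
        printf("ok, rsp_cons=%u\n", q.rsp_cons);
    if (fill_frags(&q, 20, 17) != 0)
        printf("too many frags, rsp_cons=%u\n", q.rsp_cons);
    return 0;
}
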
792 diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
793 index 8dfccf733241..8e101b19c4d6 100644
794 --- a/drivers/pci/host/pci-tegra.c
795 +++ b/drivers/pci/host/pci-tegra.c
796 @@ -1898,14 +1898,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
797 err = of_pci_get_devfn(port);
798 if (err < 0) {
799 dev_err(dev, "failed to parse address: %d\n", err);
800 - return err;
801 + goto err_node_put;
802 }
803
804 index = PCI_SLOT(err);
805
806 if (index < 1 || index > soc->num_ports) {
807 dev_err(dev, "invalid port number: %d\n", index);
808 - return -EINVAL;
809 + err = -EINVAL;
810 + goto err_node_put;
811 }
812
813 index--;
814 @@ -1914,12 +1915,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
815 if (err < 0) {
816 dev_err(dev, "failed to parse # of lanes: %d\n",
817 err);
818 - return err;
819 + goto err_node_put;
820 }
821
822 if (value > 16) {
823 dev_err(dev, "invalid # of lanes: %u\n", value);
824 - return -EINVAL;
825 + err = -EINVAL;
826 + goto err_node_put;
827 }
828
829 lanes |= value << (index << 3);
830 @@ -1933,13 +1935,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
831 lane += value;
832
833 rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
834 - if (!rp)
835 - return -ENOMEM;
836 + if (!rp) {
837 + err = -ENOMEM;
838 + goto err_node_put;
839 + }
840
841 err = of_address_to_resource(port, 0, &rp->regs);
842 if (err < 0) {
843 dev_err(dev, "failed to parse address: %d\n", err);
844 - return err;
845 + goto err_node_put;
846 }
847
848 INIT_LIST_HEAD(&rp->list);
849 @@ -1966,6 +1970,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
850 return err;
851
852 return 0;
853 +
854 +err_node_put:
855 + of_node_put(port);
856 + return err;
857 }
858
859 /*
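
The pci-tegra fix above routes every early return in the port-parsing loop through a single err_node_put: label so the of_node reference taken by the iterator is always dropped. The same single-exit cleanup idiom in plain C, with malloc/free standing in for the node reference; function and variable names are illustrative:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse one "port": acquire a resource up front and funnel every failure
 * path through one label that releases it, so no early return can leak. */
static int parse_port(int devfn, int lanes)
{
    int err;
    char *node = malloc(64);        /* stands in for the of_node reference */
    if (!node)
        return -ENOMEM;

    if (devfn < 0) {
        err = devfn;                /* propagate the parse error */
        goto err_node_put;
    }

    if (lanes > 16) {
        err = -EINVAL;              /* invalid # of lanes */
        goto err_node_put;
    }

    snprintf(node, 64, "port@%d,%d", devfn, lanes);
    printf("parsed %s\n", node);
    free(node);
    return 0;

err_node_put:
    free(node);                     /* of_node_put() in the real code */
    return err;
}

int main(void)
{
    printf("%d\n", parse_port(1, 4));
    printf("%d\n", parse_port(1, 32));
    return 0;
}
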
860 diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
861 index 277622b4b6fb..1d9f63e954c7 100644
862 --- a/drivers/pinctrl/tegra/pinctrl-tegra.c
863 +++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
864 @@ -52,7 +52,9 @@ static inline u32 pmx_readl(struct tegra_pmx *pmx, u32 bank, u32 reg)
865
866 static inline void pmx_writel(struct tegra_pmx *pmx, u32 val, u32 bank, u32 reg)
867 {
868 - writel(val, pmx->regs[bank] + reg);
869 + writel_relaxed(val, pmx->regs[bank] + reg);
870 + /* make sure pinmux register write completed */
871 + pmx_readl(pmx, bank, reg);
872 }
873
874 static int tegra_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
875 diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
876 index bd70339c1242..03d9855a6afd 100644
877 --- a/drivers/scsi/scsi_logging.c
878 +++ b/drivers/scsi/scsi_logging.c
879 @@ -16,57 +16,15 @@
880 #include <scsi/scsi_eh.h>
881 #include <scsi/scsi_dbg.h>
882
883 -#define SCSI_LOG_SPOOLSIZE 4096
884 -
885 -#if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG
886 -#warning SCSI logging bitmask too large
887 -#endif
888 -
889 -struct scsi_log_buf {
890 - char buffer[SCSI_LOG_SPOOLSIZE];
891 - unsigned long map;
892 -};
893 -
894 -static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log);
895 -
896 static char *scsi_log_reserve_buffer(size_t *len)
897 {
898 - struct scsi_log_buf *buf;
899 - unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE;
900 - unsigned long idx = 0;
901 -
902 - preempt_disable();
903 - buf = this_cpu_ptr(&scsi_format_log);
904 - idx = find_first_zero_bit(&buf->map, map_bits);
905 - if (likely(idx < map_bits)) {
906 - while (test_and_set_bit(idx, &buf->map)) {
907 - idx = find_next_zero_bit(&buf->map, map_bits, idx);
908 - if (idx >= map_bits)
909 - break;
910 - }
911 - }
912 - if (WARN_ON(idx >= map_bits)) {
913 - preempt_enable();
914 - return NULL;
915 - }
916 - *len = SCSI_LOG_BUFSIZE;
917 - return buf->buffer + idx * SCSI_LOG_BUFSIZE;
918 + *len = 128;
919 + return kmalloc(*len, GFP_ATOMIC);
920 }
921
922 static void scsi_log_release_buffer(char *bufptr)
923 {
924 - struct scsi_log_buf *buf;
925 - unsigned long idx;
926 - int ret;
927 -
928 - buf = this_cpu_ptr(&scsi_format_log);
929 - if (bufptr >= buf->buffer &&
930 - bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) {
931 - idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE;
932 - ret = test_and_clear_bit(idx, &buf->map);
933 - WARN_ON(!ret);
934 - }
935 - preempt_enable();
936 + kfree(bufptr);
937 }
938
939 static inline const char *scmd_name(const struct scsi_cmnd *scmd)
940 diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
941 index f9a75df2d22d..a1a712d18e02 100644
942 --- a/drivers/vfio/pci/vfio_pci.c
943 +++ b/drivers/vfio/pci/vfio_pci.c
944 @@ -356,11 +356,20 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
945 pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
946
947 /*
948 - * Try to reset the device. The success of this is dependent on
949 - * being able to lock the device, which is not always possible.
950 + * Try to get the locks ourselves to prevent a deadlock. The
951 + * success of this is dependent on being able to lock the device,
952 + * which is not always possible.
953 + * We can not use the "try" reset interface here, which will
954 + * overwrite the previously restored configuration information.
955 */
956 - if (vdev->reset_works && !pci_try_reset_function(pdev))
957 - vdev->needs_reset = false;
958 + if (vdev->reset_works && pci_cfg_access_trylock(pdev)) {
959 + if (device_trylock(&pdev->dev)) {
960 + if (!__pci_reset_function_locked(pdev))
961 + vdev->needs_reset = false;
962 + device_unlock(&pdev->dev);
963 + }
964 + pci_cfg_access_unlock(pdev);
965 + }
966
967 pci_restore_state(pdev);
968 out:
969 diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
970 index 2925d5ce8d3e..1267b93c03bd 100644
971 --- a/drivers/video/fbdev/ssd1307fb.c
972 +++ b/drivers/video/fbdev/ssd1307fb.c
973 @@ -430,7 +430,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
974 if (ret < 0)
975 return ret;
976
977 - ret = ssd1307fb_write_cmd(par->client, 0x0);
978 + ret = ssd1307fb_write_cmd(par->client, par->page_offset);
979 if (ret < 0)
980 return ret;
981
982 diff --git a/fs/fat/dir.c b/fs/fat/dir.c
983 index 81cecbe6d7cf..971e369517a7 100644
984 --- a/fs/fat/dir.c
985 +++ b/fs/fat/dir.c
986 @@ -1097,8 +1097,11 @@ static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used,
987 err = -ENOMEM;
988 goto error;
989 }
990 + /* Avoid race with userspace read via bdev */
991 + lock_buffer(bhs[n]);
992 memset(bhs[n]->b_data, 0, sb->s_blocksize);
993 set_buffer_uptodate(bhs[n]);
994 + unlock_buffer(bhs[n]);
995 mark_buffer_dirty_inode(bhs[n], dir);
996
997 n++;
998 @@ -1155,6 +1158,8 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec *ts)
999 fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);
1000
1001 de = (struct msdos_dir_entry *)bhs[0]->b_data;
1002 + /* Avoid race with userspace read via bdev */
1003 + lock_buffer(bhs[0]);
1004 /* filling the new directory slots ("." and ".." entries) */
1005 memcpy(de[0].name, MSDOS_DOT, MSDOS_NAME);
1006 memcpy(de[1].name, MSDOS_DOTDOT, MSDOS_NAME);
1007 @@ -1177,6 +1182,7 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec *ts)
1008 de[0].size = de[1].size = 0;
1009 memset(de + 2, 0, sb->s_blocksize - 2 * sizeof(*de));
1010 set_buffer_uptodate(bhs[0]);
1011 + unlock_buffer(bhs[0]);
1012 mark_buffer_dirty_inode(bhs[0], dir);
1013
1014 err = fat_zeroed_cluster(dir, blknr, 1, bhs, MAX_BUF_PER_PAGE);
1015 @@ -1234,11 +1240,14 @@ static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,
1016
1017 /* fill the directory entry */
1018 copy = min(size, sb->s_blocksize);
1019 + /* Avoid race with userspace read via bdev */
1020 + lock_buffer(bhs[n]);
1021 memcpy(bhs[n]->b_data, slots, copy);
1022 - slots += copy;
1023 - size -= copy;
1024 set_buffer_uptodate(bhs[n]);
1025 + unlock_buffer(bhs[n]);
1026 mark_buffer_dirty_inode(bhs[n], dir);
1027 + slots += copy;
1028 + size -= copy;
1029 if (!size)
1030 break;
1031 n++;
1032 diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
1033 index a9cad9b60790..0129d4d07a54 100644
1034 --- a/fs/fat/fatent.c
1035 +++ b/fs/fat/fatent.c
1036 @@ -389,8 +389,11 @@ static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
1037 err = -ENOMEM;
1038 goto error;
1039 }
1040 + /* Avoid race with userspace read via bdev */
1041 + lock_buffer(c_bh);
1042 memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
1043 set_buffer_uptodate(c_bh);
1044 + unlock_buffer(c_bh);
1045 mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
1046 if (sb->s_flags & MS_SYNCHRONOUS)
1047 err = sync_dirty_buffer(c_bh);
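
The fs/fat hunks above wrap the memset/memcpy of freshly allocated buffer heads in lock_buffer()/unlock_buffer() so a concurrent read through the block device cannot observe a half-initialised block. The underlying idea, exclude readers while a shared buffer is being filled and only then publish it, in a userspace sketch with a pthread mutex standing in for the buffer lock; illustrative only, since the kernel primitive is a per-buffer bit lock, not a mutex:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 512

struct buffer {
    pthread_mutex_t lock;    /* stands in for lock_buffer()/unlock_buffer() */
    bool uptodate;           /* stands in for set_buffer_uptodate() */
    char data[BLOCK_SIZE];
};

static void fill_block(struct buffer *bh, const char *slots, size_t len)
{
    pthread_mutex_lock(&bh->lock);       /* keep readers out while we write */
    memset(bh->data, 0, sizeof(bh->data));
    memcpy(bh->data, slots, len < sizeof(bh->data) ? len : sizeof(bh->data));
    bh->uptodate = true;                 /* publish only a fully written block */
    pthread_mutex_unlock(&bh->lock);
}

static void read_block(struct buffer *bh, char *out)
{
    pthread_mutex_lock(&bh->lock);
    if (bh->uptodate)
        memcpy(out, bh->data, sizeof(bh->data));
    pthread_mutex_unlock(&bh->lock);
}

int main(void)
{
    struct buffer bh = { .lock = PTHREAD_MUTEX_INITIALIZER };
    char out[BLOCK_SIZE] = { 0 };

    fill_block(&bh, "dir entry", 9);
    read_block(&bh, out);
    printf("%s\n", out);
    return 0;
}
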
1048 diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
1049 index 1082b2c3014b..5f2a120240e5 100644
1050 --- a/fs/ocfs2/dlm/dlmunlock.c
1051 +++ b/fs/ocfs2/dlm/dlmunlock.c
1052 @@ -105,7 +105,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
1053 enum dlm_status status;
1054 int actions = 0;
1055 int in_use;
1056 - u8 owner;
1057 + u8 owner;
1058 + int recovery_wait = 0;
1059
1060 mlog(0, "master_node = %d, valblk = %d\n", master_node,
1061 flags & LKM_VALBLK);
1062 @@ -208,9 +209,12 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
1063 }
1064 if (flags & LKM_CANCEL)
1065 lock->cancel_pending = 0;
1066 - else
1067 - lock->unlock_pending = 0;
1068 -
1069 + else {
1070 + if (!lock->unlock_pending)
1071 + recovery_wait = 1;
1072 + else
1073 + lock->unlock_pending = 0;
1074 + }
1075 }
1076
1077 /* get an extra ref on lock. if we are just switching
1078 @@ -244,6 +248,17 @@ leave:
1079 spin_unlock(&res->spinlock);
1080 wake_up(&res->wq);
1081
1082 + if (recovery_wait) {
1083 + spin_lock(&res->spinlock);
1084 + /* Unlock request will directly succeed after owner dies,
1085 + * and the lock is already removed from grant list. We have to
1086 + * wait for RECOVERING done or we miss the chance to purge it
1087 + * since the removement is much faster than RECOVERING proc.
1088 + */
1089 + __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING);
1090 + spin_unlock(&res->spinlock);
1091 + }
1092 +
1093 /* let the caller's final dlm_lock_put handle the actual kfree */
1094 if (actions & DLM_UNLOCK_FREE_LOCK) {
1095 /* this should always be coupled with list removal */
1096 diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
1097 index 56710e03101c..1fcf14aee28a 100644
1098 --- a/include/scsi/scsi_dbg.h
1099 +++ b/include/scsi/scsi_dbg.h
1100 @@ -5,8 +5,6 @@ struct scsi_cmnd;
1101 struct scsi_device;
1102 struct scsi_sense_hdr;
1103
1104 -#define SCSI_LOG_BUFSIZE 128
1105 -
1106 extern void scsi_print_command(struct scsi_cmnd *);
1107 extern size_t __scsi_format_command(char *, size_t,
1108 const unsigned char *, size_t);
1109 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
1110 index 4f561860bf41..bc5ff3a53d4a 100644
1111 --- a/lib/Kconfig.debug
1112 +++ b/lib/Kconfig.debug
1113 @@ -535,7 +535,7 @@ config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
1114 int "Maximum kmemleak early log entries"
1115 depends on DEBUG_KMEMLEAK
1116 range 200 40000
1117 - default 400
1118 + default 16000
1119 help
1120 Kmemleak must track all the memory allocations to avoid
1121 reporting false positives. Since memory may be allocated or
1122 diff --git a/net/core/sock.c b/net/core/sock.c
1123 index 3041aa6df602..d22493351407 100644
1124 --- a/net/core/sock.c
1125 +++ b/net/core/sock.c
1126 @@ -1426,8 +1426,6 @@ static void __sk_destruct(struct rcu_head *head)
1127 sk_filter_uncharge(sk, filter);
1128 RCU_INIT_POINTER(sk->sk_filter, NULL);
1129 }
1130 - if (rcu_access_pointer(sk->sk_reuseport_cb))
1131 - reuseport_detach_sock(sk);
1132
1133 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1134
1135 @@ -1450,7 +1448,14 @@ static void __sk_destruct(struct rcu_head *head)
1136
1137 void sk_destruct(struct sock *sk)
1138 {
1139 - if (sock_flag(sk, SOCK_RCU_FREE))
1140 + bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
1141 +
1142 + if (rcu_access_pointer(sk->sk_reuseport_cb)) {
1143 + reuseport_detach_sock(sk);
1144 + use_call_rcu = true;
1145 + }
1146 +
1147 + if (use_call_rcu)
1148 call_rcu(&sk->sk_rcu, __sk_destruct);
1149 else
1150 __sk_destruct(&sk->sk_rcu);
1151 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
1152 index d558dc076577..d1a302d321fa 100644
1153 --- a/net/ipv4/route.c
1154 +++ b/net/ipv4/route.c
1155 @@ -903,16 +903,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
1156 if (peer->rate_tokens == 0 ||
1157 time_after(jiffies,
1158 (peer->rate_last +
1159 - (ip_rt_redirect_load << peer->rate_tokens)))) {
1160 + (ip_rt_redirect_load << peer->n_redirects)))) {
1161 __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
1162
1163 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
1164 peer->rate_last = jiffies;
1165 - ++peer->rate_tokens;
1166 ++peer->n_redirects;
1167 #ifdef CONFIG_IP_ROUTE_VERBOSE
1168 if (log_martians &&
1169 - peer->rate_tokens == ip_rt_redirect_number)
1170 + peer->n_redirects == ip_rt_redirect_number)
1171 net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
1172 &ip_hdr(skb)->saddr, inet_iif(skb),
1173 &ip_hdr(skb)->daddr, &gw);
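
The ipv4/route.c fix above keys the exponential backoff for ICMP redirects off peer->n_redirects instead of rate_tokens, so the delay keeps growing with the number of redirects actually sent. A sketch of the shift-based backoff test; the field and constant names mirror the hunk, the first-send condition is simplified, and HZ/the base load are assumed values:

#include <stdbool.h>
#include <stdio.h>

#define HZ                  100          /* assumed ticks per second */
#define ip_rt_redirect_load (HZ / 50)    /* assumed base delay */

struct peer { unsigned long rate_last; unsigned int n_redirects; };

/* time_after(now, last + (load << n_redirects)): each redirect sent doubles
 * the minimum gap before the next one may go out. */
static bool may_send_redirect(const struct peer *p, unsigned long now)
{
    return p->n_redirects == 0 ||
           now > p->rate_last + ((unsigned long)ip_rt_redirect_load << p->n_redirects);
}

int main(void)
{
    struct peer p = { .rate_last = 1000, .n_redirects = 0 };

    for (unsigned long now = 1000; now < 1200; now += 20) {
        if (may_send_redirect(&p, now)) {
            printf("t=%lu: send redirect #%u\n", now, p.n_redirects + 1);
            p.rate_last = now;
            p.n_redirects++;
        }
    }
    return 0;
}
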
1174 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
1175 index 4ce7f9195151..6b1310d5e808 100644
1176 --- a/net/ipv6/addrconf.c
1177 +++ b/net/ipv6/addrconf.c
1178 @@ -5443,13 +5443,20 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
1179 switch (event) {
1180 case RTM_NEWADDR:
1181 /*
1182 - * If the address was optimistic
1183 - * we inserted the route at the start of
1184 - * our DAD process, so we don't need
1185 - * to do it again
1186 + * If the address was optimistic we inserted the route at the
1187 + * start of our DAD process, so we don't need to do it again.
1188 + * If the device was taken down in the middle of the DAD
1189 + * cycle there is a race where we could get here without a
1190 + * host route, so nothing to insert. That will be fixed when
1191 + * the device is brought up.
1192 */
1193 - if (!rcu_access_pointer(ifp->rt->rt6i_node))
1194 + if (ifp->rt && !rcu_access_pointer(ifp->rt->rt6i_node)) {
1195 ip6_ins_rt(ifp->rt);
1196 + } else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
1197 + pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
1198 + &ifp->addr, ifp->idev->dev->name);
1199 + }
1200 +
1201 if (ifp->idev->cnf.forwarding)
1202 addrconf_join_anycast(ifp);
1203 if (!ipv6_addr_any(&ifp->peer_addr))
1204 diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
1205 index aacfb4bce153..e726a61ae6dc 100644
1206 --- a/net/ipv6/ip6_input.c
1207 +++ b/net/ipv6/ip6_input.c
1208 @@ -168,6 +168,16 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
1209 if (ipv6_addr_is_multicast(&hdr->saddr))
1210 goto err;
1211
1212 + /* While RFC4291 is not explicit about v4mapped addresses
1213 + * in IPv6 headers, it seems clear linux dual-stack
1214 + * model can not deal properly with these.
1215 + * Security models could be fooled by ::ffff:127.0.0.1 for example.
1216 + *
1217 + * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02
1218 + */
1219 + if (ipv6_addr_v4mapped(&hdr->saddr))
1220 + goto err;
1221 +
1222 skb->transport_header = skb->network_header + sizeof(*hdr);
1223 IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
1224
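
The ip6_input.c hunk above drops IPv6 packets whose source address is IPv4-mapped (::ffff:a.b.c.d), since the dual-stack model cannot handle them safely and they can fool address-based checks (for example ::ffff:127.0.0.1). The same classification is available to userspace via IN6_IS_ADDR_V4MAPPED; a short sketch:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

/* Return 1 if the textual IPv6 source address would be rejected by the
 * patched ipv6_rcv() check, i.e. it is an IPv4-mapped address. */
static int is_v4mapped_source(const char *addr)
{
    struct in6_addr a;

    if (inet_pton(AF_INET6, addr, &a) != 1)
        return -1;                      /* not a valid IPv6 address at all */
    return IN6_IS_ADDR_V4MAPPED(&a);
}

int main(void)
{
    const char *samples[] = { "::ffff:127.0.0.1", "2001:db8::1", "::1" };

    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("%-20s -> %s\n", samples[i],
               is_v4mapped_source(samples[i]) == 1 ? "drop (v4-mapped)" : "accept");
    return 0;
}
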
1225 diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
1226 index 22d5a80a8f34..dd59fde1dac8 100644
1227 --- a/net/nfc/llcp_sock.c
1228 +++ b/net/nfc/llcp_sock.c
1229 @@ -118,9 +118,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
1230 llcp_sock->service_name = kmemdup(llcp_addr.service_name,
1231 llcp_sock->service_name_len,
1232 GFP_KERNEL);
1233 -
1234 + if (!llcp_sock->service_name) {
1235 + ret = -ENOMEM;
1236 + goto put_dev;
1237 + }
1238 llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
1239 if (llcp_sock->ssap == LLCP_SAP_MAX) {
1240 + kfree(llcp_sock->service_name);
1241 + llcp_sock->service_name = NULL;
1242 ret = -EADDRINUSE;
1243 goto put_dev;
1244 }
1245 diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
1246 index dbf74afe82fb..ad878302924f 100644
1247 --- a/net/nfc/netlink.c
1248 +++ b/net/nfc/netlink.c
1249 @@ -973,7 +973,8 @@ static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info)
1250 int rc;
1251 u32 idx;
1252
1253 - if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
1254 + if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
1255 + !info->attrs[NFC_ATTR_TARGET_INDEX])
1256 return -EINVAL;
1257
1258 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
1259 @@ -1022,7 +1023,8 @@ static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info)
1260 struct sk_buff *msg = NULL;
1261 u32 idx;
1262
1263 - if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
1264 + if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
1265 + !info->attrs[NFC_ATTR_FIRMWARE_NAME])
1266 return -EINVAL;
1267
1268 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
1269 diff --git a/net/rds/ib.c b/net/rds/ib.c
1270 index 0efb3d2b338d..a118686cc1ec 100644
1271 --- a/net/rds/ib.c
1272 +++ b/net/rds/ib.c
1273 @@ -138,6 +138,9 @@ static void rds_ib_add_one(struct ib_device *device)
1274 atomic_set(&rds_ibdev->refcount, 1);
1275 INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
1276
1277 + INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
1278 + INIT_LIST_HEAD(&rds_ibdev->conn_list);
1279 +
1280 rds_ibdev->max_wrs = device->attrs.max_qp_wr;
1281 rds_ibdev->max_sge = min(device->attrs.max_sge, RDS_IB_MAX_SGE);
1282
1283 @@ -189,9 +192,6 @@ static void rds_ib_add_one(struct ib_device *device)
1284 device->name,
1285 rds_ibdev->use_fastreg ? "FRMR" : "FMR");
1286
1287 - INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
1288 - INIT_LIST_HEAD(&rds_ibdev->conn_list);
1289 -
1290 down_write(&rds_ib_devices_lock);
1291 list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
1292 up_write(&rds_ib_devices_lock);
1293 diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
1294 index beb554aa8cfb..7a434a5c43fe 100644
1295 --- a/net/sched/sch_cbq.c
1296 +++ b/net/sched/sch_cbq.c
1297 @@ -1129,6 +1129,26 @@ static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
1298 [TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) },
1299 };
1300
1301 +static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1], struct nlattr *opt)
1302 +{
1303 + int err;
1304 +
1305 + if (!opt)
1306 + return -EINVAL;
1307 +
1308 + err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
1309 + if (err < 0)
1310 + return err;
1311 +
1312 + if (tb[TCA_CBQ_WRROPT]) {
1313 + const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);
1314 +
1315 + if (wrr->priority > TC_CBQ_MAXPRIO)
1316 + err = -EINVAL;
1317 + }
1318 + return err;
1319 +}
1320 +
1321 static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
1322 {
1323 struct cbq_sched_data *q = qdisc_priv(sch);
1324 @@ -1136,7 +1156,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
1325 struct tc_ratespec *r;
1326 int err;
1327
1328 - err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
1329 + err = cbq_opt_parse(tb, opt);
1330 if (err < 0)
1331 return err;
1332
1333 @@ -1468,10 +1488,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
1334 struct cbq_class *parent;
1335 struct qdisc_rate_table *rtab = NULL;
1336
1337 - if (opt == NULL)
1338 - return -EINVAL;
1339 -
1340 - err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
1341 + err = cbq_opt_parse(tb, opt);
1342 if (err < 0)
1343 return err;
1344
1345 diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
1346 index b56d57984439..551cf193649e 100644
1347 --- a/net/sched/sch_dsmark.c
1348 +++ b/net/sched/sch_dsmark.c
1349 @@ -346,6 +346,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
1350 goto errout;
1351
1352 err = -EINVAL;
1353 + if (!tb[TCA_DSMARK_INDICES])
1354 + goto errout;
1355 indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);
1356
1357 if (hweight32(indices) != 1)
1358 diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
1359 index 23e5808a0970..e5d5c7fb2dac 100644
1360 --- a/security/smack/smack_access.c
1361 +++ b/security/smack/smack_access.c
1362 @@ -474,7 +474,7 @@ char *smk_parse_smack(const char *string, int len)
1363 if (i == 0 || i >= SMK_LONGLABEL)
1364 return ERR_PTR(-EINVAL);
1365
1366 - smack = kzalloc(i + 1, GFP_KERNEL);
1367 + smack = kzalloc(i + 1, GFP_NOFS);
1368 if (smack == NULL)
1369 return ERR_PTR(-ENOMEM);
1370
1371 @@ -545,7 +545,7 @@ struct smack_known *smk_import_entry(const char *string, int len)
1372 if (skp != NULL)
1373 goto freeout;
1374
1375 - skp = kzalloc(sizeof(*skp), GFP_KERNEL);
1376 + skp = kzalloc(sizeof(*skp), GFP_NOFS);
1377 if (skp == NULL) {
1378 skp = ERR_PTR(-ENOMEM);
1379 goto freeout;
1380 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
1381 index aeb3ba70f907..589c1c2ae6db 100644
1382 --- a/security/smack/smack_lsm.c
1383 +++ b/security/smack/smack_lsm.c
1384 @@ -268,7 +268,7 @@ static struct smack_known *smk_fetch(const char *name, struct inode *ip,
1385 if (!(ip->i_opflags & IOP_XATTR))
1386 return ERR_PTR(-EOPNOTSUPP);
1387
1388 - buffer = kzalloc(SMK_LONGLABEL, GFP_KERNEL);
1389 + buffer = kzalloc(SMK_LONGLABEL, GFP_NOFS);
1390 if (buffer == NULL)
1391 return ERR_PTR(-ENOMEM);
1392
1393 @@ -949,7 +949,8 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
1394
1395 if (rc != 0)
1396 return rc;
1397 - } else if (bprm->unsafe)
1398 + }
1399 + if (bprm->unsafe & ~LSM_UNSAFE_PTRACE)
1400 return -EPERM;
1401
1402 bsp->smk_task = isp->smk_task;
1403 @@ -4037,6 +4038,8 @@ access_check:
1404 skp = smack_ipv6host_label(&sadd);
1405 if (skp == NULL)
1406 skp = smack_net_ambient;
1407 + if (skb == NULL)
1408 + break;
1409 #ifdef CONFIG_AUDIT
1410 smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
1411 ad.a.u.net->family = family;