Magellan Linux

Contents of /trunk/kernel-alx/patches-3.18/0109-3.18.10-all-fixes.patch

Revision 2610, committed Mon Jul 13 08:28:30 2015 UTC by niro
File size: 205881 bytes
-linux-3.18.10
1 diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
2 index aee73e78c7d4..02f8331edb8b 100644
3 --- a/Documentation/stable_kernel_rules.txt
4 +++ b/Documentation/stable_kernel_rules.txt
5 @@ -32,18 +32,42 @@ Procedure for submitting patches to the -stable tree:
6 - If the patch covers files in net/ or drivers/net please follow netdev stable
7 submission guidelines as described in
8 Documentation/networking/netdev-FAQ.txt
9 - - Send the patch, after verifying that it follows the above rules, to
10 - stable@vger.kernel.org. You must note the upstream commit ID in the
11 - changelog of your submission, as well as the kernel version you wish
12 - it to be applied to.
13 - - To have the patch automatically included in the stable tree, add the tag
14 + - Security patches should not be handled (solely) by the -stable review
15 + process but should follow the procedures in Documentation/SecurityBugs.
16 +
17 +For all other submissions, choose one of the following procedures:
18 +
19 + --- Option 1 ---
20 +
21 + To have the patch automatically included in the stable tree, add the tag
22 Cc: stable@vger.kernel.org
23 in the sign-off area. Once the patch is merged it will be applied to
24 the stable tree without anything else needing to be done by the author
25 or subsystem maintainer.
26 - - If the patch requires other patches as prerequisites which can be
27 - cherry-picked, then this can be specified in the following format in
28 - the sign-off area:
29 +
30 + --- Option 2 ---
31 +
32 + After the patch has been merged to Linus' tree, send an email to
33 + stable@vger.kernel.org containing the subject of the patch, the commit ID,
34 + why you think it should be applied, and what kernel version you wish it to
35 + be applied to.
36 +
37 + --- Option 3 ---
38 +
39 + Send the patch, after verifying that it follows the above rules, to
40 + stable@vger.kernel.org. You must note the upstream commit ID in the
41 + changelog of your submission, as well as the kernel version you wish
42 + it to be applied to.
43 +
44 +Option 1 is probably the easiest and most common. Options 2 and 3 are more
45 +useful if the patch isn't deemed worthy at the time it is applied to a public
46 +git tree (for instance, because it deserves more regression testing first).
47 +Option 3 is especially useful if the patch needs some special handling to apply
48 +to an older kernel (e.g., if API's have changed in the meantime).
49 +
50 +Additionally, some patches submitted via Option 1 may have additional patch
51 +prerequisites which can be cherry-picked. This can be specified in the following
52 +format in the sign-off area:
53
54 Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
55 Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
56 @@ -57,13 +81,13 @@ Procedure for submitting patches to the -stable tree:
57 git cherry-pick fd21073
58 git cherry-pick <this commit>
59
60 +Following the submission:
61 +
62 - The sender will receive an ACK when the patch has been accepted into the
63 queue, or a NAK if the patch is rejected. This response might take a few
64 days, according to the developer's schedules.
65 - If accepted, the patch will be added to the -stable queue, for review by
66 other developers and by the relevant subsystem maintainer.
67 - - Security patches should not be sent to this alias, but instead to the
68 - documented security@kernel.org address.
69
70
71 Review cycle:
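[Editorial note: for concreteness, a hypothetical Option 1 sign-off area using the prerequisite format quoted above could look like this (the commit IDs are the ones from the documentation's own example):

	Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
	Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
	Cc: <stable@vger.kernel.org> # 3.3.x
	Signed-off-by: Jane Developer <jane@example.com>

The stable team then cherry-picks a1f84a3 and 1b9508f into 3.3.x before applying the patch itself, matching the git cherry-pick sequence shown in the hunk.]
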
72 diff --git a/Makefile b/Makefile
73 index 62b333802a0e..d4ce2cb674c8 100644
74 --- a/Makefile
75 +++ b/Makefile
76 @@ -1,6 +1,6 @@
77 VERSION = 3
78 PATCHLEVEL = 18
79 -SUBLEVEL = 9
80 +SUBLEVEL = 10
81 EXTRAVERSION =
82 NAME = Diseased Newt
83
84 diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
85 index 210fe97464c3..c750af161979 100644
86 --- a/arch/arc/include/asm/processor.h
87 +++ b/arch/arc/include/asm/processor.h
88 @@ -75,18 +75,19 @@ unsigned long thread_saved_pc(struct task_struct *t);
89 #define release_segments(mm) do { } while (0)
90
91 #define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret)
92 +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
93
94 /*
95 * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
96 * Look in process.c for details of kernel stack layout
97 */
98 -#define KSTK_ESP(tsk) (tsk->thread.ksp)
99 +#define TSK_K_ESP(tsk) (tsk->thread.ksp)
100
101 -#define KSTK_REG(tsk, off) (*((unsigned int *)(KSTK_ESP(tsk) + \
102 +#define TSK_K_REG(tsk, off) (*((unsigned int *)(TSK_K_ESP(tsk) + \
103 sizeof(struct callee_regs) + off)))
104
105 -#define KSTK_BLINK(tsk) KSTK_REG(tsk, 4)
106 -#define KSTK_FP(tsk) KSTK_REG(tsk, 0)
107 +#define TSK_K_BLINK(tsk) TSK_K_REG(tsk, 4)
108 +#define TSK_K_FP(tsk) TSK_K_REG(tsk, 0)
109
110 extern void start_thread(struct pt_regs * regs, unsigned long pc,
111 unsigned long usp);
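
[Editorial note: this hunk and the stacktrace.c change that follows split the macro namespace. KSTK_EIP()/KSTK_ESP() are the generic helpers that other code (for example the kstkeip/kstkesp fields of /proc/<pid>/stat) uses for user-mode register values, so they must read the task's saved pt_regs; the renamed TSK_K_*() macros keep pointing at the kernel-mode context the ARC unwinder needs. A hedged sketch of the intended usage:

	/* user-visible registers, from the task's saved pt_regs */
	unsigned long user_pc = KSTK_EIP(tsk);
	unsigned long user_sp = KSTK_ESP(tsk);

	/* kernel-mode unwind state, from the scheduler's saved context */
	unsigned long kernel_sp = TSK_K_ESP(tsk);
	unsigned long frame_ptr = TSK_K_FP(tsk);
]
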
112 diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
113 index 9ce47cfe2303..fb98769b6a98 100644
114 --- a/arch/arc/kernel/stacktrace.c
115 +++ b/arch/arc/kernel/stacktrace.c
116 @@ -64,9 +64,9 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
117
118 frame_info->task = tsk;
119
120 - frame_info->regs.r27 = KSTK_FP(tsk);
121 - frame_info->regs.r28 = KSTK_ESP(tsk);
122 - frame_info->regs.r31 = KSTK_BLINK(tsk);
123 + frame_info->regs.r27 = TSK_K_FP(tsk);
124 + frame_info->regs.r28 = TSK_K_ESP(tsk);
125 + frame_info->regs.r31 = TSK_K_BLINK(tsk);
126 frame_info->regs.r63 = (unsigned int)__switch_to;
127
128 /* In the prologue of __switch_to, first FP is saved on stack
129 diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
130 index c1388d40663b..bd6437f67dc0 100644
131 --- a/arch/mips/kvm/trace.h
132 +++ b/arch/mips/kvm/trace.h
133 @@ -24,18 +24,18 @@ TRACE_EVENT(kvm_exit,
134 TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
135 TP_ARGS(vcpu, reason),
136 TP_STRUCT__entry(
137 - __field(struct kvm_vcpu *, vcpu)
138 + __field(unsigned long, pc)
139 __field(unsigned int, reason)
140 ),
141
142 TP_fast_assign(
143 - __entry->vcpu = vcpu;
144 + __entry->pc = vcpu->arch.pc;
145 __entry->reason = reason;
146 ),
147
148 TP_printk("[%s]PC: 0x%08lx",
149 kvm_mips_exit_types_str[__entry->reason],
150 - __entry->vcpu->arch.pc)
151 + __entry->pc)
152 );
153
154 #endif /* _TRACE_KVM_H */
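
[Editorial note: the fix above follows the standard trace-event rule that TP_fast_assign() must copy the values TP_printk() needs into the ring buffer, because TP_printk() runs much later, when the trace is read, and a stored pointer may be stale by then. A hedged sketch of the safe pattern for a hypothetical event:

	TRACE_EVENT(example_exit,
		TP_PROTO(struct example_ctx *ctx, unsigned int reason),
		TP_ARGS(ctx, reason),
		TP_STRUCT__entry(
			__field(unsigned long, pc)	/* a value, not a pointer */
			__field(unsigned int, reason)
		),
		TP_fast_assign(
			__entry->pc = ctx->pc;		/* snapshot at event time */
			__entry->reason = reason;
		),
		TP_printk("reason %u PC: 0x%08lx", __entry->reason, __entry->pc)
	);
]
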
155 diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
156 index f09a22fa1bd7..bfa8f8ac51fa 100644
157 --- a/arch/powerpc/include/asm/pnv-pci.h
158 +++ b/arch/powerpc/include/asm/pnv-pci.h
159 @@ -19,7 +19,7 @@ int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
160 int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num);
161 void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num);
162 int pnv_cxl_get_irq_count(struct pci_dev *dev);
163 -struct device_node *pnv_pci_to_phb_node(struct pci_dev *dev);
164 +struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev);
165
166 #ifdef CONFIG_CXL_BASE
167 int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
168 diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
169 index 3ba435ec3dcd..3f596706a5b7 100644
170 --- a/arch/powerpc/platforms/powernv/pci-ioda.c
171 +++ b/arch/powerpc/platforms/powernv/pci-ioda.c
172 @@ -1355,13 +1355,13 @@ static void set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
173
174 #ifdef CONFIG_CXL_BASE
175
176 -struct device_node *pnv_pci_to_phb_node(struct pci_dev *dev)
177 +struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
178 {
179 struct pci_controller *hose = pci_bus_to_host(dev->bus);
180
181 - return hose->dn;
182 + return of_node_get(hose->dn);
183 }
184 -EXPORT_SYMBOL(pnv_pci_to_phb_node);
185 +EXPORT_SYMBOL(pnv_pci_get_phb_node);
186
187 int pnv_phb_to_cxl(struct pci_dev *dev)
188 {
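
[Editorial note: with the rename to pnv_pci_get_phb_node() the function now follows the kernel convention that a "get" returns a counted reference (taken via of_node_get()), so callers are expected to balance it. A hedged usage sketch:

	struct device_node *np;

	np = pnv_pci_get_phb_node(dev);	/* takes a reference on the node */
	if (np) {
		/* ... use np ... */
		of_node_put(np);	/* drop the reference when done */
	}
]
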
189 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
190 index 7e7a79ada658..d82b80405e45 100644
191 --- a/arch/x86/include/asm/xsave.h
192 +++ b/arch/x86/include/asm/xsave.h
193 @@ -81,18 +81,15 @@ static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask)
194 if (boot_cpu_has(X86_FEATURE_XSAVES))
195 asm volatile("1:"XSAVES"\n\t"
196 "2:\n\t"
197 - : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
198 + xstate_fault
199 + : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
200 : "memory");
201 else
202 asm volatile("1:"XSAVE"\n\t"
203 "2:\n\t"
204 - : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
205 + xstate_fault
206 + : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
207 : "memory");
208 -
209 - asm volatile(xstate_fault
210 - : "0" (0)
211 - : "memory");
212 -
213 return err;
214 }
215
216 @@ -111,18 +108,15 @@ static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
217 if (boot_cpu_has(X86_FEATURE_XSAVES))
218 asm volatile("1:"XRSTORS"\n\t"
219 "2:\n\t"
220 - : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
221 + xstate_fault
222 + : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
223 : "memory");
224 else
225 asm volatile("1:"XRSTOR"\n\t"
226 "2:\n\t"
227 - : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
228 + xstate_fault
229 + : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
230 : "memory");
231 -
232 - asm volatile(xstate_fault
233 - : "0" (0)
234 - : "memory");
235 -
236 return err;
237 }
238
239 @@ -148,9 +142,9 @@ static inline int xsave_state(struct xsave_struct *fx, u64 mask)
240 */
241 alternative_input_2(
242 "1:"XSAVE,
243 - "1:"XSAVEOPT,
244 + XSAVEOPT,
245 X86_FEATURE_XSAVEOPT,
246 - "1:"XSAVES,
247 + XSAVES,
248 X86_FEATURE_XSAVES,
249 [fx] "D" (fx), "a" (lmask), "d" (hmask) :
250 "memory");
251 @@ -177,7 +171,7 @@ static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
252 */
253 alternative_input(
254 "1: " XRSTOR,
255 - "1: " XRSTORS,
256 + XRSTORS,
257 X86_FEATURE_XSAVES,
258 "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
259 : "memory");
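
[Editorial note: the first two hunks repair a broken split of the fault handling. xstate_fault conventionally expands to the .fixup stub, an exception-table entry pointing back at label 1, and the error output operand (with its leading colon); none of that is meaningful in a separate asm statement from the instruction that can fault, since the local label would be unresolved there. The fixed shape, sketched with comments and assuming that conventional expansion:

	int err = 0;

	asm volatile("1:" XSAVES "\n\t"	/* the instruction that may fault */
		     "2:\n\t"		/* resume point after the fixup */
		     xstate_fault	/* .fixup + extable for 1b; writes err */
		     : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
		     : "memory");

The later alternative_input hunks appear to drop the duplicated "1:" labels for a related reason: the label is already defined by the base instruction string, and the exception-table reference must keep pointing at that single definition rather than at a copy inside the alternatives section.]
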
260 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
261 index c0226ab54106..f1dc27f457f1 100644
262 --- a/arch/x86/kernel/entry_64.S
263 +++ b/arch/x86/kernel/entry_64.S
264 @@ -334,11 +334,14 @@ ENTRY(ret_from_fork)
265 testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
266 jz 1f
267
268 - testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
269 - jnz int_ret_from_sys_call
270 -
271 - RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
272 - jmp ret_from_sys_call # go to the SYSRET fastpath
273 + /*
274 + * By the time we get here, we have no idea whether our pt_regs,
275 + * ti flags, and ti status came from the 64-bit SYSCALL fast path,
276 + * the slow path, or one of the ia32entry paths.
277 + * Use int_ret_from_sys_call to return, since it can safely handle
278 + * all of the above.
279 + */
280 + jmp int_ret_from_sys_call
281
282 1:
283 subq $REST_SKIP, %rsp # leave space for volatiles
284 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
285 index c7327a7761ca..974e4d98ed29 100644
286 --- a/arch/x86/kvm/emulate.c
287 +++ b/arch/x86/kvm/emulate.c
288 @@ -4829,7 +4829,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
289 if (rc != X86EMUL_CONTINUE)
290 goto done;
291 }
292 - ctxt->dst.orig_val = ctxt->dst.val;
293 + /* Copy full 64-bit value for CMPXCHG8B. */
294 + ctxt->dst.orig_val64 = ctxt->dst.val64;
295
296 special_insn:
297
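
[Editorial note: the operand fields .val/.orig_val are 'unsigned long' sized, so on 32-bit hosts the old copy silently dropped the upper half of the 64-bit operand that CMPXCHG8B compares; .val64/.orig_val64 preserve it. A hedged illustration of the truncation:

	u64 val64 = 0x1122334455667788ULL;
	unsigned long val = (unsigned long)val64;
	/* on a 32-bit build (BITS_PER_LONG == 32), val == 0x55667788:
	 * the high word 0x11223344 that CMPXCHG8B must compare is lost */
]
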
298 diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
299 index 41e9c199e874..fdb5701bed75 100644
300 --- a/drivers/acpi/acpi_lpss.c
301 +++ b/drivers/acpi/acpi_lpss.c
302 @@ -65,6 +65,7 @@ struct lpss_private_data;
303
304 struct lpss_device_desc {
305 unsigned int flags;
306 + const char *clk_con_id;
307 unsigned int prv_offset;
308 size_t prv_size_override;
309 void (*setup)(struct lpss_private_data *pdata);
310 @@ -140,6 +141,7 @@ static struct lpss_device_desc lpt_i2c_dev_desc = {
311
312 static struct lpss_device_desc lpt_uart_dev_desc = {
313 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
314 + .clk_con_id = "baudclk",
315 .prv_offset = 0x800,
316 .setup = lpss_uart_setup,
317 };
318 @@ -156,6 +158,7 @@ static struct lpss_device_desc byt_pwm_dev_desc = {
319
320 static struct lpss_device_desc byt_uart_dev_desc = {
321 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
322 + .clk_con_id = "baudclk",
323 .prv_offset = 0x800,
324 .setup = lpss_uart_setup,
325 };
326 @@ -313,7 +316,7 @@ out:
327 return PTR_ERR(clk);
328
329 pdata->clk = clk;
330 - clk_register_clkdev(clk, NULL, devname);
331 + clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
332 return 0;
333 }
334
335 diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
336 index 41322591fb43..ff7bc22b6135 100644
337 --- a/drivers/acpi/video.c
338 +++ b/drivers/acpi/video.c
339 @@ -2124,6 +2124,17 @@ EXPORT_SYMBOL(acpi_video_unregister_backlight);
340
341 static int __init acpi_video_init(void)
342 {
343 + /*
344 + * Let the module load even if ACPI is disabled (e.g. due to
345 + * a broken BIOS) so that i915.ko can still be loaded on such
346 + * old systems without an AcpiOpRegion.
347 + *
348 + * acpi_video_register() will report -ENODEV later as well due
349 + * to acpi_disabled when i915.ko tries to register itself afterwards.
350 + */
351 + if (acpi_disabled)
352 + return 0;
353 +
354 dmi_check_system(video_dmi_table);
355
356 if (intel_opregion_present())
357 diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
358 index 51fd87fb7ba6..da00eeb95dad 100644
359 --- a/drivers/clk/clk-gate.c
360 +++ b/drivers/clk/clk-gate.c
361 @@ -128,7 +128,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
362 struct clk_init_data init;
363
364 if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
365 - if (bit_idx > 16) {
366 + if (bit_idx > 15) {
367 pr_err("gate bit exceeds LOWORD field\n");
368 return ERR_PTR(-EINVAL);
369 }
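
[Editorial note on the off-by-one above: a CLK_GATE_HIWORD_MASK register keeps the writable value in the low 16 bits and a write-enable mask for it in the high 16 bits, so a valid gate bit index is 0..15; index 16, which the old check accepted, already sits in the mask half. A hedged sketch of a hiword-mask update:

	/* update the gate bit while write-enabling only that one bit */
	u32 reg = BIT(bit_idx + 16);		/* write-enable mask, high word */
	if (enable)
		reg |= BIT(bit_idx);		/* value, low word: bit_idx in 0..15 */
	writel(reg, gate->reg);
]
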
370 diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
371 index 26bed0889e97..7d74830e2ced 100644
372 --- a/drivers/clk/clk.c
373 +++ b/drivers/clk/clk.c
374 @@ -343,13 +343,9 @@ unlock:
375 static void clk_debug_unregister(struct clk *clk)
376 {
377 mutex_lock(&clk_debug_lock);
378 - if (!clk->dentry)
379 - goto out;
380 -
381 hlist_del_init(&clk->debug_node);
382 debugfs_remove_recursive(clk->dentry);
383 clk->dentry = NULL;
384 -out:
385 mutex_unlock(&clk_debug_lock);
386 }
387
388 diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
389 index d5dc951264ca..b18e22fb25a6 100644
390 --- a/drivers/clk/sunxi/clk-sunxi.c
391 +++ b/drivers/clk/sunxi/clk-sunxi.c
392 @@ -419,6 +419,7 @@ static struct clk_factors_config sun6i_a31_pll1_config = {
393 .kwidth = 2,
394 .mshift = 0,
395 .mwidth = 2,
396 + .n_start = 1,
397 };
398
399 static struct clk_factors_config sun8i_a23_pll1_config = {
400 diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
401 index 9037bebd69f7..f870aad57711 100644
402 --- a/drivers/clk/zynq/clkc.c
403 +++ b/drivers/clk/zynq/clkc.c
404 @@ -303,6 +303,7 @@ static void __init zynq_clk_setup(struct device_node *np)
405 clks[cpu_2x] = clk_register_gate(NULL, clk_output_name[cpu_2x],
406 "cpu_2x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL,
407 26, 0, &armclk_lock);
408 + clk_prepare_enable(clks[cpu_2x]);
409
410 clk = clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1,
411 4 + 2 * tmp);
412 diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
413 index 3c97c8fa8d02..8a37af923094 100644
414 --- a/drivers/dma-buf/reservation.c
415 +++ b/drivers/dma-buf/reservation.c
416 @@ -402,8 +402,6 @@ reservation_object_test_signaled_single(struct fence *passed_fence)
417 int ret = 1;
418
419 if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
420 - int ret;
421 -
422 fence = fence_get_rcu(lfence);
423 if (!fence)
424 return -1;
425 diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
426 index a920fec8fe88..5186eb01945a 100644
427 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c
428 +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
429 @@ -170,12 +170,12 @@ again:
430 start = desc->phys_addr;
431 end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
432
433 - if ((start + size) > end || (start + size) > max)
434 - continue;
435 -
436 - if (end - size > max)
437 + if (end > max)
438 end = max;
439
440 + if ((start + size) > end)
441 + continue;
442 +
443 if (round_down(end - size, align) < start)
444 continue;
445
446 diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
447 index 018c29a26615..87b8e3b900d2 100644
448 --- a/drivers/firmware/efi/runtime-map.c
449 +++ b/drivers/firmware/efi/runtime-map.c
450 @@ -191,7 +191,7 @@ int __init efi_runtime_map_init(struct kobject *efi_kobj)
451
452 return 0;
453 out_add_entry:
454 - for (j = i - 1; j > 0; j--) {
455 + for (j = i - 1; j >= 0; j--) {
456 entry = *(map_entries + j);
457 kobject_put(&entry->kobj);
458 }
459 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
460 index c33327d5c543..45434333b289 100644
461 --- a/drivers/gpu/drm/i915/i915_drv.h
462 +++ b/drivers/gpu/drm/i915/i915_drv.h
463 @@ -2077,6 +2077,7 @@ struct drm_i915_cmd_table {
464 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
465 #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
466 ((INTEL_DEVID(dev) & 0xf) == 0x6 || \
467 + (INTEL_DEVID(dev) & 0xf) == 0xb || \
468 (INTEL_DEVID(dev) & 0xf) == 0xe))
469 #define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
470 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
471 diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
472 index 2b1eaa29ada4..6765148ea5bc 100644
473 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c
474 +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
475 @@ -315,9 +315,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
476 return -EINVAL;
477 }
478
479 + mutex_lock(&dev->struct_mutex);
480 if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
481 - drm_gem_object_unreference_unlocked(&obj->base);
482 - return -EBUSY;
483 + ret = -EBUSY;
484 + goto err;
485 }
486
487 if (args->tiling_mode == I915_TILING_NONE) {
488 @@ -349,7 +350,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
489 }
490 }
491
492 - mutex_lock(&dev->struct_mutex);
493 if (args->tiling_mode != obj->tiling_mode ||
494 args->stride != obj->stride) {
495 /* We need to rebind the object if its current allocation
496 @@ -395,6 +395,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
497 obj->bit_17 = NULL;
498 }
499
500 +err:
501 drm_gem_object_unreference(&obj->base);
502 mutex_unlock(&dev->struct_mutex);
503
504 diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
505 index d182058383a9..1719078c763a 100644
506 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c
507 +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
508 @@ -113,7 +113,10 @@ restart:
509 continue;
510
511 obj = mo->obj;
512 - drm_gem_object_reference(&obj->base);
513 +
514 + if (!kref_get_unless_zero(&obj->base.refcount))
515 + continue;
516 +
517 spin_unlock(&mn->lock);
518
519 cancel_userptr(obj);
520 @@ -149,7 +152,20 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
521 it = interval_tree_iter_first(&mn->objects, start, end);
522 if (it != NULL) {
523 obj = container_of(it, struct i915_mmu_object, it)->obj;
524 - drm_gem_object_reference(&obj->base);
525 +
526 + /* The mmu_object is released late when destroying the
527 + * GEM object so it is entirely possible to gain a
528 + * reference on an object in the process of being freed
529 + * since our serialisation is via the spinlock and not
530 + * the struct_mutex - and consequently use it after it
531 + * is freed and then double free it.
532 + */
533 + if (!kref_get_unless_zero(&obj->base.refcount)) {
534 + spin_unlock(&mn->lock);
535 + serial = 0;
536 + continue;
537 + }
538 +
539 serial = mn->serial;
540 }
541 spin_unlock(&mn->lock);
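
[Editorial note: both hunks replace an unconditional drm_gem_object_reference() with kref_get_unless_zero(), the standard pattern when an object is found through a side index protected only by a spinlock: the object may already be in its release path, so a reference is taken only while the refcount is still non-zero, exactly as the added comment explains. A hedged sketch of the pattern (lookup_object() is a hypothetical stand-in for the interval-tree search):

	spin_lock(&mn->lock);
	obj = lookup_object(mn, start, end);
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;			/* lost the race with free */
	spin_unlock(&mn->lock);

	if (obj) {
		/* safe to use obj here */
		drm_gem_object_unreference_unlocked(&obj->base);
	}
]
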
542 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
543 index 9ba1177200b2..0ab77f319cef 100644
544 --- a/drivers/gpu/drm/i915/i915_irq.c
545 +++ b/drivers/gpu/drm/i915/i915_irq.c
546 @@ -2123,6 +2123,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
547 u32 iir, gt_iir, pm_iir;
548 irqreturn_t ret = IRQ_NONE;
549
550 + if (!intel_irqs_enabled(dev_priv))
551 + return IRQ_NONE;
552 +
553 while (true) {
554 /* Find, clear, then process each source of interrupt */
555
556 @@ -2167,6 +2170,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
557 u32 master_ctl, iir;
558 irqreturn_t ret = IRQ_NONE;
559
560 + if (!intel_irqs_enabled(dev_priv))
561 + return IRQ_NONE;
562 +
563 for (;;) {
564 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
565 iir = I915_READ(VLV_IIR);
566 @@ -2455,6 +2461,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
567 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
568 irqreturn_t ret = IRQ_NONE;
569
570 + if (!intel_irqs_enabled(dev_priv))
571 + return IRQ_NONE;
572 +
573 /* We get interrupts on unclaimed registers, so check for this before we
574 * do any I915_{READ,WRITE}. */
575 intel_uncore_check_errors(dev);
576 @@ -2525,6 +2534,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
577 uint32_t tmp = 0;
578 enum pipe pipe;
579
580 + if (!intel_irqs_enabled(dev_priv))
581 + return IRQ_NONE;
582 +
583 master_ctl = I915_READ(GEN8_MASTER_IRQ);
584 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
585 if (!master_ctl)
586 @@ -4052,6 +4064,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
587 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
588 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
589
590 + if (!intel_irqs_enabled(dev_priv))
591 + return IRQ_NONE;
592 +
593 iir = I915_READ16(IIR);
594 if (iir == 0)
595 return IRQ_NONE;
596 @@ -4238,6 +4253,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
597 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
598 int pipe, ret = IRQ_NONE;
599
600 + if (!intel_irqs_enabled(dev_priv))
601 + return IRQ_NONE;
602 +
603 iir = I915_READ(IIR);
604 do {
605 bool irq_received = (iir & ~flip_mask) != 0;
606 @@ -4466,6 +4484,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
607 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
608 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
609
610 + if (!intel_irqs_enabled(dev_priv))
611 + return IRQ_NONE;
612 +
613 iir = I915_READ(IIR);
614
615 for (;;) {
616 @@ -4777,4 +4798,5 @@ void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
617 dev_priv->pm._irqs_disabled = false;
618 dev->driver->irq_preinstall(dev);
619 dev->driver->irq_postinstall(dev);
620 + synchronize_irq(dev_priv->dev->irq);
621 }
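
[Editorial note: every handler above gains the same guard. Once the driver has disabled its interrupts (for example for runtime PM), a spurious or shared-line interrupt must be answered with IRQ_NONE instead of touching powered-down registers. The shape of the guard, as a sketch:

	static irqreturn_t example_irq_handler(int irq, void *arg)
	{
		struct drm_device *dev = arg;
		struct drm_i915_private *dev_priv = dev->dev_private;

		if (!intel_irqs_enabled(dev_priv))
			return IRQ_NONE;	/* interrupts are off; not ours */

		/* ... normal interrupt servicing ... */
		return IRQ_HANDLED;
	}
]
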
622 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
623 index 31b96643b59c..7a7c445b07b4 100644
624 --- a/drivers/gpu/drm/i915/intel_display.c
625 +++ b/drivers/gpu/drm/i915/intel_display.c
626 @@ -12895,6 +12895,9 @@ static struct intel_quirk intel_quirks[] = {
627
628 /* HP Chromebook 14 (Celeron 2955U) */
629 { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
630 +
631 + /* Dell Chromebook 11 */
632 + { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
633 };
634
635 static void intel_init_quirks(struct drm_device *dev)
636 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
637 index 4bcd91757321..740d9ebbebde 100644
638 --- a/drivers/gpu/drm/i915/intel_dp.c
639 +++ b/drivers/gpu/drm/i915/intel_dp.c
640 @@ -3645,8 +3645,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
641 enum port port = intel_dig_port->port;
642 struct drm_device *dev = intel_dig_port->base.base.dev;
643 struct drm_i915_private *dev_priv = dev->dev_private;
644 - struct intel_crtc *intel_crtc =
645 - to_intel_crtc(intel_dig_port->base.base.crtc);
646 uint32_t DP = intel_dp->DP;
647
648 if (WARN_ON(HAS_DDI(dev)))
649 @@ -3671,8 +3669,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
650
651 if (HAS_PCH_IBX(dev) &&
652 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
653 - struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
654 -
655 /* Hardware workaround: leaving our transcoder select
656 * set to transcoder B while it's off will prevent the
657 * corresponding HDMI output on transcoder A.
658 @@ -3683,18 +3679,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
659 */
660 DP &= ~DP_PIPEB_SELECT;
661 I915_WRITE(intel_dp->output_reg, DP);
662 -
663 - /* Changes to enable or select take place the vblank
664 - * after being written.
665 - */
666 - if (WARN_ON(crtc == NULL)) {
667 - /* We should never try to disable a port without a crtc
668 - * attached. For paranoia keep the code around for a
669 - * bit. */
670 - POSTING_READ(intel_dp->output_reg);
671 - msleep(50);
672 - } else
673 - intel_wait_for_vblank(dev, intel_crtc->pipe);
674 + POSTING_READ(intel_dp->output_reg);
675 }
676
677 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
678 diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
679 index bafd38b5703e..a97b83b78ae7 100644
680 --- a/drivers/gpu/drm/i915/intel_lrc.c
681 +++ b/drivers/gpu/drm/i915/intel_lrc.c
682 @@ -1106,15 +1106,17 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
683
684 cmd = MI_FLUSH_DW + 1;
685
686 - if (ring == &dev_priv->ring[VCS]) {
687 - if (invalidate_domains & I915_GEM_GPU_DOMAINS)
688 - cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
689 - MI_FLUSH_DW_STORE_INDEX |
690 - MI_FLUSH_DW_OP_STOREDW;
691 - } else {
692 - if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
693 - cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
694 - MI_FLUSH_DW_OP_STOREDW;
695 + /* We always require a command barrier so that subsequent
696 + * commands, such as breadcrumb interrupts, are strictly ordered
697 + * wrt the contents of the write cache being flushed to memory
698 + * (and thus being coherent from the CPU).
699 + */
700 + cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
701 +
702 + if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
703 + cmd |= MI_INVALIDATE_TLB;
704 + if (ring == &dev_priv->ring[VCS])
705 + cmd |= MI_INVALIDATE_BSD;
706 }
707
708 intel_logical_ring_emit(ringbuf, cmd);
709 diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
710 index ae17e77dc08d..9f10b771319f 100644
711 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
712 +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
713 @@ -2139,6 +2139,14 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
714 cmd = MI_FLUSH_DW;
715 if (INTEL_INFO(ring->dev)->gen >= 8)
716 cmd += 1;
717 +
718 + /* We always require a command barrier so that subsequent
719 + * commands, such as breadcrumb interrupts, are strictly ordered
720 + * wrt the contents of the write cache being flushed to memory
721 + * (and thus being coherent from the CPU).
722 + */
723 + cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
724 +
725 /*
726 * Bspec vol 1c.5 - video engine command streamer:
727 * "If ENABLED, all TLBs will be invalidated once the flush
728 @@ -2146,8 +2154,8 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
729 * Post-Sync Operation field is a value of 1h or 3h."
730 */
731 if (invalidate & I915_GEM_GPU_DOMAINS)
732 - cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
733 - MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
734 + cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
735 +
736 intel_ring_emit(ring, cmd);
737 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
738 if (INTEL_INFO(ring->dev)->gen >= 8) {
739 @@ -2242,6 +2250,14 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
740 cmd = MI_FLUSH_DW;
741 if (INTEL_INFO(ring->dev)->gen >= 8)
742 cmd += 1;
743 +
744 + /* We always require a command barrier so that subsequent
745 + * commands, such as breadcrumb interrupts, are strictly ordered
746 + * wrt the contents of the write cache being flushed to memory
747 + * (and thus being coherent from the CPU).
748 + */
749 + cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
750 +
751 /*
752 * Bspec vol 1c.3 - blitter engine command streamer:
753 * "If ENABLED, all TLBs will be invalidated once the flush
754 @@ -2249,8 +2265,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
755 * Post-Sync Operation field is a value of 1h or 3h."
756 */
757 if (invalidate & I915_GEM_DOMAIN_RENDER)
758 - cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
759 - MI_FLUSH_DW_OP_STOREDW;
760 + cmd |= MI_INVALIDATE_TLB;
761 intel_ring_emit(ring, cmd);
762 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
763 if (INTEL_INFO(ring->dev)->gen >= 8) {
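
[Editorial note: gen6_bsd_ring_flush() and gen6_ring_flush() here, like gen8_emit_flush() in intel_lrc.c above, are restructured the same way: the post-sync store bits become unconditional so every MI_FLUSH_DW acts as a command barrier, and only the TLB-invalidation bits stay conditional. The resulting command word, as a sketch:

	u32 cmd = MI_FLUSH_DW;
	/* unconditional: make the flush a command barrier */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
	/* conditional: invalidate TLBs only when asked to */
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB;
]
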
764 diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
765 index 01d841ea3140..731b10a09aa0 100644
766 --- a/drivers/gpu/drm/i915/intel_sideband.c
767 +++ b/drivers/gpu/drm/i915/intel_sideband.c
768 @@ -82,7 +82,7 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
769 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
770
771 mutex_lock(&dev_priv->dpio_lock);
772 - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
773 + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
774 SB_CRRDDA_NP, addr, &val);
775 mutex_unlock(&dev_priv->dpio_lock);
776
777 @@ -94,7 +94,7 @@ void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
778 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
779
780 mutex_lock(&dev_priv->dpio_lock);
781 - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
782 + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
783 SB_CRWRDA_NP, addr, &val);
784 mutex_unlock(&dev_priv->dpio_lock);
785 }
786 @@ -103,7 +103,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
787 {
788 u32 val = 0;
789
790 - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
791 + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
792 SB_CRRDDA_NP, reg, &val);
793
794 return val;
795 @@ -111,7 +111,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
796
797 void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
798 {
799 - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
800 + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
801 SB_CRWRDA_NP, reg, &val);
802 }
803
804 @@ -122,7 +122,7 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
805 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
806
807 mutex_lock(&dev_priv->dpio_lock);
808 - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC,
809 + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC,
810 SB_CRRDDA_NP, addr, &val);
811 mutex_unlock(&dev_priv->dpio_lock);
812
813 @@ -132,56 +132,56 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
814 u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
815 {
816 u32 val = 0;
817 - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
818 + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
819 SB_CRRDDA_NP, reg, &val);
820 return val;
821 }
822
823 void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
824 {
825 - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
826 + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
827 SB_CRWRDA_NP, reg, &val);
828 }
829
830 u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
831 {
832 u32 val = 0;
833 - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
834 + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
835 SB_CRRDDA_NP, reg, &val);
836 return val;
837 }
838
839 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
840 {
841 - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
842 + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
843 SB_CRWRDA_NP, reg, &val);
844 }
845
846 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
847 {
848 u32 val = 0;
849 - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
850 + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
851 SB_CRRDDA_NP, reg, &val);
852 return val;
853 }
854
855 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
856 {
857 - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
858 + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
859 SB_CRWRDA_NP, reg, &val);
860 }
861
862 u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
863 {
864 u32 val = 0;
865 - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
866 + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
867 SB_CRRDDA_NP, reg, &val);
868 return val;
869 }
870
871 void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
872 {
873 - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
874 + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
875 SB_CRWRDA_NP, reg, &val);
876 }
877
878 diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
879 index db42a670f995..5bf825dfaa09 100644
880 --- a/drivers/gpu/drm/radeon/atombios_dp.c
881 +++ b/drivers/gpu/drm/radeon/atombios_dp.c
882 @@ -623,10 +623,8 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
883 drm_dp_dpcd_writeb(dp_info->aux,
884 DP_DOWNSPREAD_CTRL, 0);
885
886 - if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
887 - (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
888 + if (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)
889 drm_dp_dpcd_writeb(dp_info->aux, DP_EDP_CONFIGURATION_SET, 1);
890 - }
891
892 /* set the lane count on the sink */
893 tmp = dp_info->dp_lane_count;
894 diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
895 index 9328fb3dcfce..5f395be9b3e3 100644
896 --- a/drivers/gpu/drm/radeon/cik.c
897 +++ b/drivers/gpu/drm/radeon/cik.c
898 @@ -3880,7 +3880,21 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
899 struct radeon_ring *ring = &rdev->ring[fence->ring];
900 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
901
902 - /* EVENT_WRITE_EOP - flush caches, send int */
903 + /* Workaround for cache flush problems. First send a dummy EOP
904 + * event down the pipe with seq one below.
905 + */
906 + radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
907 + radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
908 + EOP_TC_ACTION_EN |
909 + EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
910 + EVENT_INDEX(5)));
911 + radeon_ring_write(ring, addr & 0xfffffffc);
912 + radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
913 + DATA_SEL(1) | INT_SEL(0));
914 + radeon_ring_write(ring, fence->seq - 1);
915 + radeon_ring_write(ring, 0);
916 +
917 + /* Then send the real EOP event down the pipe. */
918 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
919 radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
920 EOP_TC_ACTION_EN |
921 @@ -7295,7 +7309,6 @@ int cik_irq_set(struct radeon_device *rdev)
922 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
923 u32 grbm_int_cntl = 0;
924 u32 dma_cntl, dma_cntl1;
925 - u32 thermal_int;
926
927 if (!rdev->irq.installed) {
928 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
929 @@ -7332,13 +7345,6 @@ int cik_irq_set(struct radeon_device *rdev)
930 cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
931 cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
932
933 - if (rdev->flags & RADEON_IS_IGP)
934 - thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
935 - ~(THERM_INTH_MASK | THERM_INTL_MASK);
936 - else
937 - thermal_int = RREG32_SMC(CG_THERMAL_INT) &
938 - ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
939 -
940 /* enable CP interrupts on all rings */
941 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
942 DRM_DEBUG("cik_irq_set: sw int gfx\n");
943 @@ -7496,14 +7502,6 @@ int cik_irq_set(struct radeon_device *rdev)
944 hpd6 |= DC_HPDx_INT_EN;
945 }
946
947 - if (rdev->irq.dpm_thermal) {
948 - DRM_DEBUG("dpm thermal\n");
949 - if (rdev->flags & RADEON_IS_IGP)
950 - thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
951 - else
952 - thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
953 - }
954 -
955 WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
956
957 WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
958 @@ -7557,11 +7555,6 @@ int cik_irq_set(struct radeon_device *rdev)
959 WREG32(DC_HPD5_INT_CONTROL, hpd5);
960 WREG32(DC_HPD6_INT_CONTROL, hpd6);
961
962 - if (rdev->flags & RADEON_IS_IGP)
963 - WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
964 - else
965 - WREG32_SMC(CG_THERMAL_INT, thermal_int);
966 -
967 return 0;
968 }
969
970 diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
971 index e3e9c10cfba9..85a109e1e56b 100644
972 --- a/drivers/gpu/drm/radeon/kv_dpm.c
973 +++ b/drivers/gpu/drm/radeon/kv_dpm.c
974 @@ -1169,6 +1169,19 @@ void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
975 }
976 }
977
978 +static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable)
979 +{
980 + u32 thermal_int;
981 +
982 + thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL);
983 + if (enable)
984 + thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
985 + else
986 + thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK);
987 + WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
988 +
989 +}
990 +
991 int kv_dpm_enable(struct radeon_device *rdev)
992 {
993 struct kv_power_info *pi = kv_get_pi(rdev);
994 @@ -1280,8 +1293,7 @@ int kv_dpm_late_enable(struct radeon_device *rdev)
995 DRM_ERROR("kv_set_thermal_temperature_range failed\n");
996 return ret;
997 }
998 - rdev->irq.dpm_thermal = true;
999 - radeon_irq_set(rdev);
1000 + kv_enable_thermal_int(rdev, true);
1001 }
1002
1003 /* powerdown unused blocks for now */
1004 @@ -1312,6 +1324,7 @@ void kv_dpm_disable(struct radeon_device *rdev)
1005 kv_stop_dpm(rdev);
1006 kv_enable_ulv(rdev, false);
1007 kv_reset_am(rdev);
1008 + kv_enable_thermal_int(rdev, false);
1009
1010 kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
1011 }
1012 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
1013 index 3faee58946dd..8a83c917cf53 100644
1014 --- a/drivers/gpu/drm/radeon/ni.c
1015 +++ b/drivers/gpu/drm/radeon/ni.c
1016 @@ -1085,12 +1085,12 @@ static void cayman_gpu_init(struct radeon_device *rdev)
1017
1018 if ((rdev->config.cayman.max_backends_per_se == 1) &&
1019 (rdev->flags & RADEON_IS_IGP)) {
1020 - if ((disabled_rb_mask & 3) == 1) {
1021 - /* RB0 disabled, RB1 enabled */
1022 - tmp = 0x11111111;
1023 - } else {
1024 + if ((disabled_rb_mask & 3) == 2) {
1025 /* RB1 disabled, RB0 enabled */
1026 tmp = 0x00000000;
1027 + } else {
1028 + /* RB0 disabled, RB1 enabled */
1029 + tmp = 0x11111111;
1030 }
1031 } else {
1032 tmp = gb_addr_config & NUM_PIPES_MASK;
1033 diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
1034 index b5c73df8e202..65a0c1c03c69 100644
1035 --- a/drivers/gpu/drm/radeon/r600_dpm.c
1036 +++ b/drivers/gpu/drm/radeon/r600_dpm.c
1037 @@ -188,7 +188,7 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
1038 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1039 radeon_crtc = to_radeon_crtc(crtc);
1040 if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
1041 - vrefresh = radeon_crtc->hw_mode.vrefresh;
1042 + vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
1043 break;
1044 }
1045 }
1046 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
1047 index df69b92ba164..d79e892093b5 100644
1048 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
1049 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
1050 @@ -3280,6 +3280,7 @@ int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
1051
1052 args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
1053 args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
1054 + args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
1055 args.in.ulSCLKFreq =
1056 cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
1057
1058 diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
1059 index 6b670b0bc47b..3a297037cc17 100644
1060 --- a/drivers/gpu/drm/radeon/radeon_encoders.c
1061 +++ b/drivers/gpu/drm/radeon/radeon_encoders.c
1062 @@ -179,9 +179,12 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
1063 (rdev->pdev->subsystem_vendor == 0x1734) &&
1064 (rdev->pdev->subsystem_device == 0x1107))
1065 use_bl = false;
1066 +/* Older PPC macs use on-GPU backlight controller */
1067 +#ifndef CONFIG_PPC_PMAC
1068 /* disable native backlight control on older asics */
1069 else if (rdev->family < CHIP_R600)
1070 use_bl = false;
1071 +#endif
1072 else
1073 use_bl = true;
1074 }
1075 diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
1076 index 4c0d786d5c7a..194f6245c379 100644
1077 --- a/drivers/gpu/drm/radeon/radeon_object.c
1078 +++ b/drivers/gpu/drm/radeon/radeon_object.c
1079 @@ -218,6 +218,18 @@ int radeon_bo_create(struct radeon_device *rdev,
1080 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
1081 */
1082 bo->flags &= ~RADEON_GEM_GTT_WC;
1083 +#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
1084 + /* Don't try to enable write-combining when it can't work, or things
1085 + * may be slow
1086 + * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
1087 + */
1088 +
1089 +#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
1090 + thanks to write-combining
1091 +
1092 + DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
1093 + "better performance thanks to write-combining\n");
1094 + bo->flags &= ~RADEON_GEM_GTT_WC;
1095 #endif
1096
1097 radeon_ttm_placement_from_domain(bo, domain);
1098 diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
1099 index 59736bb810cd..1218419c12f6 100644
1100 --- a/drivers/gpu/drm/tegra/drm.c
1101 +++ b/drivers/gpu/drm/tegra/drm.c
1102 @@ -152,7 +152,7 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
1103 if (err < 0)
1104 return err;
1105
1106 - err = get_user(dest->target.offset, &src->cmdbuf.offset);
1107 + err = get_user(dest->target.offset, &src->target.offset);
1108 if (err < 0)
1109 return err;
1110
1111 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
1112 index 8df8ceb47659..01c7a08a66e1 100644
1113 --- a/drivers/hid/hid-input.c
1114 +++ b/drivers/hid/hid-input.c
1115 @@ -1104,6 +1104,23 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
1116 return;
1117 }
1118
1119 + /*
1120 + * Ignore reports for absolute data if the data didn't change. This is
1121 + * not only an optimization but also fixes 'dead' key reports. Some
1122 + * RollOver implementations for localized keys (like BACKSLASH/PIPE; HID
1123 + * 0x31 and 0x32) report multiple keys, even though a localized keyboard
1124 + * can only have one of them physically available. The 'dead' keys
1125 + * report constant 0. As all map to the same keycode, they'd confuse
1126 + * the input layer. If we filter the 'dead' keys on the HID level, we
1127 + * skip the keycode translation and only forward real events.
1128 + */
1129 + if (!(field->flags & (HID_MAIN_ITEM_RELATIVE |
1130 + HID_MAIN_ITEM_BUFFERED_BYTE)) &&
1131 + (field->flags & HID_MAIN_ITEM_VARIABLE) &&
1132 + usage->usage_index < field->maxusage &&
1133 + value == field->value[usage->usage_index])
1134 + return;
1135 +
1136 /* report the usage code as scancode if the key status has changed */
1137 if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value)
1138 input_event(input, EV_MSC, MSC_SCAN, usage->hid);
1139 diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
1140 index 7cf998cdd011..c673eda71460 100644
1141 --- a/drivers/hid/wacom_wac.c
1142 +++ b/drivers/hid/wacom_wac.c
1143 @@ -756,6 +756,12 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
1144 input_report_key(input, BTN_7, (data[4] & 0x40)); /* Left */
1145 input_report_key(input, BTN_8, (data[4] & 0x80)); /* Down */
1146 input_report_key(input, BTN_0, (data[3] & 0x01)); /* Center */
1147 +
1148 + if (data[4] | (data[3] & 0x01)) {
1149 + input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
1150 + } else {
1151 + input_report_abs(input, ABS_MISC, 0);
1152 + }
1153 } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
1154 int i;
1155
1156 diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
1157 index 51672256072b..b96c636470ef 100644
1158 --- a/drivers/iio/adc/mcp3422.c
1159 +++ b/drivers/iio/adc/mcp3422.c
1160 @@ -58,20 +58,11 @@
1161 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
1162 }
1163
1164 -/* LSB is in nV to eliminate floating point */
1165 -static const u32 rates_to_lsb[] = {1000000, 250000, 62500, 15625};
1166 -
1167 -/*
1168 - * scales calculated as:
1169 - * rates_to_lsb[sample_rate] / (1 << pga);
1170 - * pga is 1 for 0, 2
1171 - */
1172 -
1173 static const int mcp3422_scales[4][4] = {
1174 - { 1000000, 250000, 62500, 15625 },
1175 - { 500000 , 125000, 31250, 7812 },
1176 - { 250000 , 62500 , 15625, 3906 },
1177 - { 125000 , 31250 , 7812 , 1953 } };
1178 + { 1000000, 500000, 250000, 125000 },
1179 + { 250000 , 125000, 62500 , 31250 },
1180 + { 62500 , 31250 , 15625 , 7812 },
1181 + { 15625 , 7812 , 3906 , 1953 } };
1182
1183 /* Constant msleep times for data acquisitions */
1184 static const int mcp3422_read_times[4] = {
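
[Editorial note: the corrected table is indexed as mcp3422_scales[sample_rate][pga], where each row is the LSB size for one sample rate and each column divides it by the PGA gain (1, 2, 4, 8). A spot-check against the removed comment's formula:

	/* rates_to_lsb[] was {1000000, 250000, 62500, 15625} (nV per LSB);
	 * scale[rate][pga] == rates_to_lsb[rate] >> pga, e.g.
	 * mcp3422_scales[2][1] == 62500 / 2 == 31250, as in the new table. */
]
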
1185 diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
1186 index f57562aa396f..15c73e20272d 100644
1187 --- a/drivers/iio/dac/ad5686.c
1188 +++ b/drivers/iio/dac/ad5686.c
1189 @@ -322,7 +322,7 @@ static int ad5686_probe(struct spi_device *spi)
1190 st = iio_priv(indio_dev);
1191 spi_set_drvdata(spi, indio_dev);
1192
1193 - st->reg = devm_regulator_get(&spi->dev, "vcc");
1194 + st->reg = devm_regulator_get_optional(&spi->dev, "vcc");
1195 if (!IS_ERR(st->reg)) {
1196 ret = regulator_enable(st->reg);
1197 if (ret)
1198 diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
1199 index b70873de04ea..fa795dcd5f75 100644
1200 --- a/drivers/iio/imu/adis16400_core.c
1201 +++ b/drivers/iio/imu/adis16400_core.c
1202 @@ -26,6 +26,7 @@
1203 #include <linux/list.h>
1204 #include <linux/module.h>
1205 #include <linux/debugfs.h>
1206 +#include <linux/bitops.h>
1207
1208 #include <linux/iio/iio.h>
1209 #include <linux/iio/sysfs.h>
1210 @@ -414,7 +415,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
1211 mutex_unlock(&indio_dev->mlock);
1212 if (ret)
1213 return ret;
1214 - val16 = ((val16 & 0xFFF) << 4) >> 4;
1215 + val16 = sign_extend32(val16, 11);
1216 *val = val16;
1217 return IIO_VAL_INT;
1218 case IIO_CHAN_INFO_OFFSET:
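
[Editorial note: the replaced shift trick was defeated by integer promotion; (val16 & 0xFFF) << 4 is evaluated as a 32-bit int, so the right shift never copied the sign bit down and negative 12-bit readings came back positive. sign_extend32(val16, 11) treats bit 11 as the sign bit explicitly. A worked example:

	/* a 12-bit reading of 0x800 is -2048 in two's complement */
	s32 ok  = sign_extend32(0x800, 11);		/* -2048, correct */
	s32 bad = ((0x800 & 0xFFF) << 4) >> 4;		/* 2048: promotion to
							 * int keeps it positive */
]
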
1219 diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
1220 index 56a4b7ca7ee3..45d67e9228d7 100644
1221 --- a/drivers/infiniband/core/ucma.c
1222 +++ b/drivers/infiniband/core/ucma.c
1223 @@ -1124,6 +1124,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
1224 if (!optlen)
1225 return -EINVAL;
1226
1227 + memset(&sa_path, 0, sizeof(sa_path));
1228 + sa_path.vlan_id = 0xffff;
1229 +
1230 ib_sa_unpack_path(path_data->path_rec, &sa_path);
1231 ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
1232 if (ret)
1233 diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
1234 index 5ba2a86aab6a..63a9f04bdb6c 100644
1235 --- a/drivers/infiniband/core/uverbs_cmd.c
1236 +++ b/drivers/infiniband/core/uverbs_cmd.c
1237 @@ -2057,20 +2057,21 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
1238 if (qp->real_qp == qp) {
1239 ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
1240 if (ret)
1241 - goto out;
1242 + goto release_qp;
1243 ret = qp->device->modify_qp(qp, attr,
1244 modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
1245 } else {
1246 ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
1247 }
1248
1249 - put_qp_read(qp);
1250 -
1251 if (ret)
1252 - goto out;
1253 + goto release_qp;
1254
1255 ret = in_len;
1256
1257 +release_qp:
1258 + put_qp_read(qp);
1259 +
1260 out:
1261 kfree(attr);
1262
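
[Editorial note: before this change the failure paths jumped straight to "out", skipping put_qp_read() and leaking a reference on the QP. The fix funnels every exit after the successful lookup through a release_qp label, the usual goto-unwind idiom, sketched here with a hypothetical some_operation():

	ret = some_operation(qp, attr);
	if (ret)
		goto release_qp;	/* error: still drop the reference */

	ret = in_len;			/* success */

release_qp:
	put_qp_read(qp);		/* runs on success and failure alike */
out:
	kfree(attr);
	return ret;
]
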
1263 diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
1264 index 8b72cf392b34..3b619b10a372 100644
1265 --- a/drivers/infiniband/hw/mlx4/main.c
1266 +++ b/drivers/infiniband/hw/mlx4/main.c
1267 @@ -1221,8 +1221,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1268 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1269 u64 reg_id;
1270 struct mlx4_ib_steering *ib_steering = NULL;
1271 - enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
1272 - MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
1273 + enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1274
1275 if (mdev->dev->caps.steering_mode ==
1276 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1277 @@ -1235,8 +1234,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1278 !!(mqp->flags &
1279 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1280 prot, &reg_id);
1281 - if (err)
1282 + if (err) {
1283 + pr_err("multicast attach op failed, err %d\n", err);
1284 goto err_malloc;
1285 + }
1286
1287 err = add_gid_entry(ibqp, gid);
1288 if (err)
1289 @@ -1284,8 +1285,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1290 struct net_device *ndev;
1291 struct mlx4_ib_gid_entry *ge;
1292 u64 reg_id = 0;
1293 - enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
1294 - MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
1295 + enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1296
1297 if (mdev->dev->caps.steering_mode ==
1298 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1299 diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
1300 index 9c5150c3cb31..03045dd9e5de 100644
1301 --- a/drivers/infiniband/hw/mlx4/qp.c
1302 +++ b/drivers/infiniband/hw/mlx4/qp.c
1303 @@ -1669,8 +1669,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1304 qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
1305 qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
1306 err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
1307 - if (err)
1308 - return -EINVAL;
1309 + if (err) {
1310 + err = -EINVAL;
1311 + goto out;
1312 + }
1313 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
1314 dev->qp1_proxy[qp->port - 1] = qp;
1315 }
1316 diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
1317 index 1ba6c42e4df8..820fb8009ed7 100644
1318 --- a/drivers/infiniband/hw/mlx5/main.c
1319 +++ b/drivers/infiniband/hw/mlx5/main.c
1320 @@ -987,7 +987,7 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
1321 struct ib_device_attr *dprops = NULL;
1322 struct ib_port_attr *pprops = NULL;
1323 struct mlx5_general_caps *gen;
1324 - int err = 0;
1325 + int err = -ENOMEM;
1326 int port;
1327
1328 gen = &dev->mdev->caps.gen;
1329 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
1330 index c00ae093b6f8..b218254ee41b 100644
1331 --- a/drivers/infiniband/hw/qib/qib.h
1332 +++ b/drivers/infiniband/hw/qib/qib.h
1333 @@ -1082,12 +1082,6 @@ struct qib_devdata {
1334 /* control high-level access to EEPROM */
1335 struct mutex eep_lock;
1336 uint64_t traffic_wds;
1337 - /* active time is kept in seconds, but logged in hours */
1338 - atomic_t active_time;
1339 - /* Below are nominal shadow of EEPROM, new since last EEPROM update */
1340 - uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
1341 - uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
1342 - uint16_t eep_hrs;
1343 /*
1344 * masks for which bits of errs, hwerrs that cause
1345 * each of the counters to increment.
1346 @@ -1309,8 +1303,7 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
1347 int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
1348 const void *buffer, int len);
1349 void qib_get_eeprom_info(struct qib_devdata *);
1350 -int qib_update_eeprom_log(struct qib_devdata *dd);
1351 -void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
1352 +#define qib_inc_eeprom_err(dd, eidx, incr)
1353 void qib_dump_lookup_output_queue(struct qib_devdata *);
1354 void qib_force_pio_avail_update(struct qib_devdata *);
1355 void qib_clear_symerror_on_linkup(unsigned long opaque);
1356 diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
1357 index 4d5d71aaa2b4..e2280b07df02 100644
1358 --- a/drivers/infiniband/hw/qib/qib_eeprom.c
1359 +++ b/drivers/infiniband/hw/qib/qib_eeprom.c
1360 @@ -267,190 +267,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
1361 "Board SN %s did not pass functional test: %s\n",
1362 dd->serial, ifp->if_comment);
1363
1364 - memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
1365 - /*
1366 - * Power-on (actually "active") hours are kept as little-endian value
1367 - * in EEPROM, but as seconds in a (possibly as small as 24-bit)
1368 - * atomic_t while running.
1369 - */
1370 - atomic_set(&dd->active_time, 0);
1371 - dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
1372 -
1373 done:
1374 vfree(buf);
1375
1376 bail:;
1377 }
1378
1379 -/**
1380 - * qib_update_eeprom_log - copy active-time and error counters to eeprom
1381 - * @dd: the qlogic_ib device
1382 - *
1383 - * Although the time is kept as seconds in the qib_devdata struct, it is
1384 - * rounded to hours for re-write, as we have only 16 bits in EEPROM.
1385 - * First-cut code reads whole (expected) struct qib_flash, modifies,
1386 - * re-writes. Future direction: read/write only what we need, assuming
1387 - * that the EEPROM had to have been "good enough" for driver init, and
1388 - * if not, we aren't making it worse.
1389 - *
1390 - */
1391 -int qib_update_eeprom_log(struct qib_devdata *dd)
1392 -{
1393 - void *buf;
1394 - struct qib_flash *ifp;
1395 - int len, hi_water;
1396 - uint32_t new_time, new_hrs;
1397 - u8 csum;
1398 - int ret, idx;
1399 - unsigned long flags;
1400 -
1401 - /* first, check if we actually need to do anything. */
1402 - ret = 0;
1403 - for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
1404 - if (dd->eep_st_new_errs[idx]) {
1405 - ret = 1;
1406 - break;
1407 - }
1408 - }
1409 - new_time = atomic_read(&dd->active_time);
1410 -
1411 - if (ret == 0 && new_time < 3600)
1412 - goto bail;
1413 -
1414 - /*
1415 - * The quick-check above determined that there is something worthy
1416 - * of logging, so get current contents and do a more detailed idea.
1417 - * read full flash, not just currently used part, since it may have
1418 - * been written with a newer definition
1419 - */
1420 - len = sizeof(struct qib_flash);
1421 - buf = vmalloc(len);
1422 - ret = 1;
1423 - if (!buf) {
1424 - qib_dev_err(dd,
1425 - "Couldn't allocate memory to read %u bytes from eeprom for logging\n",
1426 - len);
1427 - goto bail;
1428 - }
1429 -
1430 - /* Grab semaphore and read current EEPROM. If we get an
1431 - * error, let go, but if not, keep it until we finish write.
1432 - */
1433 - ret = mutex_lock_interruptible(&dd->eep_lock);
1434 - if (ret) {
1435 - qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
1436 - goto free_bail;
1437 - }
1438 - ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
1439 - if (ret) {
1440 - mutex_unlock(&dd->eep_lock);
1441 - qib_dev_err(dd, "Unable read EEPROM for logging\n");
1442 - goto free_bail;
1443 - }
1444 - ifp = (struct qib_flash *)buf;
1445 -
1446 - csum = flash_csum(ifp, 0);
1447 - if (csum != ifp->if_csum) {
1448 - mutex_unlock(&dd->eep_lock);
1449 - qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
1450 - csum, ifp->if_csum);
1451 - ret = 1;
1452 - goto free_bail;
1453 - }
1454 - hi_water = 0;
1455 - spin_lock_irqsave(&dd->eep_st_lock, flags);
1456 - for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
1457 - int new_val = dd->eep_st_new_errs[idx];
1458 - if (new_val) {
1459 - /*
1460 - * If we have seen any errors, add to EEPROM values
1461 - * We need to saturate at 0xFF (255) and we also
1462 - * would need to adjust the checksum if we were
1463 - * trying to minimize EEPROM traffic
1464 - * Note that we add to actual current count in EEPROM,
1465 - * in case it was altered while we were running.
1466 - */
1467 - new_val += ifp->if_errcntp[idx];
1468 - if (new_val > 0xFF)
1469 - new_val = 0xFF;
1470 - if (ifp->if_errcntp[idx] != new_val) {
1471 - ifp->if_errcntp[idx] = new_val;
1472 - hi_water = offsetof(struct qib_flash,
1473 - if_errcntp) + idx;
1474 - }
1475 - /*
1476 - * update our shadow (used to minimize EEPROM
1477 - * traffic), to match what we are about to write.
1478 - */
1479 - dd->eep_st_errs[idx] = new_val;
1480 - dd->eep_st_new_errs[idx] = 0;
1481 - }
1482 - }
1483 - /*
1484 - * Now update active-time. We would like to round to the nearest hour
1485 - * but unless atomic_t are sure to be proper signed ints we cannot,
1486 - * because we need to account for what we "transfer" to EEPROM and
1487 - * if we log an hour at 31 minutes, then we would need to set
1488 - * active_time to -29 to accurately count the _next_ hour.
1489 - */
1490 - if (new_time >= 3600) {
1491 - new_hrs = new_time / 3600;
1492 - atomic_sub((new_hrs * 3600), &dd->active_time);
1493 - new_hrs += dd->eep_hrs;
1494 - if (new_hrs > 0xFFFF)
1495 - new_hrs = 0xFFFF;
1496 - dd->eep_hrs = new_hrs;
1497 - if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
1498 - ifp->if_powerhour[0] = new_hrs & 0xFF;
1499 - hi_water = offsetof(struct qib_flash, if_powerhour);
1500 - }
1501 - if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
1502 - ifp->if_powerhour[1] = new_hrs >> 8;
1503 - hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
1504 - }
1505 - }
1506 - /*
1507 - * There is a tiny possibility that we could somehow fail to write
1508 - * the EEPROM after updating our shadows, but problems from holding
1509 - * the spinlock too long are a much bigger issue.
1510 - */
1511 - spin_unlock_irqrestore(&dd->eep_st_lock, flags);
1512 - if (hi_water) {
1513 - /* we made some change to the data, update cksum and write */
1514 - csum = flash_csum(ifp, 1);
1515 - ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
1516 - }
1517 - mutex_unlock(&dd->eep_lock);
1518 - if (ret)
1519 - qib_dev_err(dd, "Failed updating EEPROM\n");
1520 -
1521 -free_bail:
1522 - vfree(buf);
1523 -bail:
1524 - return ret;
1525 -}
1526 -
1527 -/**
1528 - * qib_inc_eeprom_err - increment one of the four error counters
1529 - * that are logged to EEPROM.
1530 - * @dd: the qlogic_ib device
1531 - * @eidx: 0..3, the counter to increment
1532 - * @incr: how much to add
1533 - *
1534 - * Each counter is 8-bits, and saturates at 255 (0xFF). They
1535 - * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
1536 - * is called, but it can only be called in a context that allows sleep.
1537 - * This function can be called even at interrupt level.
1538 - */
1539 -void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
1540 -{
1541 - uint new_val;
1542 - unsigned long flags;
1543 -
1544 - spin_lock_irqsave(&dd->eep_st_lock, flags);
1545 - new_val = dd->eep_st_new_errs[eidx] + incr;
1546 - if (new_val > 255)
1547 - new_val = 255;
1548 - dd->eep_st_new_errs[eidx] = new_val;
1549 - spin_unlock_irqrestore(&dd->eep_st_lock, flags);
1550 -}
1551 diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
1552 index d68266ac7619..f7f49a6c34b0 100644
1553 --- a/drivers/infiniband/hw/qib/qib_iba6120.c
1554 +++ b/drivers/infiniband/hw/qib/qib_iba6120.c
1555 @@ -2681,8 +2681,6 @@ static void qib_get_6120_faststats(unsigned long opaque)
1556 spin_lock_irqsave(&dd->eep_st_lock, flags);
1557 traffic_wds -= dd->traffic_wds;
1558 dd->traffic_wds += traffic_wds;
1559 - if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
1560 - atomic_add(5, &dd->active_time); /* S/B #define */
1561 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
1562
1563 qib_chk_6120_errormask(dd);
1564 diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
1565 index 7dec89fdc124..f5fa106e1992 100644
1566 --- a/drivers/infiniband/hw/qib/qib_iba7220.c
1567 +++ b/drivers/infiniband/hw/qib/qib_iba7220.c
1568 @@ -3297,8 +3297,6 @@ static void qib_get_7220_faststats(unsigned long opaque)
1569 spin_lock_irqsave(&dd->eep_st_lock, flags);
1570 traffic_wds -= dd->traffic_wds;
1571 dd->traffic_wds += traffic_wds;
1572 - if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
1573 - atomic_add(5, &dd->active_time); /* S/B #define */
1574 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
1575 done:
1576 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
1577 diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
1578 index a7eb32517a04..23ca2aca1ad6 100644
1579 --- a/drivers/infiniband/hw/qib/qib_iba7322.c
1580 +++ b/drivers/infiniband/hw/qib/qib_iba7322.c
1581 @@ -5178,8 +5178,6 @@ static void qib_get_7322_faststats(unsigned long opaque)
1582 spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
1583 traffic_wds -= ppd->dd->traffic_wds;
1584 ppd->dd->traffic_wds += traffic_wds;
1585 - if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
1586 - atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
1587 spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
1588 if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
1589 QIB_IB_QDR) &&
1590 diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
1591 index 729da39c49ed..738269b46d83 100644
1592 --- a/drivers/infiniband/hw/qib/qib_init.c
1593 +++ b/drivers/infiniband/hw/qib/qib_init.c
1594 @@ -931,7 +931,6 @@ static void qib_shutdown_device(struct qib_devdata *dd)
1595 qib_free_pportdata(ppd);
1596 }
1597
1598 - qib_update_eeprom_log(dd);
1599 }
1600
1601 /**
1602 diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
1603 index 3c8e4e3caca6..b9ccbda7817d 100644
1604 --- a/drivers/infiniband/hw/qib/qib_sysfs.c
1605 +++ b/drivers/infiniband/hw/qib/qib_sysfs.c
1606 @@ -611,28 +611,6 @@ bail:
1607 return ret < 0 ? ret : count;
1608 }
1609
1610 -static ssize_t show_logged_errs(struct device *device,
1611 - struct device_attribute *attr, char *buf)
1612 -{
1613 - struct qib_ibdev *dev =
1614 - container_of(device, struct qib_ibdev, ibdev.dev);
1615 - struct qib_devdata *dd = dd_from_dev(dev);
1616 - int idx, count;
1617 -
1618 - /* force consistency with actual EEPROM */
1619 - if (qib_update_eeprom_log(dd) != 0)
1620 - return -ENXIO;
1621 -
1622 - count = 0;
1623 - for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
1624 - count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
1625 - dd->eep_st_errs[idx],
1626 - idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
1627 - }
1628 -
1629 - return count;
1630 -}
1631 -
1632 /*
1633 * Dump tempsense regs. in decimal, to ease shell-scripts.
1634 */
1635 @@ -679,7 +657,6 @@ static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
1636 static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
1637 static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
1638 static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
1639 -static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
1640 static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
1641 static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
1642 static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
1643 @@ -693,7 +670,6 @@ static struct device_attribute *qib_attributes[] = {
1644 &dev_attr_nfreectxts,
1645 &dev_attr_serial,
1646 &dev_attr_boardversion,
1647 - &dev_attr_logged_errors,
1648 &dev_attr_tempsense,
1649 &dev_attr_localbus_info,
1650 &dev_attr_chip_reset,
1651 diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
1652 index f14c3849e568..e4bc9409243f 100644
1653 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h
1654 +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
1655 @@ -654,7 +654,9 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
1656 enum dma_data_direction dma_dir);
1657
1658 void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
1659 - struct iser_data_buf *data);
1660 + struct iser_data_buf *data,
1661 + enum dma_data_direction dir);
1662 +
1663 int iser_initialize_task_headers(struct iscsi_task *task,
1664 struct iser_tx_desc *tx_desc);
1665 int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
1666 diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
1667 index 3821633f1065..20e859a6f1a6 100644
1668 --- a/drivers/infiniband/ulp/iser/iser_initiator.c
1669 +++ b/drivers/infiniband/ulp/iser/iser_initiator.c
1670 @@ -320,9 +320,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
1671 struct ib_conn *ib_conn = &iser_conn->ib_conn;
1672 struct iser_device *device = ib_conn->device;
1673
1674 - if (!iser_conn->rx_descs)
1675 - goto free_login_buf;
1676 -
1677 if (device->iser_free_rdma_reg_res)
1678 device->iser_free_rdma_reg_res(ib_conn);
1679
1680 @@ -334,7 +331,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
1681 /* make sure we never redo any unmapping */
1682 iser_conn->rx_descs = NULL;
1683
1684 -free_login_buf:
1685 iser_free_login_buf(iser_conn);
1686 }
1687
1688 @@ -714,19 +710,23 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
1689 device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
1690 if (is_rdma_data_aligned)
1691 iser_dma_unmap_task_data(iser_task,
1692 - &iser_task->data[ISER_DIR_IN]);
1693 + &iser_task->data[ISER_DIR_IN],
1694 + DMA_FROM_DEVICE);
1695 if (prot_count && is_rdma_prot_aligned)
1696 iser_dma_unmap_task_data(iser_task,
1697 - &iser_task->prot[ISER_DIR_IN]);
1698 + &iser_task->prot[ISER_DIR_IN],
1699 + DMA_FROM_DEVICE);
1700 }
1701
1702 if (iser_task->dir[ISER_DIR_OUT]) {
1703 device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
1704 if (is_rdma_data_aligned)
1705 iser_dma_unmap_task_data(iser_task,
1706 - &iser_task->data[ISER_DIR_OUT]);
1707 + &iser_task->data[ISER_DIR_OUT],
1708 + DMA_TO_DEVICE);
1709 if (prot_count && is_rdma_prot_aligned)
1710 iser_dma_unmap_task_data(iser_task,
1711 - &iser_task->prot[ISER_DIR_OUT]);
1712 + &iser_task->prot[ISER_DIR_OUT],
1713 + DMA_TO_DEVICE);
1714 }
1715 }
1716 diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
1717 index 6c5ce357fba6..424783f3e0af 100644
1718 --- a/drivers/infiniband/ulp/iser/iser_memory.c
1719 +++ b/drivers/infiniband/ulp/iser/iser_memory.c
1720 @@ -333,12 +333,13 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
1721 }
1722
1723 void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
1724 - struct iser_data_buf *data)
1725 + struct iser_data_buf *data,
1726 + enum dma_data_direction dir)
1727 {
1728 struct ib_device *dev;
1729
1730 dev = iser_task->iser_conn->ib_conn.device->ib_device;
1731 - ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
1732 + ib_dma_unmap_sg(dev, data->buf, data->size, dir);
1733 }
1734
1735 static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
1736 @@ -358,7 +359,9 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
1737 iser_data_buf_dump(mem, ibdev);
1738
1739 /* unmap the command data before accessing it */
1740 - iser_dma_unmap_task_data(iser_task, mem);
1741 + iser_dma_unmap_task_data(iser_task, mem,
1742 + (cmd_dir == ISER_DIR_OUT) ?
1743 + DMA_TO_DEVICE : DMA_FROM_DEVICE);
1744
1745 /* allocate copy buf, if we are writing, copy the */
1746 /* unaligned scatterlist, dma map the copy */
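The three iser hunks above stop hard-coding DMA_FROM_DEVICE at unmap time and instead pass the direction the buffer was originally mapped with. A standalone sketch of the direction selection used in fall_to_bounce_buf(); the enums below are local stand-ins, not the kernel's:

enum dma_dir { DMA_TO_DEV, DMA_FROM_DEV };
enum iser_dir { ISER_DIR_OUT_, ISER_DIR_IN_ };

static enum dma_dir unmap_dir(enum iser_dir cmd_dir)
{
	/* a write (OUT) was mapped toward the device, a read (IN) away
	 * from it; unmap must use the same direction as the map did */
	return cmd_dir == ISER_DIR_OUT_ ? DMA_TO_DEV : DMA_FROM_DEV;
}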
1747 diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
1748 index 67225bb82bb5..d004d6ee2c1a 100644
1749 --- a/drivers/infiniband/ulp/iser/iser_verbs.c
1750 +++ b/drivers/infiniband/ulp/iser/iser_verbs.c
1751 @@ -567,16 +567,16 @@ void iser_release_work(struct work_struct *work)
1752 /**
1753 * iser_free_ib_conn_res - release IB related resources
1754 * @iser_conn: iser connection struct
1755 - * @destroy_device: indicator if we need to try to release
1756 - * the iser device (only iscsi shutdown and DEVICE_REMOVAL
1757 - * will use this.
1758 + * @destroy: indicator if we need to try to release the
1759 + * iser device and memory regions pool (only iscsi
1760 + * shutdown and DEVICE_REMOVAL will use this).
1761 *
1762 * This routine is called with the iser state mutex held
1763 * so the cm_id removal is out of here. It is safe to
1764 * be invoked multiple times.
1765 */
1766 static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
1767 - bool destroy_device)
1768 + bool destroy)
1769 {
1770 struct ib_conn *ib_conn = &iser_conn->ib_conn;
1771 struct iser_device *device = ib_conn->device;
1772 @@ -584,17 +584,20 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
1773 iser_info("freeing conn %p cma_id %p qp %p\n",
1774 iser_conn, ib_conn->cma_id, ib_conn->qp);
1775
1776 - iser_free_rx_descriptors(iser_conn);
1777 -
1778 if (ib_conn->qp != NULL) {
1779 ib_conn->comp->active_qps--;
1780 rdma_destroy_qp(ib_conn->cma_id);
1781 ib_conn->qp = NULL;
1782 }
1783
1784 - if (destroy_device && device != NULL) {
1785 - iser_device_try_release(device);
1786 - ib_conn->device = NULL;
1787 + if (destroy) {
1788 + if (iser_conn->rx_descs)
1789 + iser_free_rx_descriptors(iser_conn);
1790 +
1791 + if (device != NULL) {
1792 + iser_device_try_release(device);
1793 + ib_conn->device = NULL;
1794 + }
1795 }
1796 }
1797
1798 @@ -803,7 +806,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
1799 }
1800
1801 static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
1802 - bool destroy_device)
1803 + bool destroy)
1804 {
1805 struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
1806
1807 @@ -813,7 +816,7 @@ static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
1808 * and flush errors.
1809 */
1810 iser_disconnected_handler(cma_id);
1811 - iser_free_ib_conn_res(iser_conn, destroy_device);
1812 + iser_free_ib_conn_res(iser_conn, destroy);
1813 complete(&iser_conn->ib_completion);
1814 };
1815
1816 diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
1817 index c09359db3a90..37de0173b6d2 100644
1818 --- a/drivers/md/dm-io.c
1819 +++ b/drivers/md/dm-io.c
1820 @@ -290,6 +290,12 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
1821 unsigned short logical_block_size = queue_logical_block_size(q);
1822 sector_t num_sectors;
1823
1824 + /* Reject unsupported discard requests */
1825 + if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) {
1826 + dec_count(io, region, -EOPNOTSUPP);
1827 + return;
1828 + }
1829 +
1830 /*
1831 * where->count may be zero if rw holds a flush and we need to
1832 * send a zero-sized flush.
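The new check in do_region() turns an unsupported discard into an immediate -EOPNOTSUPP instead of passing it down to a queue that cannot honor it. A userspace-flavoured sketch of the same fail-fast guard (types and names hypothetical):

#include <errno.h>
#include <stdbool.h>

struct queue { bool supports_discard; };

int submit_discard(struct queue *q)
{
	/* reject before doing any per-region work, mirroring the
	 * blk_queue_discard() test added above */
	if (!q->supports_discard)
		return -EOPNOTSUPP;
	/* ... build and issue the discard here ... */
	return 0;
}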
1833 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
1834 index 7dfdb5c746d6..089d62751f7f 100644
1835 --- a/drivers/md/dm-raid1.c
1836 +++ b/drivers/md/dm-raid1.c
1837 @@ -604,6 +604,15 @@ static void write_callback(unsigned long error, void *context)
1838 return;
1839 }
1840
1841 + /*
1842 + * If the bio is discard, return an error, but do not
1843 + * degrade the array.
1844 + */
1845 + if (bio->bi_rw & REQ_DISCARD) {
1846 + bio_endio(bio, -EOPNOTSUPP);
1847 + return;
1848 + }
1849 +
1850 for (i = 0; i < ms->nr_mirrors; i++)
1851 if (test_bit(i, &error))
1852 fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
1853 diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
1854 index 864b03f47727..8b204ae216ab 100644
1855 --- a/drivers/md/dm-snap.c
1856 +++ b/drivers/md/dm-snap.c
1857 @@ -1432,8 +1432,6 @@ out:
1858 full_bio->bi_private = pe->full_bio_private;
1859 atomic_inc(&full_bio->bi_remaining);
1860 }
1861 - free_pending_exception(pe);
1862 -
1863 increment_pending_exceptions_done_count();
1864
1865 up_write(&s->lock);
1866 @@ -1450,6 +1448,8 @@ out:
1867 }
1868
1869 retry_origin_bios(s, origin_bios);
1870 +
1871 + free_pending_exception(pe);
1872 }
1873
1874 static void commit_callback(void *context, int success)
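The dm-snap hunks move free_pending_exception(pe) from before retry_origin_bios() to after it, so the pending exception is released only once nothing can still touch it. A tiny sketch of the rule (free strictly after last use), with hypothetical names:

#include <stdlib.h>

struct bio_list;			/* opaque here */
void retry_bios(struct bio_list *bios);	/* assumed helper */

struct pending {
	struct bio_list *origin_bios;
};

/* Wrong:  free(pe) before retry_bios() risks a use-after-free if the
 * retry path can still reach state hanging off pe.
 * Right:  release pe strictly after its last possible use, as the
 * hunk above now does with free_pending_exception(). */
void finish(struct pending *pe)
{
	struct bio_list *bios = pe->origin_bios;

	retry_bios(bios);
	free(pe);
}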
1875 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
1876 index 62c51364cf9e..cec85c5bae9e 100644
1877 --- a/drivers/md/dm.c
1878 +++ b/drivers/md/dm.c
1879 @@ -2462,7 +2462,7 @@ int dm_setup_md_queue(struct mapped_device *md)
1880 return 0;
1881 }
1882
1883 -static struct mapped_device *dm_find_md(dev_t dev)
1884 +struct mapped_device *dm_get_md(dev_t dev)
1885 {
1886 struct mapped_device *md;
1887 unsigned minor = MINOR(dev);
1888 @@ -2473,12 +2473,15 @@ static struct mapped_device *dm_find_md(dev_t dev)
1889 spin_lock(&_minor_lock);
1890
1891 md = idr_find(&_minor_idr, minor);
1892 - if (md && (md == MINOR_ALLOCED ||
1893 - (MINOR(disk_devt(dm_disk(md))) != minor) ||
1894 - dm_deleting_md(md) ||
1895 - test_bit(DMF_FREEING, &md->flags))) {
1896 - md = NULL;
1897 - goto out;
1898 + if (md) {
1899 + if ((md == MINOR_ALLOCED ||
1900 + (MINOR(disk_devt(dm_disk(md))) != minor) ||
1901 + dm_deleting_md(md) ||
1902 + test_bit(DMF_FREEING, &md->flags))) {
1903 + md = NULL;
1904 + goto out;
1905 + }
1906 + dm_get(md);
1907 }
1908
1909 out:
1910 @@ -2486,16 +2489,6 @@ out:
1911
1912 return md;
1913 }
1914 -
1915 -struct mapped_device *dm_get_md(dev_t dev)
1916 -{
1917 - struct mapped_device *md = dm_find_md(dev);
1918 -
1919 - if (md)
1920 - dm_get(md);
1921 -
1922 - return md;
1923 -}
1924 EXPORT_SYMBOL_GPL(dm_get_md);
1925
1926 void *dm_get_mdptr(struct mapped_device *md)
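Folding dm_find_md() into dm_get_md() lets the reference be taken while _minor_lock is still held, closing the window in which the device could be freed between lookup and dm_get(). A generic sketch of lookup-plus-ref-under-lock (pthread mutex and a plain refcount as stand-ins):

#include <pthread.h>
#include <stddef.h>

struct obj { int refcount; /* ... */ };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
struct obj *table_lookup(int id);	/* assumed: returns entry or NULL */

struct obj *get_obj(int id)
{
	struct obj *o;

	pthread_mutex_lock(&table_lock);
	o = table_lookup(id);
	if (o)
		o->refcount++;	/* take the reference before dropping
				 * the lock, so the object cannot be
				 * freed between lookup and get */
	pthread_mutex_unlock(&table_lock);
	return o;
}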
1927 diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
1928 index 6ee785da574e..9ff67b1e1d39 100644
1929 --- a/drivers/misc/cxl/cxl.h
1930 +++ b/drivers/misc/cxl/cxl.h
1931 @@ -471,6 +471,7 @@ void cxl_release_one_irq(struct cxl *adapter, int hwirq);
1932 int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num);
1933 void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter);
1934 int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq, unsigned int virq);
1935 +int cxl_update_image_control(struct cxl *adapter);
1936
1937 /* common == phyp + powernv */
1938 struct cxl_process_element_common {
1939 diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
1940 index 336020c8e1af..6fe4027feb7d 100644
1941 --- a/drivers/misc/cxl/irq.c
1942 +++ b/drivers/misc/cxl/irq.c
1943 @@ -174,6 +174,7 @@ static irqreturn_t cxl_irq(int irq, void *data)
1944 }
1945
1946 cxl_ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
1947 + return IRQ_HANDLED;
1948 }
1949 if (dsisr & CXL_PSL_DSISR_An_OC)
1950 pr_devel("CXL interrupt: OS Context Warning\n");
1951 diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
1952 index 0f2cc9f8b4db..eee4fd606dc1 100644
1953 --- a/drivers/misc/cxl/pci.c
1954 +++ b/drivers/misc/cxl/pci.c
1955 @@ -316,7 +316,7 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev
1956 u64 psl_dsnctl;
1957 u64 chipid;
1958
1959 - if (!(np = pnv_pci_to_phb_node(dev)))
1960 + if (!(np = pnv_pci_get_phb_node(dev)))
1961 return -ENODEV;
1962
1963 while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL)))
1964 @@ -361,6 +361,41 @@ int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq,
1965 return pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
1966 }
1967
1968 +int cxl_update_image_control(struct cxl *adapter)
1969 +{
1970 + struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
1971 + int rc;
1972 + int vsec;
1973 + u8 image_state;
1974 +
1975 + if (!(vsec = find_cxl_vsec(dev))) {
1976 + dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
1977 + return -ENODEV;
1978 + }
1979 +
1980 + if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) {
1981 + dev_err(&dev->dev, "failed to read image state: %i\n", rc);
1982 + return rc;
1983 + }
1984 +
1985 + if (adapter->perst_loads_image)
1986 + image_state |= CXL_VSEC_PERST_LOADS_IMAGE;
1987 + else
1988 + image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE;
1989 +
1990 + if (adapter->perst_select_user)
1991 + image_state |= CXL_VSEC_PERST_SELECT_USER;
1992 + else
1993 + image_state &= ~CXL_VSEC_PERST_SELECT_USER;
1994 +
1995 + if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) {
1996 + dev_err(&dev->dev, "failed to update image control: %i\n", rc);
1997 + return rc;
1998 + }
1999 +
2000 + return 0;
2001 +}
2002 +
2003 int cxl_alloc_one_irq(struct cxl *adapter)
2004 {
2005 struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
2006 @@ -770,8 +805,8 @@ static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
2007 CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
2008 CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
2009 adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
2010 - adapter->perst_loads_image = !!(image_state & CXL_VSEC_PERST_LOADS_IMAGE);
2011 - adapter->perst_select_user = !!(image_state & CXL_VSEC_PERST_SELECT_USER);
2012 + adapter->perst_loads_image = true;
2013 + adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
2014
2015 CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
2016 CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
2017 @@ -879,6 +914,9 @@ static struct cxl *cxl_init_adapter(struct pci_dev *dev)
2018 if ((rc = cxl_vsec_looks_ok(adapter, dev)))
2019 goto err2;
2020
2021 + if ((rc = cxl_update_image_control(adapter)))
2022 + goto err2;
2023 +
2024 if ((rc = cxl_map_adapter_regs(adapter, dev)))
2025 goto err2;
2026
2027 diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
2028 index 7901d076c127..6c267162f151 100644
2029 --- a/drivers/misc/mei/init.c
2030 +++ b/drivers/misc/mei/init.c
2031 @@ -313,6 +313,8 @@ void mei_stop(struct mei_device *dev)
2032
2033 dev->dev_state = MEI_DEV_POWER_DOWN;
2034 mei_reset(dev);
2035 + /* move device to disabled state unconditionally */
2036 + dev->dev_state = MEI_DEV_DISABLED;
2037
2038 mutex_unlock(&dev->device_lock);
2039
2040 diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
2041 index da1a2500c91c..bb27028d392b 100644
2042 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
2043 +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
2044 @@ -872,13 +872,14 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
2045 }
2046
2047 /* Unlocked version of the reclaim routine */
2048 -static void __bcmgenet_tx_reclaim(struct net_device *dev,
2049 - struct bcmgenet_tx_ring *ring)
2050 +static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
2051 + struct bcmgenet_tx_ring *ring)
2052 {
2053 struct bcmgenet_priv *priv = netdev_priv(dev);
2054 int last_tx_cn, last_c_index, num_tx_bds;
2055 struct enet_cb *tx_cb_ptr;
2056 struct netdev_queue *txq;
2057 + unsigned int pkts_compl = 0;
2058 unsigned int bds_compl;
2059 unsigned int c_index;
2060
2061 @@ -906,6 +907,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
2062 tx_cb_ptr = ring->cbs + last_c_index;
2063 bds_compl = 0;
2064 if (tx_cb_ptr->skb) {
2065 + pkts_compl++;
2066 bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
2067 dev->stats.tx_bytes += tx_cb_ptr->skb->len;
2068 dma_unmap_single(&dev->dev,
2069 @@ -929,23 +931,45 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
2070 last_c_index &= (num_tx_bds - 1);
2071 }
2072
2073 - if (ring->free_bds > (MAX_SKB_FRAGS + 1))
2074 - ring->int_disable(priv, ring);
2075 -
2076 - if (netif_tx_queue_stopped(txq))
2077 - netif_tx_wake_queue(txq);
2078 + if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
2079 + if (netif_tx_queue_stopped(txq))
2080 + netif_tx_wake_queue(txq);
2081 + }
2082
2083 ring->c_index = c_index;
2084 +
2085 + return pkts_compl;
2086 }
2087
2088 -static void bcmgenet_tx_reclaim(struct net_device *dev,
2089 +static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
2090 struct bcmgenet_tx_ring *ring)
2091 {
2092 + unsigned int released;
2093 unsigned long flags;
2094
2095 spin_lock_irqsave(&ring->lock, flags);
2096 - __bcmgenet_tx_reclaim(dev, ring);
2097 + released = __bcmgenet_tx_reclaim(dev, ring);
2098 spin_unlock_irqrestore(&ring->lock, flags);
2099 +
2100 + return released;
2101 +}
2102 +
2103 +static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
2104 +{
2105 + struct bcmgenet_tx_ring *ring =
2106 + container_of(napi, struct bcmgenet_tx_ring, napi);
2107 + unsigned int work_done = 0;
2108 +
2109 + work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
2110 +
2111 + if (work_done == 0) {
2112 + napi_complete(napi);
2113 + ring->int_enable(ring->priv, ring);
2114 +
2115 + return 0;
2116 + }
2117 +
2118 + return budget;
2119 }
2120
2121 static void bcmgenet_tx_reclaim_all(struct net_device *dev)
2122 @@ -1201,10 +1225,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
2123 bcmgenet_tdma_ring_writel(priv, ring->index,
2124 ring->prod_index, TDMA_PROD_INDEX);
2125
2126 - if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
2127 + if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
2128 netif_tx_stop_queue(txq);
2129 - ring->int_enable(priv, ring);
2130 - }
2131
2132 out:
2133 spin_unlock_irqrestore(&ring->lock, flags);
2134 @@ -1517,6 +1539,7 @@ static int init_umac(struct bcmgenet_priv *priv)
2135 struct device *kdev = &priv->pdev->dev;
2136 int ret;
2137 u32 reg, cpu_mask_clear;
2138 + int index;
2139
2140 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
2141
2142 @@ -1543,7 +1566,7 @@ static int init_umac(struct bcmgenet_priv *priv)
2143
2144 bcmgenet_intr_disable(priv);
2145
2146 - cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
2147 + cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
2148
2149 dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
2150
2151 @@ -1570,6 +1593,10 @@ static int init_umac(struct bcmgenet_priv *priv)
2152
2153 bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
2154
2155 + for (index = 0; index < priv->hw_params->tx_queues; index++)
2156 + bcmgenet_intrl2_1_writel(priv, (1 << index),
2157 + INTRL2_CPU_MASK_CLEAR);
2158 +
2159 /* Enable rx/tx engine.*/
2160 dev_dbg(kdev, "done init umac\n");
2161
2162 @@ -1589,6 +1616,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
2163 unsigned int first_bd;
2164
2165 spin_lock_init(&ring->lock);
2166 + ring->priv = priv;
2167 + netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
2168 ring->index = index;
2169 if (index == DESC_INDEX) {
2170 ring->queue = 0;
2171 @@ -1634,6 +1663,17 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
2172 TDMA_WRITE_PTR);
2173 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
2174 DMA_END_ADDR);
2175 +
2176 + napi_enable(&ring->napi);
2177 +}
2178 +
2179 +static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
2180 + unsigned int index)
2181 +{
2182 + struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
2183 +
2184 + napi_disable(&ring->napi);
2185 + netif_napi_del(&ring->napi);
2186 }
2187
2188 /* Initialize a RDMA ring */
2189 @@ -1803,7 +1843,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
2190 return ret;
2191 }
2192
2193 -static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
2194 +static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
2195 {
2196 int i;
2197
2198 @@ -1822,6 +1862,18 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
2199 kfree(priv->tx_cbs);
2200 }
2201
2202 +static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
2203 +{
2204 + int i;
2205 +
2206 + bcmgenet_fini_tx_ring(priv, DESC_INDEX);
2207 +
2208 + for (i = 0; i < priv->hw_params->tx_queues; i++)
2209 + bcmgenet_fini_tx_ring(priv, i);
2210 +
2211 + __bcmgenet_fini_dma(priv);
2212 +}
2213 +
2214 /* init_edma: Initialize DMA control register */
2215 static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
2216 {
2217 @@ -1848,7 +1900,7 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
2218 priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
2219 GFP_KERNEL);
2220 if (!priv->tx_cbs) {
2221 - bcmgenet_fini_dma(priv);
2222 + __bcmgenet_fini_dma(priv);
2223 return -ENOMEM;
2224 }
2225
2226 @@ -1871,9 +1923,6 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget)
2227 struct bcmgenet_priv, napi);
2228 unsigned int work_done;
2229
2230 - /* tx reclaim */
2231 - bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
2232 -
2233 work_done = bcmgenet_desc_rx(priv, budget);
2234
2235 /* Advancing our consumer index*/
2236 @@ -1918,28 +1967,34 @@ static void bcmgenet_irq_task(struct work_struct *work)
2237 static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
2238 {
2239 struct bcmgenet_priv *priv = dev_id;
2240 + struct bcmgenet_tx_ring *ring;
2241 unsigned int index;
2242
2243 /* Save irq status for bottom-half processing. */
2244 priv->irq1_stat =
2245 bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
2246 - ~priv->int1_mask;
2247 + ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
2248 /* clear interrupts */
2249 bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
2250
2251 netif_dbg(priv, intr, priv->dev,
2252 "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
2253 +
2254 /* Check the MBDONE interrupts.
2255 * a packet is done, reclaim descriptors
2256 */
2257 - if (priv->irq1_stat & 0x0000ffff) {
2258 - index = 0;
2259 - for (index = 0; index < 16; index++) {
2260 - if (priv->irq1_stat & (1 << index))
2261 - bcmgenet_tx_reclaim(priv->dev,
2262 - &priv->tx_rings[index]);
2263 + for (index = 0; index < priv->hw_params->tx_queues; index++) {
2264 + if (!(priv->irq1_stat & BIT(index)))
2265 + continue;
2266 +
2267 + ring = &priv->tx_rings[index];
2268 +
2269 + if (likely(napi_schedule_prep(&ring->napi))) {
2270 + ring->int_disable(priv, ring);
2271 + __napi_schedule(&ring->napi);
2272 }
2273 }
2274 +
2275 return IRQ_HANDLED;
2276 }
2277
2278 @@ -1971,8 +2026,12 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
2279 }
2280 if (priv->irq0_stat &
2281 (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
2282 - /* Tx reclaim */
2283 - bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
2284 + struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
2285 +
2286 + if (likely(napi_schedule_prep(&ring->napi))) {
2287 + ring->int_disable(priv, ring);
2288 + __napi_schedule(&ring->napi);
2289 + }
2290 }
2291 if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
2292 UMAC_IRQ_PHY_DET_F |
2293 diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
2294 index 31b2da5f9b82..eeda0281c684 100644
2295 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
2296 +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
2297 @@ -495,6 +495,7 @@ struct bcmgenet_hw_params {
2298
2299 struct bcmgenet_tx_ring {
2300 spinlock_t lock; /* ring lock */
2301 + struct napi_struct napi; /* NAPI per tx queue */
2302 unsigned int index; /* ring index */
2303 unsigned int queue; /* queue index */
2304 struct enet_cb *cbs; /* tx ring buffer control block*/
2305 @@ -509,6 +510,7 @@ struct bcmgenet_tx_ring {
2306 struct bcmgenet_tx_ring *);
2307 void (*int_disable)(struct bcmgenet_priv *priv,
2308 struct bcmgenet_tx_ring *);
2309 + struct bcmgenet_priv *priv;
2310 };
2311
2312 /* device context */
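The bcmgenet changes above give each TX ring its own NAPI context: the ISR masks the ring's interrupt and schedules NAPI, and the poll function re-enables the interrupt only when a pass reclaims nothing. A compressed standalone model of that poll contract (function pointers stand in for the kernel hooks named in the hunks):

#include <stdbool.h>

struct ring {
	unsigned int (*reclaim)(struct ring *);	/* packets freed */
	void (*irq_unmask)(struct ring *);	/* ring->int_enable() */
	void (*napi_done)(struct ring *);	/* napi_complete() */
};

int tx_poll(struct ring *r, int budget)
{
	unsigned int done = r->reclaim(r);

	if (done == 0) {
		r->napi_done(r);	/* leave polling mode ... */
		r->irq_unmask(r);	/* ... then re-arm the IRQ */
		return 0;
	}
	return budget;	/* work remains; core will poll again */
}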
2313 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
2314 index cf154f74cba1..54390b3e0344 100644
2315 --- a/drivers/net/ethernet/realtek/r8169.c
2316 +++ b/drivers/net/ethernet/realtek/r8169.c
2317 @@ -5062,8 +5062,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
2318 RTL_W8(ChipCmd, CmdReset);
2319
2320 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
2321 -
2322 - netdev_reset_queue(tp->dev);
2323 }
2324
2325 static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
2326 @@ -7073,8 +7071,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
2327
2328 txd->opts2 = cpu_to_le32(opts[1]);
2329
2330 - netdev_sent_queue(dev, skb->len);
2331 -
2332 skb_tx_timestamp(skb);
2333
2334 wmb();
2335 @@ -7174,7 +7170,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
2336 static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
2337 {
2338 unsigned int dirty_tx, tx_left;
2339 - unsigned int bytes_compl = 0, pkts_compl = 0;
2340
2341 dirty_tx = tp->dirty_tx;
2342 smp_rmb();
2343 @@ -7193,8 +7188,10 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
2344 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
2345 tp->TxDescArray + entry);
2346 if (status & LastFrag) {
2347 - pkts_compl++;
2348 - bytes_compl += tx_skb->skb->len;
2349 + u64_stats_update_begin(&tp->tx_stats.syncp);
2350 + tp->tx_stats.packets++;
2351 + tp->tx_stats.bytes += tx_skb->skb->len;
2352 + u64_stats_update_end(&tp->tx_stats.syncp);
2353 dev_kfree_skb_any(tx_skb->skb);
2354 tx_skb->skb = NULL;
2355 }
2356 @@ -7203,13 +7200,6 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
2357 }
2358
2359 if (tp->dirty_tx != dirty_tx) {
2360 - netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
2361 -
2362 - u64_stats_update_begin(&tp->tx_stats.syncp);
2363 - tp->tx_stats.packets += pkts_compl;
2364 - tp->tx_stats.bytes += bytes_compl;
2365 - u64_stats_update_end(&tp->tx_stats.syncp);
2366 -
2367 tp->dirty_tx = dirty_tx;
2368 /* Sync with rtl8169_start_xmit:
2369 * - publish dirty_tx ring index (write barrier)
2370 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
2371 index 880cc090dc44..91d0c6a86e37 100644
2372 --- a/drivers/net/macvtap.c
2373 +++ b/drivers/net/macvtap.c
2374 @@ -637,12 +637,15 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
2375 } /* else everything is zero */
2376 }
2377
2378 +/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
2379 +#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
2380 +
2381 /* Get packet from user space buffer */
2382 static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
2383 const struct iovec *iv, unsigned long total_len,
2384 size_t count, int noblock)
2385 {
2386 - int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
2387 + int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
2388 struct sk_buff *skb;
2389 struct macvlan_dev *vlan;
2390 unsigned long len = total_len;
2391 @@ -701,7 +704,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
2392 linear = vnet_hdr.hdr_len;
2393 }
2394
2395 - skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
2396 + skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
2397 linear, noblock, &err);
2398 if (!skb)
2399 goto err;
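MACVTAP_RESERVE replaces NET_IP_ALIGN so the ethernet header lands at the alignment the neighbour code expects (HH_DATA_MOD, 16 bytes in mainline). A sketch of the headroom arithmetic; the macro below is an illustrative re-derivation, not the kernel's HH_DATA_OFF definition:

#include <stdio.h>

#define HH_MOD		16	/* neighbour hard-header alignment */
#define ETH_HLEN_	14	/* ethernet header length */

/* Headroom that pads a header of 'len' bytes up to the next multiple
 * of HH_MOD, so the payload after it starts aligned. */
#define HH_RESERVE(len)	((HH_MOD - ((len) % HH_MOD)) % HH_MOD)

int main(void)
{
	/* for a 14-byte ethernet header the reserve comes out to 2 */
	printf("reserve: %d\n", HH_RESERVE(ETH_HLEN_));
	return 0;
}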
2400 diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
2401 index 767cd110f496..dc1f6f07326a 100644
2402 --- a/drivers/net/phy/phy.c
2403 +++ b/drivers/net/phy/phy.c
2404 @@ -236,6 +236,25 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
2405 }
2406
2407 /**
2408 + * phy_check_valid - check if there is a valid PHY setting which matches
2409 + * speed, duplex, and feature mask
2410 + * @speed: speed to match
2411 + * @duplex: duplex to match
2412 + * @features: A mask of the valid settings
2413 + *
2414 + * Description: Returns true if there is a valid setting, false otherwise.
2415 + */
2416 +static inline bool phy_check_valid(int speed, int duplex, u32 features)
2417 +{
2418 + unsigned int idx;
2419 +
2420 + idx = phy_find_valid(phy_find_setting(speed, duplex), features);
2421 +
2422 + return settings[idx].speed == speed && settings[idx].duplex == duplex &&
2423 + (settings[idx].setting & features);
2424 +}
2425 +
2426 +/**
2427 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
2428 * @phydev: the target phy_device struct
2429 *
2430 @@ -1042,7 +1061,6 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
2431 int eee_lp, eee_cap, eee_adv;
2432 u32 lp, cap, adv;
2433 int status;
2434 - unsigned int idx;
2435
2436 /* Read phy status to properly get the right settings */
2437 status = phy_read_status(phydev);
2438 @@ -1074,8 +1092,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
2439
2440 adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
2441 lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
2442 - idx = phy_find_setting(phydev->speed, phydev->duplex);
2443 - if (!(lp & adv & settings[idx].setting))
2444 + if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
2445 goto eee_exit_err;
2446
2447 if (clk_stop_enable) {
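phy_check_valid(), added above, answers "does this exact speed/duplex pair survive the feature mask?" instead of trusting a nearest-match index alone. A standalone sketch over a small settings table (table contents hypothetical):

#include <stdbool.h>

struct setting { int speed; int duplex; unsigned int bit; };

static const struct setting settings_[] = {
	{ 1000, 1, 1u << 0 },	/* 1000/full */
	{  100, 1, 1u << 1 },	/* 100/full  */
	{   10, 1, 1u << 2 },	/* 10/full   */
};

/* True only if some entry matches speed AND duplex AND is allowed by
 * the feature mask; a near-miss from a "find closest" helper no
 * longer passes silently. */
bool check_valid(int speed, int duplex, unsigned int features)
{
	unsigned int i;

	for (i = 0; i < sizeof(settings_) / sizeof(settings_[0]); i++)
		if (settings_[i].speed == speed &&
		    settings_[i].duplex == duplex &&
		    (settings_[i].bit & features))
			return true;
	return false;
}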
2448 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
2449 index 9c505c4dbe04..ebc95a3771a4 100644
2450 --- a/drivers/net/team/team.c
2451 +++ b/drivers/net/team/team.c
2452 @@ -42,9 +42,7 @@
2453
2454 static struct team_port *team_port_get_rcu(const struct net_device *dev)
2455 {
2456 - struct team_port *port = rcu_dereference(dev->rx_handler_data);
2457 -
2458 - return team_port_exists(dev) ? port : NULL;
2459 + return rcu_dereference(dev->rx_handler_data);
2460 }
2461
2462 static struct team_port *team_port_get_rtnl(const struct net_device *dev)
2463 @@ -1735,11 +1733,11 @@ static int team_set_mac_address(struct net_device *dev, void *p)
2464 if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
2465 return -EADDRNOTAVAIL;
2466 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2467 - rcu_read_lock();
2468 - list_for_each_entry_rcu(port, &team->port_list, list)
2469 + mutex_lock(&team->lock);
2470 + list_for_each_entry(port, &team->port_list, list)
2471 if (team->ops.port_change_dev_addr)
2472 team->ops.port_change_dev_addr(team, port);
2473 - rcu_read_unlock();
2474 + mutex_unlock(&team->lock);
2475 return 0;
2476 }
2477
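team_set_mac_address() now walks port_list under team->lock instead of rcu_read_lock(), because port_change_dev_addr() may sleep and sleeping inside an RCU read-side section is illegal. A generic sketch of the rule, with a pthread mutex standing in for the team lock:

#include <pthread.h>

struct port { struct port *next; };

struct team_ {
	pthread_mutex_t lock;
	struct port *ports;
};

void may_sleep_callback(struct port *p);	/* assumed: can block */

void for_each_port(struct team_ *t)
{
	struct port *p;

	/* a blocking callback forces the heavier lock: RCU readers
	 * must not sleep, a mutex holder may */
	pthread_mutex_lock(&t->lock);
	for (p = t->ports; p; p = p->next)
		may_sleep_callback(p);
	pthread_mutex_unlock(&t->lock);
}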
2478 diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
2479 index 3d18bb0eee85..1bfe0fcaccf5 100644
2480 --- a/drivers/net/usb/plusb.c
2481 +++ b/drivers/net/usb/plusb.c
2482 @@ -134,6 +134,11 @@ static const struct usb_device_id products [] = {
2483 }, {
2484 USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */
2485 .driver_info = (unsigned long) &prolific_info,
2486 +}, {
2487 + USB_DEVICE(0x3923, 0x7825), /* National Instruments USB
2488 + * Host-to-Host Cable
2489 + */
2490 + .driver_info = (unsigned long) &prolific_info,
2491 },
2492
2493 { }, // END
2494 diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
2495 index a3399c4f13a9..b9b651ea9851 100644
2496 --- a/drivers/net/wireless/ath/ath5k/reset.c
2497 +++ b/drivers/net/wireless/ath/ath5k/reset.c
2498 @@ -478,7 +478,7 @@ ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
2499 regval = ioread32(reg);
2500 iowrite32(regval | val, reg);
2501 regval = ioread32(reg);
2502 - usleep_range(100, 150);
2503 + udelay(100); /* NB: should be atomic */
2504
2505 /* Bring BB/MAC out of reset */
2506 iowrite32(regval & ~val, reg);
2507 diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
2508 index 8882b467be95..ecc5fa5640d2 100644
2509 --- a/drivers/of/of_pci.c
2510 +++ b/drivers/of/of_pci.c
2511 @@ -140,6 +140,7 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
2512 unsigned char busno, unsigned char bus_max,
2513 struct list_head *resources, resource_size_t *io_base)
2514 {
2515 + struct pci_host_bridge_window *window;
2516 struct resource *res;
2517 struct resource *bus_range;
2518 struct of_pci_range range;
2519 @@ -225,7 +226,10 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
2520 conversion_failed:
2521 kfree(res);
2522 parse_failed:
2523 + list_for_each_entry(window, resources, list)
2524 + kfree(window->res);
2525 pci_free_resource_list(resources);
2526 + kfree(bus_range);
2527 return err;
2528 }
2529 EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
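The error path above now frees each window's separately allocated resource before pci_free_resource_list() (which releases only the window wrappers), and also releases bus_range. A sketch of the two-level cleanup with hypothetical types:

#include <stdlib.h>

struct window {
	struct window *next;
	void *res;		/* separately allocated payload */
};

/* Free the payload of every node first, then the nodes themselves --
 * the list teardown helper in the hunk frees only the wrappers. */
void free_windows(struct window *head)
{
	struct window *w, *next;

	for (w = head; w; w = w->next)
		free(w->res);
	for (w = head; w; w = next) {
		next = w->next;
		free(w);
	}
}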
2530 diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
2531 index f2446769247f..6f806f93662a 100644
2532 --- a/drivers/pinctrl/freescale/pinctrl-imx.c
2533 +++ b/drivers/pinctrl/freescale/pinctrl-imx.c
2534 @@ -365,7 +365,7 @@ static void imx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
2535 const struct imx_pin_reg *pin_reg = &info->pin_regs[pin_id];
2536 unsigned long config;
2537
2538 - if (!pin_reg || !pin_reg->conf_reg) {
2539 + if (!pin_reg || pin_reg->conf_reg == -1) {
2540 seq_printf(s, "N/A");
2541 return;
2542 }
2543 diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c
2544 index 550e6d77ac2b..b5fabf684632 100644
2545 --- a/drivers/pinctrl/freescale/pinctrl-imx25.c
2546 +++ b/drivers/pinctrl/freescale/pinctrl-imx25.c
2547 @@ -27,150 +27,148 @@
2548
2549 enum imx25_pads {
2550 MX25_PAD_RESERVE0 = 1,
2551 - MX25_PAD_RESERVE1 = 2,
2552 - MX25_PAD_A10 = 3,
2553 - MX25_PAD_A13 = 4,
2554 - MX25_PAD_A14 = 5,
2555 - MX25_PAD_A15 = 6,
2556 - MX25_PAD_A16 = 7,
2557 - MX25_PAD_A17 = 8,
2558 - MX25_PAD_A18 = 9,
2559 - MX25_PAD_A19 = 10,
2560 - MX25_PAD_A20 = 11,
2561 - MX25_PAD_A21 = 12,
2562 - MX25_PAD_A22 = 13,
2563 - MX25_PAD_A23 = 14,
2564 - MX25_PAD_A24 = 15,
2565 - MX25_PAD_A25 = 16,
2566 - MX25_PAD_EB0 = 17,
2567 - MX25_PAD_EB1 = 18,
2568 - MX25_PAD_OE = 19,
2569 - MX25_PAD_CS0 = 20,
2570 - MX25_PAD_CS1 = 21,
2571 - MX25_PAD_CS4 = 22,
2572 - MX25_PAD_CS5 = 23,
2573 - MX25_PAD_NF_CE0 = 24,
2574 - MX25_PAD_ECB = 25,
2575 - MX25_PAD_LBA = 26,
2576 - MX25_PAD_BCLK = 27,
2577 - MX25_PAD_RW = 28,
2578 - MX25_PAD_NFWE_B = 29,
2579 - MX25_PAD_NFRE_B = 30,
2580 - MX25_PAD_NFALE = 31,
2581 - MX25_PAD_NFCLE = 32,
2582 - MX25_PAD_NFWP_B = 33,
2583 - MX25_PAD_NFRB = 34,
2584 - MX25_PAD_D15 = 35,
2585 - MX25_PAD_D14 = 36,
2586 - MX25_PAD_D13 = 37,
2587 - MX25_PAD_D12 = 38,
2588 - MX25_PAD_D11 = 39,
2589 - MX25_PAD_D10 = 40,
2590 - MX25_PAD_D9 = 41,
2591 - MX25_PAD_D8 = 42,
2592 - MX25_PAD_D7 = 43,
2593 - MX25_PAD_D6 = 44,
2594 - MX25_PAD_D5 = 45,
2595 - MX25_PAD_D4 = 46,
2596 - MX25_PAD_D3 = 47,
2597 - MX25_PAD_D2 = 48,
2598 - MX25_PAD_D1 = 49,
2599 - MX25_PAD_D0 = 50,
2600 - MX25_PAD_LD0 = 51,
2601 - MX25_PAD_LD1 = 52,
2602 - MX25_PAD_LD2 = 53,
2603 - MX25_PAD_LD3 = 54,
2604 - MX25_PAD_LD4 = 55,
2605 - MX25_PAD_LD5 = 56,
2606 - MX25_PAD_LD6 = 57,
2607 - MX25_PAD_LD7 = 58,
2608 - MX25_PAD_LD8 = 59,
2609 - MX25_PAD_LD9 = 60,
2610 - MX25_PAD_LD10 = 61,
2611 - MX25_PAD_LD11 = 62,
2612 - MX25_PAD_LD12 = 63,
2613 - MX25_PAD_LD13 = 64,
2614 - MX25_PAD_LD14 = 65,
2615 - MX25_PAD_LD15 = 66,
2616 - MX25_PAD_HSYNC = 67,
2617 - MX25_PAD_VSYNC = 68,
2618 - MX25_PAD_LSCLK = 69,
2619 - MX25_PAD_OE_ACD = 70,
2620 - MX25_PAD_CONTRAST = 71,
2621 - MX25_PAD_PWM = 72,
2622 - MX25_PAD_CSI_D2 = 73,
2623 - MX25_PAD_CSI_D3 = 74,
2624 - MX25_PAD_CSI_D4 = 75,
2625 - MX25_PAD_CSI_D5 = 76,
2626 - MX25_PAD_CSI_D6 = 77,
2627 - MX25_PAD_CSI_D7 = 78,
2628 - MX25_PAD_CSI_D8 = 79,
2629 - MX25_PAD_CSI_D9 = 80,
2630 - MX25_PAD_CSI_MCLK = 81,
2631 - MX25_PAD_CSI_VSYNC = 82,
2632 - MX25_PAD_CSI_HSYNC = 83,
2633 - MX25_PAD_CSI_PIXCLK = 84,
2634 - MX25_PAD_I2C1_CLK = 85,
2635 - MX25_PAD_I2C1_DAT = 86,
2636 - MX25_PAD_CSPI1_MOSI = 87,
2637 - MX25_PAD_CSPI1_MISO = 88,
2638 - MX25_PAD_CSPI1_SS0 = 89,
2639 - MX25_PAD_CSPI1_SS1 = 90,
2640 - MX25_PAD_CSPI1_SCLK = 91,
2641 - MX25_PAD_CSPI1_RDY = 92,
2642 - MX25_PAD_UART1_RXD = 93,
2643 - MX25_PAD_UART1_TXD = 94,
2644 - MX25_PAD_UART1_RTS = 95,
2645 - MX25_PAD_UART1_CTS = 96,
2646 - MX25_PAD_UART2_RXD = 97,
2647 - MX25_PAD_UART2_TXD = 98,
2648 - MX25_PAD_UART2_RTS = 99,
2649 - MX25_PAD_UART2_CTS = 100,
2650 - MX25_PAD_SD1_CMD = 101,
2651 - MX25_PAD_SD1_CLK = 102,
2652 - MX25_PAD_SD1_DATA0 = 103,
2653 - MX25_PAD_SD1_DATA1 = 104,
2654 - MX25_PAD_SD1_DATA2 = 105,
2655 - MX25_PAD_SD1_DATA3 = 106,
2656 - MX25_PAD_KPP_ROW0 = 107,
2657 - MX25_PAD_KPP_ROW1 = 108,
2658 - MX25_PAD_KPP_ROW2 = 109,
2659 - MX25_PAD_KPP_ROW3 = 110,
2660 - MX25_PAD_KPP_COL0 = 111,
2661 - MX25_PAD_KPP_COL1 = 112,
2662 - MX25_PAD_KPP_COL2 = 113,
2663 - MX25_PAD_KPP_COL3 = 114,
2664 - MX25_PAD_FEC_MDC = 115,
2665 - MX25_PAD_FEC_MDIO = 116,
2666 - MX25_PAD_FEC_TDATA0 = 117,
2667 - MX25_PAD_FEC_TDATA1 = 118,
2668 - MX25_PAD_FEC_TX_EN = 119,
2669 - MX25_PAD_FEC_RDATA0 = 120,
2670 - MX25_PAD_FEC_RDATA1 = 121,
2671 - MX25_PAD_FEC_RX_DV = 122,
2672 - MX25_PAD_FEC_TX_CLK = 123,
2673 - MX25_PAD_RTCK = 124,
2674 - MX25_PAD_DE_B = 125,
2675 - MX25_PAD_GPIO_A = 126,
2676 - MX25_PAD_GPIO_B = 127,
2677 - MX25_PAD_GPIO_C = 128,
2678 - MX25_PAD_GPIO_D = 129,
2679 - MX25_PAD_GPIO_E = 130,
2680 - MX25_PAD_GPIO_F = 131,
2681 - MX25_PAD_EXT_ARMCLK = 132,
2682 - MX25_PAD_UPLL_BYPCLK = 133,
2683 - MX25_PAD_VSTBY_REQ = 134,
2684 - MX25_PAD_VSTBY_ACK = 135,
2685 - MX25_PAD_POWER_FAIL = 136,
2686 - MX25_PAD_CLKO = 137,
2687 - MX25_PAD_BOOT_MODE0 = 138,
2688 - MX25_PAD_BOOT_MODE1 = 139,
2689 + MX25_PAD_A10 = 2,
2690 + MX25_PAD_A13 = 3,
2691 + MX25_PAD_A14 = 4,
2692 + MX25_PAD_A15 = 5,
2693 + MX25_PAD_A16 = 6,
2694 + MX25_PAD_A17 = 7,
2695 + MX25_PAD_A18 = 8,
2696 + MX25_PAD_A19 = 9,
2697 + MX25_PAD_A20 = 10,
2698 + MX25_PAD_A21 = 11,
2699 + MX25_PAD_A22 = 12,
2700 + MX25_PAD_A23 = 13,
2701 + MX25_PAD_A24 = 14,
2702 + MX25_PAD_A25 = 15,
2703 + MX25_PAD_EB0 = 16,
2704 + MX25_PAD_EB1 = 17,
2705 + MX25_PAD_OE = 18,
2706 + MX25_PAD_CS0 = 19,
2707 + MX25_PAD_CS1 = 20,
2708 + MX25_PAD_CS4 = 21,
2709 + MX25_PAD_CS5 = 22,
2710 + MX25_PAD_NF_CE0 = 23,
2711 + MX25_PAD_ECB = 24,
2712 + MX25_PAD_LBA = 25,
2713 + MX25_PAD_BCLK = 26,
2714 + MX25_PAD_RW = 27,
2715 + MX25_PAD_NFWE_B = 28,
2716 + MX25_PAD_NFRE_B = 29,
2717 + MX25_PAD_NFALE = 30,
2718 + MX25_PAD_NFCLE = 31,
2719 + MX25_PAD_NFWP_B = 32,
2720 + MX25_PAD_NFRB = 33,
2721 + MX25_PAD_D15 = 34,
2722 + MX25_PAD_D14 = 35,
2723 + MX25_PAD_D13 = 36,
2724 + MX25_PAD_D12 = 37,
2725 + MX25_PAD_D11 = 38,
2726 + MX25_PAD_D10 = 39,
2727 + MX25_PAD_D9 = 40,
2728 + MX25_PAD_D8 = 41,
2729 + MX25_PAD_D7 = 42,
2730 + MX25_PAD_D6 = 43,
2731 + MX25_PAD_D5 = 44,
2732 + MX25_PAD_D4 = 45,
2733 + MX25_PAD_D3 = 46,
2734 + MX25_PAD_D2 = 47,
2735 + MX25_PAD_D1 = 48,
2736 + MX25_PAD_D0 = 49,
2737 + MX25_PAD_LD0 = 50,
2738 + MX25_PAD_LD1 = 51,
2739 + MX25_PAD_LD2 = 52,
2740 + MX25_PAD_LD3 = 53,
2741 + MX25_PAD_LD4 = 54,
2742 + MX25_PAD_LD5 = 55,
2743 + MX25_PAD_LD6 = 56,
2744 + MX25_PAD_LD7 = 57,
2745 + MX25_PAD_LD8 = 58,
2746 + MX25_PAD_LD9 = 59,
2747 + MX25_PAD_LD10 = 60,
2748 + MX25_PAD_LD11 = 61,
2749 + MX25_PAD_LD12 = 62,
2750 + MX25_PAD_LD13 = 63,
2751 + MX25_PAD_LD14 = 64,
2752 + MX25_PAD_LD15 = 65,
2753 + MX25_PAD_HSYNC = 66,
2754 + MX25_PAD_VSYNC = 67,
2755 + MX25_PAD_LSCLK = 68,
2756 + MX25_PAD_OE_ACD = 69,
2757 + MX25_PAD_CONTRAST = 70,
2758 + MX25_PAD_PWM = 71,
2759 + MX25_PAD_CSI_D2 = 72,
2760 + MX25_PAD_CSI_D3 = 73,
2761 + MX25_PAD_CSI_D4 = 74,
2762 + MX25_PAD_CSI_D5 = 75,
2763 + MX25_PAD_CSI_D6 = 76,
2764 + MX25_PAD_CSI_D7 = 77,
2765 + MX25_PAD_CSI_D8 = 78,
2766 + MX25_PAD_CSI_D9 = 79,
2767 + MX25_PAD_CSI_MCLK = 80,
2768 + MX25_PAD_CSI_VSYNC = 81,
2769 + MX25_PAD_CSI_HSYNC = 82,
2770 + MX25_PAD_CSI_PIXCLK = 83,
2771 + MX25_PAD_I2C1_CLK = 84,
2772 + MX25_PAD_I2C1_DAT = 85,
2773 + MX25_PAD_CSPI1_MOSI = 86,
2774 + MX25_PAD_CSPI1_MISO = 87,
2775 + MX25_PAD_CSPI1_SS0 = 88,
2776 + MX25_PAD_CSPI1_SS1 = 89,
2777 + MX25_PAD_CSPI1_SCLK = 90,
2778 + MX25_PAD_CSPI1_RDY = 91,
2779 + MX25_PAD_UART1_RXD = 92,
2780 + MX25_PAD_UART1_TXD = 93,
2781 + MX25_PAD_UART1_RTS = 94,
2782 + MX25_PAD_UART1_CTS = 95,
2783 + MX25_PAD_UART2_RXD = 96,
2784 + MX25_PAD_UART2_TXD = 97,
2785 + MX25_PAD_UART2_RTS = 98,
2786 + MX25_PAD_UART2_CTS = 99,
2787 + MX25_PAD_SD1_CMD = 100,
2788 + MX25_PAD_SD1_CLK = 101,
2789 + MX25_PAD_SD1_DATA0 = 102,
2790 + MX25_PAD_SD1_DATA1 = 103,
2791 + MX25_PAD_SD1_DATA2 = 104,
2792 + MX25_PAD_SD1_DATA3 = 105,
2793 + MX25_PAD_KPP_ROW0 = 106,
2794 + MX25_PAD_KPP_ROW1 = 107,
2795 + MX25_PAD_KPP_ROW2 = 108,
2796 + MX25_PAD_KPP_ROW3 = 109,
2797 + MX25_PAD_KPP_COL0 = 110,
2798 + MX25_PAD_KPP_COL1 = 111,
2799 + MX25_PAD_KPP_COL2 = 112,
2800 + MX25_PAD_KPP_COL3 = 113,
2801 + MX25_PAD_FEC_MDC = 114,
2802 + MX25_PAD_FEC_MDIO = 115,
2803 + MX25_PAD_FEC_TDATA0 = 116,
2804 + MX25_PAD_FEC_TDATA1 = 117,
2805 + MX25_PAD_FEC_TX_EN = 118,
2806 + MX25_PAD_FEC_RDATA0 = 119,
2807 + MX25_PAD_FEC_RDATA1 = 120,
2808 + MX25_PAD_FEC_RX_DV = 121,
2809 + MX25_PAD_FEC_TX_CLK = 122,
2810 + MX25_PAD_RTCK = 123,
2811 + MX25_PAD_DE_B = 124,
2812 + MX25_PAD_GPIO_A = 125,
2813 + MX25_PAD_GPIO_B = 126,
2814 + MX25_PAD_GPIO_C = 127,
2815 + MX25_PAD_GPIO_D = 128,
2816 + MX25_PAD_GPIO_E = 129,
2817 + MX25_PAD_GPIO_F = 130,
2818 + MX25_PAD_EXT_ARMCLK = 131,
2819 + MX25_PAD_UPLL_BYPCLK = 132,
2820 + MX25_PAD_VSTBY_REQ = 133,
2821 + MX25_PAD_VSTBY_ACK = 134,
2822 + MX25_PAD_POWER_FAIL = 135,
2823 + MX25_PAD_CLKO = 136,
2824 + MX25_PAD_BOOT_MODE0 = 137,
2825 + MX25_PAD_BOOT_MODE1 = 138,
2826 };
2827
2828 /* Pad names for the pinmux subsystem */
2829 static const struct pinctrl_pin_desc imx25_pinctrl_pads[] = {
2830 IMX_PINCTRL_PIN(MX25_PAD_RESERVE0),
2831 - IMX_PINCTRL_PIN(MX25_PAD_RESERVE1),
2832 IMX_PINCTRL_PIN(MX25_PAD_A10),
2833 IMX_PINCTRL_PIN(MX25_PAD_A13),
2834 IMX_PINCTRL_PIN(MX25_PAD_A14),
2835 diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
2836 index 30d74a06b993..15a8998bd161 100644
2837 --- a/drivers/scsi/be2iscsi/be_main.c
2838 +++ b/drivers/scsi/be2iscsi/be_main.c
2839 @@ -586,7 +586,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
2840 "beiscsi_hba_alloc - iscsi_host_alloc failed\n");
2841 return NULL;
2842 }
2843 - shost->dma_boundary = pcidev->dma_mask;
2844 shost->max_id = BE2_MAX_SESSIONS;
2845 shost->max_channel = 0;
2846 shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
2847 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
2848 index 843594c2583d..07b2ea1fbf0d 100644
2849 --- a/drivers/scsi/sg.c
2850 +++ b/drivers/scsi/sg.c
2851 @@ -546,7 +546,7 @@ static ssize_t
2852 sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
2853 {
2854 sg_io_hdr_t *hp = &srp->header;
2855 - int err = 0;
2856 + int err = 0, err2;
2857 int len;
2858
2859 if (count < SZ_SG_IO_HDR) {
2860 @@ -575,8 +575,8 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
2861 goto err_out;
2862 }
2863 err_out:
2864 - err = sg_finish_rem_req(srp);
2865 - return (0 == err) ? count : err;
2866 + err2 = sg_finish_rem_req(srp);
2867 + return err ? : err2 ? : count;
2868 }
2869
2870 static ssize_t
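The rewritten return in sg_new_read() uses GCC's "a ?: b" extension (a ? a : b, evaluating a once): report the first failure (err from the copy-out, else err2 from sg_finish_rem_req()), and only when both are zero return count. A compilable illustration:

#include <stdio.h>

/* GNU C extension: (a ?: b) == (a ? a : b), with a evaluated once. */
static long pick(long err, long err2, long count)
{
	return err ?: err2 ?: count;
}

int main(void)
{
	printf("%ld\n", pick(0, 0, 42));	/* 42: success path */
	printf("%ld\n", pick(-5, 0, 42));	/* -5: first error wins */
	printf("%ld\n", pick(0, -9, 42));	/* -9: cleanup error kept */
	return 0;
}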
2871 diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c
2872 index 9b6f96f1591c..25e0b40881ca 100644
2873 --- a/drivers/staging/comedi/comedi_compat32.c
2874 +++ b/drivers/staging/comedi/comedi_compat32.c
2875 @@ -262,7 +262,7 @@ static int compat_cmd(struct file *file, unsigned long arg)
2876 {
2877 struct comedi_cmd __user *cmd;
2878 struct comedi32_cmd_struct __user *cmd32;
2879 - int rc;
2880 + int rc, err;
2881
2882 cmd32 = compat_ptr(arg);
2883 cmd = compat_alloc_user_space(sizeof(*cmd));
2884 @@ -271,7 +271,15 @@ static int compat_cmd(struct file *file, unsigned long arg)
2885 if (rc)
2886 return rc;
2887
2888 - return translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd);
2889 + rc = translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd);
2890 + if (rc == -EAGAIN) {
2891 + /* Special case: copy cmd back to user. */
2892 + err = put_compat_cmd(cmd32, cmd);
2893 + if (err)
2894 + rc = err;
2895 + }
2896 +
2897 + return rc;
2898 }
2899
2900 /* Handle 32-bit COMEDI_CMDTEST ioctl. */
2901 diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
2902 index 3b6bffc66918..1eb13b134b32 100644
2903 --- a/drivers/staging/comedi/drivers/cb_pcidas64.c
2904 +++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
2905 @@ -439,6 +439,29 @@ static const struct comedi_lrange ai_ranges_64xx = {
2906 }
2907 };
2908
2909 +static const uint8_t ai_range_code_64xx[8] = {
2910 + 0x0, 0x1, 0x2, 0x3, /* bipolar 10, 5, 2.5, 1.25 */
2911 + 0x8, 0x9, 0xa, 0xb /* unipolar 10, 5, 2.5, 1.25 */
2912 +};
2913 +
2914 +/* analog input ranges for 64-Mx boards */
2915 +static const struct comedi_lrange ai_ranges_64_mx = {
2916 + 7, {
2917 + BIP_RANGE(5),
2918 + BIP_RANGE(2.5),
2919 + BIP_RANGE(1.25),
2920 + BIP_RANGE(0.625),
2921 + UNI_RANGE(5),
2922 + UNI_RANGE(2.5),
2923 + UNI_RANGE(1.25)
2924 + }
2925 +};
2926 +
2927 +static const uint8_t ai_range_code_64_mx[7] = {
2928 + 0x0, 0x1, 0x2, 0x3, /* bipolar 5, 2.5, 1.25, 0.625 */
2929 + 0x9, 0xa, 0xb /* unipolar 5, 2.5, 1.25 */
2930 +};
2931 +
2932 /* analog input ranges for 60xx boards */
2933 static const struct comedi_lrange ai_ranges_60xx = {
2934 4, {
2935 @@ -449,6 +472,10 @@ static const struct comedi_lrange ai_ranges_60xx = {
2936 }
2937 };
2938
2939 +static const uint8_t ai_range_code_60xx[4] = {
2940 + 0x0, 0x1, 0x4, 0x7 /* bipolar 10, 5, 0.5, 0.05 */
2941 +};
2942 +
2943 /* analog input ranges for 6030, etc boards */
2944 static const struct comedi_lrange ai_ranges_6030 = {
2945 14, {
2946 @@ -469,6 +496,11 @@ static const struct comedi_lrange ai_ranges_6030 = {
2947 }
2948 };
2949
2950 +static const uint8_t ai_range_code_6030[14] = {
2951 + 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, /* bip 10, 5, 2, 1, 0.5, 0.2, 0.1 */
2952 + 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf /* uni 10, 5, 2, 1, 0.5, 0.2, 0.1 */
2953 +};
2954 +
2955 /* analog input ranges for 6052, etc boards */
2956 static const struct comedi_lrange ai_ranges_6052 = {
2957 15, {
2958 @@ -490,6 +522,11 @@ static const struct comedi_lrange ai_ranges_6052 = {
2959 }
2960 };
2961
2962 +static const uint8_t ai_range_code_6052[15] = {
2963 + 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, /* bipolar 10 ... 0.05 */
2964 + 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf /* unipolar 10 ... 0.1 */
2965 +};
2966 +
2967 /* analog input ranges for 4020 board */
2968 static const struct comedi_lrange ai_ranges_4020 = {
2969 2, {
2970 @@ -593,6 +630,7 @@ struct pcidas64_board {
2971 int ai_bits; /* analog input resolution */
2972 int ai_speed; /* fastest conversion period in ns */
2973 const struct comedi_lrange *ai_range_table;
2974 + const uint8_t *ai_range_code;
2975 int ao_nchan; /* number of analog out channels */
2976 int ao_bits; /* analog output resolution */
2977 int ao_scan_speed; /* analog output scan speed */
2978 @@ -651,6 +689,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
2979 .ao_scan_speed = 10000,
2980 .layout = LAYOUT_64XX,
2981 .ai_range_table = &ai_ranges_64xx,
2982 + .ai_range_code = ai_range_code_64xx,
2983 .ao_range_table = &ao_ranges_64xx,
2984 .ao_range_code = ao_range_code_64xx,
2985 .ai_fifo = &ai_fifo_64xx,
2986 @@ -666,6 +705,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
2987 .ao_scan_speed = 10000,
2988 .layout = LAYOUT_64XX,
2989 .ai_range_table = &ai_ranges_64xx,
2990 + .ai_range_code = ai_range_code_64xx,
2991 .ao_range_table = &ao_ranges_64xx,
2992 .ao_range_code = ao_range_code_64xx,
2993 .ai_fifo = &ai_fifo_64xx,
2994 @@ -680,7 +720,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
2995 .ao_bits = 16,
2996 .ao_scan_speed = 10000,
2997 .layout = LAYOUT_64XX,
2998 - .ai_range_table = &ai_ranges_64xx,
2999 + .ai_range_table = &ai_ranges_64_mx,
3000 + .ai_range_code = ai_range_code_64_mx,
3001 .ao_range_table = &ao_ranges_64xx,
3002 .ao_range_code = ao_range_code_64xx,
3003 .ai_fifo = &ai_fifo_64xx,
3004 @@ -695,7 +736,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
3005 .ao_bits = 16,
3006 .ao_scan_speed = 10000,
3007 .layout = LAYOUT_64XX,
3008 - .ai_range_table = &ai_ranges_64xx,
3009 + .ai_range_table = &ai_ranges_64_mx,
3010 + .ai_range_code = ai_range_code_64_mx,
3011 .ao_range_table = &ao_ranges_64xx,
3012 .ao_range_code = ao_range_code_64xx,
3013 .ai_fifo = &ai_fifo_64xx,
3014 @@ -710,7 +752,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
3015 .ao_bits = 16,
3016 .ao_scan_speed = 10000,
3017 .layout = LAYOUT_64XX,
3018 - .ai_range_table = &ai_ranges_64xx,
3019 + .ai_range_table = &ai_ranges_64_mx,
3020 + .ai_range_code = ai_range_code_64_mx,
3021 .ao_range_table = &ao_ranges_64xx,
3022 .ao_range_code = ao_range_code_64xx,
3023 .ai_fifo = &ai_fifo_64xx,
3024 @@ -725,6 +768,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
3025 .ao_bits = 16,
3026 .layout = LAYOUT_60XX,
3027 .ai_range_table = &ai_ranges_60xx,
3028 + .ai_range_code = ai_range_code_60xx,
3029 .ao_range_table = &range_bipolar10,
3030 .ao_range_code = ao_range_code_60xx,
3031 .ai_fifo = &ai_fifo_60xx,
3032 @@ -740,6 +784,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
3033 .ao_scan_speed = 100000,
3034 .layout = LAYOUT_60XX,
3035 .ai_range_table = &ai_ranges_60xx,
3036 + .ai_range_code = ai_range_code_60xx,
3037 .ao_range_table = &range_bipolar10,
3038 .ao_range_code = ao_range_code_60xx,
3039 .ai_fifo = &ai_fifo_60xx,
3040 @@ -754,6 +799,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
3041 .ao_scan_speed = 100000,
3042 .layout = LAYOUT_60XX,
3043 .ai_range_table = &ai_ranges_60xx,
3044 + .ai_range_code = ai_range_code_60xx,
3045 .ao_range_table = &range_bipolar10,
3046 .ao_range_code = ao_range_code_60xx,
3047 .ai_fifo = &ai_fifo_60xx,
3048 @@ -769,6 +815,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
3049 .ao_scan_speed = 100000,
3050 .layout = LAYOUT_60XX,
3051 .ai_range_table = &ai_ranges_60xx,
3052 + .ai_range_code = ai_range_code_60xx,
3053 .ao_range_table = &range_bipolar10,
3054 .ao_range_code = ao_range_code_60xx,
3055 .ai_fifo = &ai_fifo_60xx,
3056 @@ -784,6 +831,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
3057 .ao_scan_speed = 10000,
3058 .layout = LAYOUT_60XX,
3059 .ai_range_table = &ai_ranges_6030,
3060 + .ai_range_code = ai_range_code_6030,
3061 .ao_range_table = &ao_ranges_6030,
3062 .ao_range_code = ao_range_code_6030,
3063 .ai_fifo = &ai_fifo_60xx,
3064 @@ -799,6 +847,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
3065 .ao_scan_speed = 10000,
3066 .layout = LAYOUT_60XX,
3067 .ai_range_table = &ai_ranges_6030,
3068 + .ai_range_code = ai_range_code_6030,
3069 .ao_range_table = &ao_ranges_6030,
3070 .ao_range_code = ao_range_code_6030,
3071 .ai_fifo = &ai_fifo_60xx,
3072 @@ -812,6 +861,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
3073 .ao_nchan = 0,
3074 .layout = LAYOUT_60XX,
3075 .ai_range_table = &ai_ranges_6030,
3076 + .ai_range_code = ai_range_code_6030,
3077 .ai_fifo = &ai_fifo_60xx,
3078 .has_8255 = 0,
3079 },
3080 @@ -823,6 +873,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
3081 .ao_nchan = 0,
3082 .layout = LAYOUT_60XX,
3083 .ai_range_table = &ai_ranges_6030,
3084 + .ai_range_code = ai_range_code_6030,
3085 .ai_fifo = &ai_fifo_60xx,
3086 .has_8255 = 0,
3087 },
3088 @@ -835,6 +886,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
3089 .ao_scan_speed = 0,
3090 .layout = LAYOUT_60XX,
3091 .ai_range_table = &ai_ranges_60xx,
3092 + .ai_range_code = ai_range_code_60xx,
3093 .ai_fifo = &ai_fifo_60xx,
3094 .has_8255 = 0,
3095 },
3096 @@ -848,6 +900,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
3097 .ao_scan_speed = 100000,
3098 .layout = LAYOUT_60XX,
3099 .ai_range_table = &ai_ranges_60xx,
3100 + .ai_range_code = ai_range_code_60xx,
3101 .ao_range_table = &range_bipolar10,
3102 .ao_range_code = ao_range_code_60xx,
3103 .ai_fifo = &ai_fifo_60xx,
3104 @@ -863,6 +916,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
3105 .ao_scan_speed = 100000,
3106 .layout = LAYOUT_60XX,
3107 .ai_range_table = &ai_ranges_60xx,
3108 + .ai_range_code = ai_range_code_60xx,
3109 .ao_range_table = &range_bipolar10,
3110 .ao_range_code = ao_range_code_60xx,
3111 .ai_fifo = &ai_fifo_60xx,
3112 @@ -878,6 +932,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
3113 .ao_scan_speed = 1000,
3114 .layout = LAYOUT_60XX,
3115 .ai_range_table = &ai_ranges_6052,
3116 + .ai_range_code = ai_range_code_6052,
3117 .ao_range_table = &ao_ranges_6030,
3118 .ao_range_code = ao_range_code_6030,
3119 .ai_fifo = &ai_fifo_60xx,
3120 @@ -893,6 +948,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
3121 .ao_scan_speed = 3333,
3122 .layout = LAYOUT_60XX,
3123 .ai_range_table = &ai_ranges_6052,
3124 + .ai_range_code = ai_range_code_6052,
3125 .ao_range_table = &ao_ranges_6030,
3126 .ao_range_code = ao_range_code_6030,
3127 .ai_fifo = &ai_fifo_60xx,
3128 @@ -908,6 +964,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
3129 .ao_scan_speed = 1000,
3130 .layout = LAYOUT_60XX,
3131 .ai_range_table = &ai_ranges_6052,
3132 + .ai_range_code = ai_range_code_6052,
3133 .ao_range_table = &ao_ranges_6030,
3134 .ao_range_code = ao_range_code_6030,
3135 .ai_fifo = &ai_fifo_60xx,
3136 @@ -923,6 +980,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
3137 .ao_scan_speed = 1000,
3138 .layout = LAYOUT_60XX,
3139 .ai_range_table = &ai_ranges_6052,
3140 + .ai_range_code = ai_range_code_6052,
3141 .ao_range_table = &ao_ranges_6030,
3142 .ao_range_code = ao_range_code_6030,
3143 .ai_fifo = &ai_fifo_60xx,
3144 @@ -957,6 +1015,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
3145 .ao_scan_speed = 10000,
3146 .layout = LAYOUT_64XX,
3147 .ai_range_table = &ai_ranges_64xx,
3148 + .ai_range_code = ai_range_code_64xx,
3149 .ai_fifo = ai_fifo_64xx,
3150 .has_8255 = 1,
3151 },
3152 @@ -968,7 +1027,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
3153 .ao_nchan = 0,
3154 .ao_scan_speed = 10000,
3155 .layout = LAYOUT_64XX,
3156 - .ai_range_table = &ai_ranges_64xx,
3157 + .ai_range_table = &ai_ranges_64_mx,
3158 + .ai_range_code = ai_range_code_64_mx,
3159 .ai_fifo = ai_fifo_64xx,
3160 .has_8255 = 1,
3161 },
3162 @@ -980,7 +1040,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
3163 .ao_nchan = 0,
3164 .ao_scan_speed = 10000,
3165 .layout = LAYOUT_64XX,
3166 - .ai_range_table = &ai_ranges_64xx,
3167 + .ai_range_table = &ai_ranges_64_mx,
3168 + .ai_range_code = ai_range_code_64_mx,
3169 .ai_fifo = ai_fifo_64xx,
3170 .has_8255 = 1,
3171 },
3172 @@ -992,7 +1053,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
3173 .ao_nchan = 0,
3174 .ao_scan_speed = 10000,
3175 .layout = LAYOUT_64XX,
3176 - .ai_range_table = &ai_ranges_64xx,
3177 + .ai_range_table = &ai_ranges_64_mx,
3178 + .ai_range_code = ai_range_code_64_mx,
3179 .ai_fifo = ai_fifo_64xx,
3180 .has_8255 = 1,
3181 },
3182 @@ -1004,7 +1066,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
3183 .ao_nchan = 2,
3184 .ao_scan_speed = 10000,
3185 .layout = LAYOUT_64XX,
3186 - .ai_range_table = &ai_ranges_64xx,
3187 + .ai_range_table = &ai_ranges_64_mx,
3188 + .ai_range_code = ai_range_code_64_mx,
3189 .ai_fifo = ai_fifo_64xx,
3190 .has_8255 = 1,
3191 },
3192 @@ -1016,7 +1079,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
3193 .ao_nchan = 2,
3194 .ao_scan_speed = 10000,
3195 .layout = LAYOUT_64XX,
3196 - .ai_range_table = &ai_ranges_64xx,
3197 + .ai_range_table = &ai_ranges_64_mx,
3198 + .ai_range_code = ai_range_code_64_mx,
3199 .ai_fifo = ai_fifo_64xx,
3200 .has_8255 = 1,
3201 },
3202 @@ -1028,7 +1092,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
3203 .ao_nchan = 2,
3204 .ao_scan_speed = 10000,
3205 .layout = LAYOUT_64XX,
3206 - .ai_range_table = &ai_ranges_64xx,
3207 + .ai_range_table = &ai_ranges_64_mx,
3208 + .ai_range_code = ai_range_code_64_mx,
3209 .ai_fifo = ai_fifo_64xx,
3210 .has_8255 = 1,
3211 },
3212 @@ -1122,45 +1187,8 @@ static unsigned int ai_range_bits_6xxx(const struct comedi_device *dev,
3213 unsigned int range_index)
3214 {
3215 const struct pcidas64_board *thisboard = dev->board_ptr;
3216 - const struct comedi_krange *range =
3217 - &thisboard->ai_range_table->range[range_index];
3218 - unsigned int bits = 0;
3219
3220 - switch (range->max) {
3221 - case 10000000:
3222 - bits = 0x000;
3223 - break;
3224 - case 5000000:
3225 - bits = 0x100;
3226 - break;
3227 - case 2000000:
3228 - case 2500000:
3229 - bits = 0x200;
3230 - break;
3231 - case 1000000:
3232 - case 1250000:
3233 - bits = 0x300;
3234 - break;
3235 - case 500000:
3236 - bits = 0x400;
3237 - break;
3238 - case 200000:
3239 - case 250000:
3240 - bits = 0x500;
3241 - break;
3242 - case 100000:
3243 - bits = 0x600;
3244 - break;
3245 - case 50000:
3246 - bits = 0x700;
3247 - break;
3248 - default:
3249 - dev_err(dev->class_dev, "bug! in %s\n", __func__);
3250 - break;
3251 - }
3252 - if (range->min == 0)
3253 - bits += 0x900;
3254 - return bits;
3255 + return thisboard->ai_range_code[range_index] << 8;
3256 }
3257
3258 static unsigned int hw_revision(const struct comedi_device *dev,
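
The ai_range_bits_6xxx() rewrite above trades a switch over range->max for a per-board lookup table whose entry is shifted into the register field. A minimal userspace sketch of that pattern, with made-up table values rather than the driver's real codes:

#include <stdio.h>

/* Hypothetical per-board range codes; the real driver keeps one table
 * per board family (ai_range_code_64xx, _60xx, _6030, _6052, _64_mx). */
static const unsigned char ai_range_code_demo[8] = {
    0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
};

/* Mirrors the patched ai_range_bits_6xxx(): the hardware code is just
 * the table entry shifted into bits 8..11. */
static unsigned int range_bits(unsigned int range_index)
{
    return ai_range_code_demo[range_index] << 8;
}

int main(void)
{
    for (unsigned int i = 0; i < 8; i++)
        printf("range %u -> bits 0x%03x\n", i, range_bits(i));
    return 0;
}
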
3259 diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
3260 index 2a29b9baec0d..ffd42071a12e 100644
3261 --- a/drivers/staging/iio/adc/mxs-lradc.c
3262 +++ b/drivers/staging/iio/adc/mxs-lradc.c
3263 @@ -214,11 +214,17 @@ struct mxs_lradc {
3264 unsigned long is_divided;
3265
3266 /*
3267 - * Touchscreen LRADC channels receives a private slot in the CTRL4
3268 - * register, the slot #7. Therefore only 7 slots instead of 8 in the
3269 - * CTRL4 register can be mapped to LRADC channels when using the
3270 - * touchscreen.
3271 - *
3272 + * When the touchscreen is enabled, we give it two private virtual
3273 + * channels: #6 and #7. This means that only 6 virtual channels (instead
3274 + * of 8) will be available for buffered capture.
3275 + */
3276 +#define TOUCHSCREEN_VCHANNEL1 7
3277 +#define TOUCHSCREEN_VCHANNEL2 6
3278 +#define BUFFER_VCHANS_LIMITED 0x3f
3279 +#define BUFFER_VCHANS_ALL 0xff
3280 + u8 buffer_vchans;
3281 +
3282 + /*
3283 * Furthermore, certain LRADC channels are shared between touchscreen
3284 * and/or touch-buttons and generic LRADC block. Therefore when using
3285 * either of these, these channels are not available for the regular
3286 @@ -342,6 +348,9 @@ struct mxs_lradc {
3287 #define LRADC_CTRL4 0x140
3288 #define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4))
3289 #define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4)
3290 +#define LRADC_CTRL4_LRADCSELECT(n, x) \
3291 + (((x) << LRADC_CTRL4_LRADCSELECT_OFFSET(n)) & \
3292 + LRADC_CTRL4_LRADCSELECT_MASK(n))
3293
3294 #define LRADC_RESOLUTION 12
3295 #define LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1)
3296 @@ -416,6 +425,14 @@ static bool mxs_lradc_check_touch_event(struct mxs_lradc *lradc)
3297 LRADC_STATUS_TOUCH_DETECT_RAW);
3298 }
3299
3300 +static void mxs_lradc_map_channel(struct mxs_lradc *lradc, unsigned vch,
3301 + unsigned ch)
3302 +{
3303 + mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(vch),
3304 + LRADC_CTRL4);
3305 + mxs_lradc_reg_set(lradc, LRADC_CTRL4_LRADCSELECT(vch, ch), LRADC_CTRL4);
3306 +}
3307 +
3308 static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
3309 {
3310 /*
3311 @@ -443,12 +460,8 @@ static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
3312 LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
3313 LRADC_DELAY(3));
3314
3315 - mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) |
3316 - LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) |
3317 - LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
3318 + mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch), LRADC_CTRL1);
3319
3320 - /* wake us again, when the complete conversion is done */
3321 - mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch), LRADC_CTRL1);
3322 /*
3323 * after changing the touchscreen plates setting
3324 * the signals need some initial time to settle. Start the
3325 @@ -501,12 +514,8 @@ static void mxs_lradc_setup_ts_pressure(struct mxs_lradc *lradc, unsigned ch1,
3326 LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
3327 LRADC_DELAY(3));
3328
3329 - mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) |
3330 - LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) |
3331 - LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
3332 + mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch2), LRADC_CTRL1);
3333
3334 - /* wake us again, when the conversions are done */
3335 - mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch2), LRADC_CTRL1);
3336 /*
3337 * after changing the touchscreen plates setting
3338 * the signals need some initial time to settle. Start the
3339 @@ -571,36 +580,6 @@ static unsigned mxs_lradc_read_ts_pressure(struct mxs_lradc *lradc,
3340 #define TS_CH_XM 4
3341 #define TS_CH_YM 5
3342
3343 -static int mxs_lradc_read_ts_channel(struct mxs_lradc *lradc)
3344 -{
3345 - u32 reg;
3346 - int val;
3347 -
3348 - reg = readl(lradc->base + LRADC_CTRL1);
3349 -
3350 - /* only channels 3 to 5 are of interest here */
3351 - if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YP)) {
3352 - mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YP) |
3353 - LRADC_CTRL1_LRADC_IRQ(TS_CH_YP), LRADC_CTRL1);
3354 - val = mxs_lradc_read_raw_channel(lradc, TS_CH_YP);
3355 - } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_XM)) {
3356 - mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_XM) |
3357 - LRADC_CTRL1_LRADC_IRQ(TS_CH_XM), LRADC_CTRL1);
3358 - val = mxs_lradc_read_raw_channel(lradc, TS_CH_XM);
3359 - } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YM)) {
3360 - mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YM) |
3361 - LRADC_CTRL1_LRADC_IRQ(TS_CH_YM), LRADC_CTRL1);
3362 - val = mxs_lradc_read_raw_channel(lradc, TS_CH_YM);
3363 - } else {
3364 - return -EIO;
3365 - }
3366 -
3367 - mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
3368 - mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
3369 -
3370 - return val;
3371 -}
3372 -
3373 /*
3374 * YP(open)--+-------------+
3375 * | |--+
3376 @@ -644,7 +623,8 @@ static void mxs_lradc_prepare_x_pos(struct mxs_lradc *lradc)
3377 mxs_lradc_reg_set(lradc, mxs_lradc_drive_x_plate(lradc), LRADC_CTRL0);
3378
3379 lradc->cur_plate = LRADC_SAMPLE_X;
3380 - mxs_lradc_setup_ts_channel(lradc, TS_CH_YP);
3381 + mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YP);
3382 + mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1);
3383 }
3384
3385 /*
3386 @@ -665,7 +645,8 @@ static void mxs_lradc_prepare_y_pos(struct mxs_lradc *lradc)
3387 mxs_lradc_reg_set(lradc, mxs_lradc_drive_y_plate(lradc), LRADC_CTRL0);
3388
3389 lradc->cur_plate = LRADC_SAMPLE_Y;
3390 - mxs_lradc_setup_ts_channel(lradc, TS_CH_XM);
3391 + mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_XM);
3392 + mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1);
3393 }
3394
3395 /*
3396 @@ -686,7 +667,10 @@ static void mxs_lradc_prepare_pressure(struct mxs_lradc *lradc)
3397 mxs_lradc_reg_set(lradc, mxs_lradc_drive_pressure(lradc), LRADC_CTRL0);
3398
3399 lradc->cur_plate = LRADC_SAMPLE_PRESSURE;
3400 - mxs_lradc_setup_ts_pressure(lradc, TS_CH_XP, TS_CH_YM);
3401 + mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YM);
3402 + mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL2, TS_CH_XP);
3403 + mxs_lradc_setup_ts_pressure(lradc, TOUCHSCREEN_VCHANNEL2,
3404 + TOUCHSCREEN_VCHANNEL1);
3405 }
3406
3407 static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc)
3408 @@ -699,6 +683,19 @@ static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc)
3409 mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
3410 }
3411
3412 +static void mxs_lradc_start_touch_event(struct mxs_lradc *lradc)
3413 +{
3414 + mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
3415 + LRADC_CTRL1);
3416 + mxs_lradc_reg_set(lradc,
3417 + LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1);
3418 + /*
3419 + * start with the Y-pos, because it uses nearly the same plate
3420 + * settings as the touch detection
3421 + */
3422 + mxs_lradc_prepare_y_pos(lradc);
3423 +}
3424 +
3425 static void mxs_lradc_report_ts_event(struct mxs_lradc *lradc)
3426 {
3427 input_report_abs(lradc->ts_input, ABS_X, lradc->ts_x_pos);
3428 @@ -716,10 +713,12 @@ static void mxs_lradc_complete_touch_event(struct mxs_lradc *lradc)
3429 * start a dummy conversion to burn time to settle the signals
3430 * note: we are not interested in the conversion's value
3431 */
3432 - mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(5));
3433 - mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
3434 - mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(5), LRADC_CTRL1);
3435 - mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << 5) |
3436 + mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(TOUCHSCREEN_VCHANNEL1));
3437 + mxs_lradc_reg_clear(lradc,
3438 + LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
3439 + LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1);
3440 + mxs_lradc_reg_wrt(lradc,
3441 + LRADC_DELAY_TRIGGER(1 << TOUCHSCREEN_VCHANNEL1) |
3442 LRADC_DELAY_KICK | LRADC_DELAY_DELAY(10), /* waste 5 ms */
3443 LRADC_DELAY(2));
3444 }
3445 @@ -751,59 +750,45 @@ static void mxs_lradc_finish_touch_event(struct mxs_lradc *lradc, bool valid)
3446
3447 /* if it is released, wait for the next touch via IRQ */
3448 lradc->cur_plate = LRADC_TOUCH;
3449 - mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, LRADC_CTRL1);
3450 + mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
3451 + mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
3452 + mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ |
3453 + LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) |
3454 + LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1);
3455 mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
3456 }
3457
3458 /* touchscreen's state machine */
3459 static void mxs_lradc_handle_touch(struct mxs_lradc *lradc)
3460 {
3461 - int val;
3462 -
3463 switch (lradc->cur_plate) {
3464 case LRADC_TOUCH:
3465 - /*
3466 - * start with the Y-pos, because it uses nearly the same plate
3467 - * settings like the touch detection
3468 - */
3469 - if (mxs_lradc_check_touch_event(lradc)) {
3470 - mxs_lradc_reg_clear(lradc,
3471 - LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
3472 - LRADC_CTRL1);
3473 - mxs_lradc_prepare_y_pos(lradc);
3474 - }
3475 + if (mxs_lradc_check_touch_event(lradc))
3476 + mxs_lradc_start_touch_event(lradc);
3477 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ,
3478 LRADC_CTRL1);
3479 return;
3480
3481 case LRADC_SAMPLE_Y:
3482 - val = mxs_lradc_read_ts_channel(lradc);
3483 - if (val < 0) {
3484 - mxs_lradc_enable_touch_detection(lradc); /* re-start */
3485 - return;
3486 - }
3487 - lradc->ts_y_pos = val;
3488 + lradc->ts_y_pos = mxs_lradc_read_raw_channel(lradc,
3489 + TOUCHSCREEN_VCHANNEL1);
3490 mxs_lradc_prepare_x_pos(lradc);
3491 return;
3492
3493 case LRADC_SAMPLE_X:
3494 - val = mxs_lradc_read_ts_channel(lradc);
3495 - if (val < 0) {
3496 - mxs_lradc_enable_touch_detection(lradc); /* re-start */
3497 - return;
3498 - }
3499 - lradc->ts_x_pos = val;
3500 + lradc->ts_x_pos = mxs_lradc_read_raw_channel(lradc,
3501 + TOUCHSCREEN_VCHANNEL1);
3502 mxs_lradc_prepare_pressure(lradc);
3503 return;
3504
3505 case LRADC_SAMPLE_PRESSURE:
3506 - lradc->ts_pressure =
3507 - mxs_lradc_read_ts_pressure(lradc, TS_CH_XP, TS_CH_YM);
3508 + lradc->ts_pressure = mxs_lradc_read_ts_pressure(lradc,
3509 + TOUCHSCREEN_VCHANNEL2,
3510 + TOUCHSCREEN_VCHANNEL1);
3511 mxs_lradc_complete_touch_event(lradc);
3512 return;
3513
3514 case LRADC_SAMPLE_VALID:
3515 - val = mxs_lradc_read_ts_channel(lradc); /* ignore the value */
3516 mxs_lradc_finish_touch_event(lradc, 1);
3517 break;
3518 }
3519 @@ -835,9 +820,9 @@ static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val)
3520 * used if doing raw sampling.
3521 */
3522 if (lradc->soc == IMX28_LRADC)
3523 - mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
3524 + mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0),
3525 LRADC_CTRL1);
3526 - mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
3527 + mxs_lradc_reg_clear(lradc, 0x1, LRADC_CTRL0);
3528
3529 /* Enable / disable the divider per requirement */
3530 if (test_bit(chan, &lradc->is_divided))
3531 @@ -1081,9 +1066,8 @@ static void mxs_lradc_disable_ts(struct mxs_lradc *lradc)
3532 {
3533 /* stop all interrupts from firing */
3534 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN |
3535 - LRADC_CTRL1_LRADC_IRQ_EN(2) | LRADC_CTRL1_LRADC_IRQ_EN(3) |
3536 - LRADC_CTRL1_LRADC_IRQ_EN(4) | LRADC_CTRL1_LRADC_IRQ_EN(5),
3537 - LRADC_CTRL1);
3538 + LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) |
3539 + LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1);
3540
3541 /* Power-down touchscreen touch-detect circuitry. */
3542 mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
3543 @@ -1149,26 +1133,31 @@ static irqreturn_t mxs_lradc_handle_irq(int irq, void *data)
3544 struct iio_dev *iio = data;
3545 struct mxs_lradc *lradc = iio_priv(iio);
3546 unsigned long reg = readl(lradc->base + LRADC_CTRL1);
3547 + uint32_t clr_irq = mxs_lradc_irq_mask(lradc);
3548 const uint32_t ts_irq_mask =
3549 LRADC_CTRL1_TOUCH_DETECT_IRQ |
3550 - LRADC_CTRL1_LRADC_IRQ(2) |
3551 - LRADC_CTRL1_LRADC_IRQ(3) |
3552 - LRADC_CTRL1_LRADC_IRQ(4) |
3553 - LRADC_CTRL1_LRADC_IRQ(5);
3554 + LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
3555 + LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2);
3556
3557 if (!(reg & mxs_lradc_irq_mask(lradc)))
3558 return IRQ_NONE;
3559
3560 - if (lradc->use_touchscreen && (reg & ts_irq_mask))
3561 + if (lradc->use_touchscreen && (reg & ts_irq_mask)) {
3562 mxs_lradc_handle_touch(lradc);
3563
3564 - if (iio_buffer_enabled(iio))
3565 - iio_trigger_poll(iio->trig);
3566 - else if (reg & LRADC_CTRL1_LRADC_IRQ(0))
3567 + /* Make sure we don't clear the next conversion's interrupt. */
3568 + clr_irq &= ~(LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
3569 + LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2));
3570 + }
3571 +
3572 + if (iio_buffer_enabled(iio)) {
3573 + if (reg & lradc->buffer_vchans)
3574 + iio_trigger_poll(iio->trig);
3575 + } else if (reg & LRADC_CTRL1_LRADC_IRQ(0)) {
3576 complete(&lradc->completion);
3577 + }
3578
3579 - mxs_lradc_reg_clear(lradc, reg & mxs_lradc_irq_mask(lradc),
3580 - LRADC_CTRL1);
3581 + mxs_lradc_reg_clear(lradc, reg & clr_irq, LRADC_CTRL1);
3582
3583 return IRQ_HANDLED;
3584 }
3585 @@ -1280,9 +1269,10 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
3586 }
3587
3588 if (lradc->soc == IMX28_LRADC)
3589 - mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
3590 - LRADC_CTRL1);
3591 - mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
3592 + mxs_lradc_reg_clear(lradc,
3593 + lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET,
3594 + LRADC_CTRL1);
3595 + mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0);
3596
3597 for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) {
3598 ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs);
3599 @@ -1315,10 +1305,11 @@ static int mxs_lradc_buffer_postdisable(struct iio_dev *iio)
3600 mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK |
3601 LRADC_DELAY_KICK, LRADC_DELAY(0));
3602
3603 - mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
3604 + mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0);
3605 if (lradc->soc == IMX28_LRADC)
3606 - mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
3607 - LRADC_CTRL1);
3608 + mxs_lradc_reg_clear(lradc,
3609 + lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET,
3610 + LRADC_CTRL1);
3611
3612 kfree(lradc->buffer);
3613 mutex_unlock(&lradc->lock);
3614 @@ -1344,7 +1335,7 @@ static bool mxs_lradc_validate_scan_mask(struct iio_dev *iio,
3615 if (lradc->use_touchbutton)
3616 rsvd_chans++;
3617 if (lradc->use_touchscreen)
3618 - rsvd_chans++;
3619 + rsvd_chans += 2;
3620
3621 /* Test for attempts to map channels with special mode of operation. */
3622 if (bitmap_intersects(mask, &rsvd_mask, LRADC_MAX_TOTAL_CHANS))
3623 @@ -1404,6 +1395,13 @@ static const struct iio_chan_spec mxs_lradc_chan_spec[] = {
3624 .channel = 8,
3625 .scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,},
3626 },
3627 + /* Hidden channel to keep indexes */
3628 + {
3629 + .type = IIO_TEMP,
3630 + .indexed = 1,
3631 + .scan_index = -1,
3632 + .channel = 9,
3633 + },
3634 MXS_ADC_CHAN(10, IIO_VOLTAGE), /* VDDIO */
3635 MXS_ADC_CHAN(11, IIO_VOLTAGE), /* VTH */
3636 MXS_ADC_CHAN(12, IIO_VOLTAGE), /* VDDA */
3637 @@ -1556,6 +1554,11 @@ static int mxs_lradc_probe(struct platform_device *pdev)
3638
3639 touch_ret = mxs_lradc_probe_touchscreen(lradc, node);
3640
3641 + if (touch_ret == 0)
3642 + lradc->buffer_vchans = BUFFER_VCHANS_LIMITED;
3643 + else
3644 + lradc->buffer_vchans = BUFFER_VCHANS_ALL;
3645 +
3646 /* Grab all IRQ sources */
3647 for (i = 0; i < of_cfg->irq_count; i++) {
3648 lradc->irq[i] = platform_get_irq(pdev, i);
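
The LRADC_CTRL4_LRADCSELECT() macro introduced above packs a physical channel number into the 4-bit slot that belongs to a virtual channel. A standalone sketch of the clear-then-set update that mxs_lradc_map_channel() performs, with a plain variable standing in for the MMIO register:

#include <stdio.h>
#include <stdint.h>

#define SELECT_MASK(n)  (0xfu << ((n) * 4))
#define SELECT(n, x)    (((uint32_t)(x) << ((n) * 4)) & SELECT_MASK(n))

/* Map physical channel 'ch' onto virtual channel 'vch': clear the old
 * 4-bit mapping, then OR in the new one. */
static uint32_t map_channel(uint32_t ctrl4, unsigned vch, unsigned ch)
{
    ctrl4 &= ~SELECT_MASK(vch);
    ctrl4 |= SELECT(vch, ch);
    return ctrl4;
}

int main(void)
{
    uint32_t ctrl4 = 0;

    ctrl4 = map_channel(ctrl4, 7, 5); /* e.g. virtual channel 7 <- physical 5 */
    ctrl4 = map_channel(ctrl4, 6, 2); /* e.g. virtual channel 6 <- physical 2 */
    printf("CTRL4 = 0x%08x\n", ctrl4);
    return 0;
}
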
3649 diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
3650 index 9f93b8234095..45837a4e950d 100644
3651 --- a/drivers/target/target_core_pr.c
3652 +++ b/drivers/target/target_core_pr.c
3653 @@ -1857,8 +1857,8 @@ static int core_scsi3_update_aptpl_buf(
3654 }
3655
3656 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
3657 - pr_err("Unable to update renaming"
3658 - " APTPL metadata\n");
3659 + pr_err("Unable to update renaming APTPL metadata,"
3660 + " reallocating larger buffer\n");
3661 ret = -EMSGSIZE;
3662 goto out;
3663 }
3664 @@ -1875,8 +1875,8 @@ static int core_scsi3_update_aptpl_buf(
3665 lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
3666
3667 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
3668 - pr_err("Unable to update renaming"
3669 - " APTPL metadata\n");
3670 + pr_err("Unable to update renaming APTPL metadata,"
3671 + " reallocating larger buffer\n");
3672 ret = -EMSGSIZE;
3673 goto out;
3674 }
3675 @@ -1939,7 +1939,7 @@ static int __core_scsi3_write_aptpl_to_file(
3676 static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl)
3677 {
3678 unsigned char *buf;
3679 - int rc;
3680 + int rc, len = PR_APTPL_BUF_LEN;
3681
3682 if (!aptpl) {
3683 char *null_buf = "No Registrations or Reservations\n";
3684 @@ -1953,25 +1953,26 @@ static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, b
3685
3686 return 0;
3687 }
3688 -
3689 - buf = kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL);
3690 +retry:
3691 + buf = vzalloc(len);
3692 if (!buf)
3693 return TCM_OUT_OF_RESOURCES;
3694
3695 - rc = core_scsi3_update_aptpl_buf(dev, buf, PR_APTPL_BUF_LEN);
3696 + rc = core_scsi3_update_aptpl_buf(dev, buf, len);
3697 if (rc < 0) {
3698 - kfree(buf);
3699 - return TCM_OUT_OF_RESOURCES;
3700 + vfree(buf);
3701 + len *= 2;
3702 + goto retry;
3703 }
3704
3705 rc = __core_scsi3_write_aptpl_to_file(dev, buf);
3706 if (rc != 0) {
3707 pr_err("SPC-3 PR: Could not update APTPL\n");
3708 - kfree(buf);
3709 + vfree(buf);
3710 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3711 }
3712 dev->t10_pr.pr_aptpl_active = 1;
3713 - kfree(buf);
3714 + vfree(buf);
3715 pr_debug("SPC-3 PR: Set APTPL Bit Activated\n");
3716 return 0;
3717 }
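
core_scsi3_update_and_write_aptpl() now allocates with vzalloc() and, when the formatter reports the buffer is too small, frees it and retries with double the size. A userspace sketch of that grow-and-retry loop, with snprintf() standing in for core_scsi3_update_aptpl_buf():

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the APTPL formatter: returns -1 when the buffer is too
 * small, like the -EMSGSIZE path in the driver. */
static int format_metadata(char *buf, size_t len, const char *payload)
{
    int n = snprintf(buf, len, "PR metadata: %s\n", payload);
    return (n < 0 || (size_t)n >= len) ? -1 : 0;
}

int main(void)
{
    size_t len = 8; /* deliberately small initial size */
    char *buf;

    for (;;) {
        buf = calloc(1, len); /* calloc ~ vzalloc: zeroed allocation */
        if (!buf)
            return 1;
        if (format_metadata(buf, len, "some registration data") == 0)
            break;
        free(buf); /* too small: free, double, try again */
        len *= 2;
    }
    fputs(buf, stdout);
    free(buf);
    return 0;
}
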
3718 diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
3719 index 7a88af0e32d6..565c0da9d99d 100644
3720 --- a/drivers/target/target_core_sbc.c
3721 +++ b/drivers/target/target_core_sbc.c
3722 @@ -251,6 +251,8 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
3723 static sense_reason_t
3724 sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
3725 {
3726 + struct se_device *dev = cmd->se_dev;
3727 + sector_t end_lba = dev->transport->get_blocks(dev) + 1;
3728 unsigned int sectors = sbc_get_write_same_sectors(cmd);
3729
3730 if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
3731 @@ -264,6 +266,16 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
3732 sectors, cmd->se_dev->dev_attrib.max_write_same_len);
3733 return TCM_INVALID_CDB_FIELD;
3734 }
3735 + /*
3736 + * Sanity check for LBA wrap and request past end of device.
3737 + */
3738 + if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
3739 + ((cmd->t_task_lba + sectors) > end_lba)) {
3740 + pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
3741 + (unsigned long long)end_lba, cmd->t_task_lba, sectors);
3742 + return TCM_ADDRESS_OUT_OF_RANGE;
3743 + }
3744 +
3745 /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
3746 if (flags[0] & 0x10) {
3747 pr_warn("WRITE SAME with ANCHOR not supported\n");
3748 @@ -955,7 +967,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
3749 unsigned long long end_lba;
3750 check_lba:
3751 end_lba = dev->transport->get_blocks(dev) + 1;
3752 - if (cmd->t_task_lba + sectors > end_lba) {
3753 + if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
3754 + ((cmd->t_task_lba + sectors) > end_lba)) {
3755 pr_err("cmd exceeds last lba %llu "
3756 "(lba %llu, sectors %u)\n",
3757 end_lba, cmd->t_task_lba, sectors);
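
Both sbc.c hunks add the same guard: an unsigned sum that wraps around comes out smaller than either operand, so (lba + sectors) < lba detects the overflow before the end-of-device comparison. A small demonstration:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Same shape as the patched checks: reject a request that wraps the
 * LBA space or runs past the last addressable block. */
static bool lba_ok(uint64_t lba, uint32_t sectors, uint64_t end_lba)
{
    return !((lba + sectors) < lba || (lba + sectors) > end_lba);
}

int main(void)
{
    uint64_t end_lba = 1000;

    printf("%d\n", lba_ok(10, 20, end_lba));             /* 1: in range */
    printf("%d\n", lba_ok(990, 20, end_lba));            /* 0: past end */
    printf("%d\n", lba_ok(UINT64_MAX - 5, 10, end_lba)); /* 0: wraps */
    return 0;
}
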
3758 diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
3759 index ca5cfdc1459a..e5c31eadb0ac 100644
3760 --- a/drivers/tty/serial/8250/8250_core.c
3761 +++ b/drivers/tty/serial/8250/8250_core.c
3762 @@ -2088,8 +2088,8 @@ int serial8250_do_startup(struct uart_port *port)
3763 /*
3764 * Clear the interrupt registers.
3765 */
3766 - if (serial_port_in(port, UART_LSR) & UART_LSR_DR)
3767 - serial_port_in(port, UART_RX);
3768 + serial_port_in(port, UART_LSR);
3769 + serial_port_in(port, UART_RX);
3770 serial_port_in(port, UART_IIR);
3771 serial_port_in(port, UART_MSR);
3772
3773 @@ -2250,8 +2250,8 @@ dont_test_tx_en:
3774 * saved flags to avoid getting false values from polling
3775 * routines or the previous session.
3776 */
3777 - if (serial_port_in(port, UART_LSR) & UART_LSR_DR)
3778 - serial_port_in(port, UART_RX);
3779 + serial_port_in(port, UART_LSR);
3780 + serial_port_in(port, UART_RX);
3781 serial_port_in(port, UART_IIR);
3782 serial_port_in(port, UART_MSR);
3783 up->lsr_saved_flags = 0;
3784 @@ -2344,8 +2344,7 @@ void serial8250_do_shutdown(struct uart_port *port)
3785 * Read data port to reset things, and then unlink from
3786 * the IRQ chain.
3787 */
3788 - if (serial_port_in(port, UART_LSR) & UART_LSR_DR)
3789 - serial_port_in(port, UART_RX);
3790 + serial_port_in(port, UART_RX);
3791 serial8250_rpm_put(up);
3792
3793 del_timer_sync(&up->timer);
3794 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
3795 index 0508a1d8e4cd..0a0a6305c511 100644
3796 --- a/drivers/tty/tty_io.c
3797 +++ b/drivers/tty/tty_io.c
3798 @@ -975,8 +975,8 @@ EXPORT_SYMBOL(start_tty);
3799 /* We limit tty time update visibility to every 8 seconds or so. */
3800 static void tty_update_time(struct timespec *time)
3801 {
3802 - unsigned long sec = get_seconds() & ~7;
3803 - if ((long)(sec - time->tv_sec) > 0)
3804 + unsigned long sec = get_seconds();
3805 + if (abs(sec - time->tv_sec) & ~7)
3806 time->tv_sec = sec;
3807 }
3808
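
The tty_update_time() change replaces the signed forward-only test with abs(sec - time->tv_sec) & ~7, so the timestamp is refreshed whenever it drifts by eight seconds or more in either direction, which also covers a clock that was set backwards. A sketch of the patched behaviour:

#include <stdio.h>
#include <stdlib.h>

/* Update the stored time only when old and new differ by >= 8 seconds,
 * regardless of direction. */
static void update_time(long *stored, long now)
{
    if (labs(now - *stored) & ~7L)
        *stored = now;
}

int main(void)
{
    long t = 100;

    update_time(&t, 104); /* only 4s apart: kept at 100 */
    printf("%ld\n", t);
    update_time(&t, 92);  /* 8s in the past: updated to 92 */
    printf("%ld\n", t);
    return 0;
}
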
3809 diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
3810 index 62380ccf70fb..e43b6e559b3d 100644
3811 --- a/drivers/tty/tty_ioctl.c
3812 +++ b/drivers/tty/tty_ioctl.c
3813 @@ -217,11 +217,17 @@ void tty_wait_until_sent(struct tty_struct *tty, long timeout)
3814 #endif
3815 if (!timeout)
3816 timeout = MAX_SCHEDULE_TIMEOUT;
3817 +
3818 if (wait_event_interruptible_timeout(tty->write_wait,
3819 - !tty_chars_in_buffer(tty), timeout) >= 0) {
3820 - if (tty->ops->wait_until_sent)
3821 - tty->ops->wait_until_sent(tty, timeout);
3822 + !tty_chars_in_buffer(tty), timeout) < 0) {
3823 + return;
3824 }
3825 +
3826 + if (timeout == MAX_SCHEDULE_TIMEOUT)
3827 + timeout = 0;
3828 +
3829 + if (tty->ops->wait_until_sent)
3830 + tty->ops->wait_until_sent(tty, timeout);
3831 }
3832 EXPORT_SYMBOL(tty_wait_until_sent);
3833
3834 diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
3835 index 0b59731c3021..e500243803d8 100644
3836 --- a/drivers/usb/core/devio.c
3837 +++ b/drivers/usb/core/devio.c
3838 @@ -501,6 +501,7 @@ static void async_completed(struct urb *urb)
3839 as->status = urb->status;
3840 signr = as->signr;
3841 if (signr) {
3842 + memset(&sinfo, 0, sizeof(sinfo));
3843 sinfo.si_signo = as->signr;
3844 sinfo.si_errno = as->status;
3845 sinfo.si_code = SI_ASYNCIO;
3846 @@ -2371,6 +2372,7 @@ static void usbdev_remove(struct usb_device *udev)
3847 wake_up_all(&ps->wait);
3848 list_del_init(&ps->list);
3849 if (ps->discsignr) {
3850 + memset(&sinfo, 0, sizeof(sinfo));
3851 sinfo.si_signo = ps->discsignr;
3852 sinfo.si_errno = EPIPE;
3853 sinfo.si_code = SI_ASYNCIO;
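
Both devio.c hunks zero the stack-allocated siginfo before filling individual fields, so padding bytes and unused union members cannot carry stale kernel stack data out to userspace. The same pattern with a hypothetical struct:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for siginfo_t: any partially-filled struct with
 * padding or a union must be zeroed before it is copied elsewhere. */
struct demo_info {
    int signo;
    int err;
    int code;
    union { void *ptr; long band; } payload;
};

static void fill_info(struct demo_info *info, int signo, int err)
{
    memset(info, 0, sizeof(*info)); /* no stale bytes remain */
    info->signo = signo;
    info->err = err;
    info->code = 4; /* e.g. SI_ASYNCIO */
}

int main(void)
{
    struct demo_info info;

    fill_info(&info, 10, -32);
    printf("signo=%d err=%d code=%d\n", info.signo, info.err, info.code);
    return 0;
}
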
3854 diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
3855 index a0aa9f3da441..3eb0f5eace79 100644
3856 --- a/drivers/usb/dwc3/dwc3-omap.c
3857 +++ b/drivers/usb/dwc3/dwc3-omap.c
3858 @@ -205,6 +205,18 @@ static void dwc3_omap_write_irq0_set(struct dwc3_omap *omap, u32 value)
3859 omap->irq0_offset, value);
3860 }
3861
3862 +static void dwc3_omap_write_irqmisc_clr(struct dwc3_omap *omap, u32 value)
3863 +{
3864 + dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_MISC +
3865 + omap->irqmisc_offset, value);
3866 +}
3867 +
3868 +static void dwc3_omap_write_irq0_clr(struct dwc3_omap *omap, u32 value)
3869 +{
3870 + dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_0 -
3871 + omap->irq0_offset, value);
3872 +}
3873 +
3874 static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
3875 enum omap_dwc3_vbus_id_status status)
3876 {
3877 @@ -345,9 +357,23 @@ static void dwc3_omap_enable_irqs(struct dwc3_omap *omap)
3878
3879 static void dwc3_omap_disable_irqs(struct dwc3_omap *omap)
3880 {
3881 + u32 reg;
3882 +
3883 /* disable all IRQs */
3884 - dwc3_omap_write_irqmisc_set(omap, 0x00);
3885 - dwc3_omap_write_irq0_set(omap, 0x00);
3886 + reg = USBOTGSS_IRQO_COREIRQ_ST;
3887 + dwc3_omap_write_irq0_clr(omap, reg);
3888 +
3889 + reg = (USBOTGSS_IRQMISC_OEVT |
3890 + USBOTGSS_IRQMISC_DRVVBUS_RISE |
3891 + USBOTGSS_IRQMISC_CHRGVBUS_RISE |
3892 + USBOTGSS_IRQMISC_DISCHRGVBUS_RISE |
3893 + USBOTGSS_IRQMISC_IDPULLUP_RISE |
3894 + USBOTGSS_IRQMISC_DRVVBUS_FALL |
3895 + USBOTGSS_IRQMISC_CHRGVBUS_FALL |
3896 + USBOTGSS_IRQMISC_DISCHRGVBUS_FALL |
3897 + USBOTGSS_IRQMISC_IDPULLUP_FALL);
3898 +
3899 + dwc3_omap_write_irqmisc_clr(omap, reg);
3900 }
3901
3902 static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32);
3903 diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
3904 index 34034333f7f6..28d3dd3637b3 100644
3905 --- a/drivers/usb/gadget/configfs.c
3906 +++ b/drivers/usb/gadget/configfs.c
3907 @@ -1161,7 +1161,6 @@ static ssize_t interf_grp_compatible_id_store(struct usb_os_desc *desc,
3908 if (desc->opts_mutex)
3909 mutex_lock(desc->opts_mutex);
3910 memcpy(desc->ext_compat_id, page, l);
3911 - desc->ext_compat_id[l] = '\0';
3912
3913 if (desc->opts_mutex)
3914 mutex_unlock(desc->opts_mutex);
3915 @@ -1192,7 +1191,6 @@ static ssize_t interf_grp_sub_compatible_id_store(struct usb_os_desc *desc,
3916 if (desc->opts_mutex)
3917 mutex_lock(desc->opts_mutex);
3918 memcpy(desc->ext_compat_id + 8, page, l);
3919 - desc->ext_compat_id[l + 8] = '\0';
3920
3921 if (desc->opts_mutex)
3922 mutex_unlock(desc->opts_mutex);
3923 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
3924 index 7f76c8a12f89..fd53c9ebd662 100644
3925 --- a/drivers/usb/host/xhci-pci.c
3926 +++ b/drivers/usb/host/xhci-pci.c
3927 @@ -37,6 +37,9 @@
3928
3929 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
3930 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
3931 +#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
3932 +#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
3933 +#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
3934
3935 static const char hcd_name[] = "xhci_hcd";
3936
3937 @@ -133,6 +136,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
3938 pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
3939 xhci->quirks |= XHCI_SPURIOUS_REBOOT;
3940 }
3941 + if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
3942 + (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
3943 + pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
3944 + pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
3945 + xhci->quirks |= XHCI_PME_STUCK_QUIRK;
3946 + }
3947 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
3948 pdev->device == PCI_DEVICE_ID_EJ168) {
3949 xhci->quirks |= XHCI_RESET_ON_RESUME;
3950 @@ -159,6 +168,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
3951 "QUIRK: Resetting on resume");
3952 }
3953
3954 +/*
3955 + * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
3956 + * the Internal PME flag bit in the vendor-specific PMCTRL register at offset 0x80a4
3957 + */
3958 +static void xhci_pme_quirk(struct xhci_hcd *xhci)
3959 +{
3960 + u32 val;
3961 + void __iomem *reg;
3962 +
3963 + reg = (void __iomem *) xhci->cap_regs + 0x80a4;
3964 + val = readl(reg);
3965 + writel(val | BIT(28), reg);
3966 + readl(reg);
3967 +}
3968 +
3969 /* called during probe() after chip reset completes */
3970 static int xhci_pci_setup(struct usb_hcd *hcd)
3971 {
3972 @@ -283,6 +307,9 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
3973 if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
3974 pdev->no_d3cold = true;
3975
3976 + if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
3977 + xhci_pme_quirk(xhci);
3978 +
3979 return xhci_suspend(xhci, do_wakeup);
3980 }
3981
3982 @@ -313,6 +340,9 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
3983 if (pdev->vendor == PCI_VENDOR_ID_INTEL)
3984 usb_enable_intel_xhci_ports(pdev);
3985
3986 + if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
3987 + xhci_pme_quirk(xhci);
3988 +
3989 retval = xhci_resume(xhci, hibernated);
3990 return retval;
3991 }
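
xhci_pme_quirk() above is a read-modify-write of a vendor register followed by a read-back that posts the write. A sketch of the same shape over an ordinary variable; note that on the real controller bit 28 is write-1-to-clear, so the hardware clears the stuck flag, while the plain variable here simply keeps it set:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

/* Read, set bit 28, write back, then read once more; on MMIO the final
 * read flushes the posted write to the device. */
static void pme_quirk(volatile uint32_t *reg)
{
    uint32_t val = *reg;

    *reg = val | BIT(28);
    (void)*reg;
}

int main(void)
{
    volatile uint32_t pmctrl = 0x5; /* hypothetical PMCTRL contents */

    pme_quirk(&pmctrl);
    printf("PMCTRL = 0x%08x\n", (uint32_t)pmctrl);
    return 0;
}
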
3992 diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
3993 index 646300cbe5f7..22516f41c6f4 100644
3994 --- a/drivers/usb/host/xhci-plat.c
3995 +++ b/drivers/usb/host/xhci-plat.c
3996 @@ -87,15 +87,6 @@ static int xhci_plat_probe(struct platform_device *pdev)
3997 if (!res)
3998 return -ENODEV;
3999
4000 - if (of_device_is_compatible(pdev->dev.of_node,
4001 - "marvell,armada-375-xhci") ||
4002 - of_device_is_compatible(pdev->dev.of_node,
4003 - "marvell,armada-380-xhci")) {
4004 - ret = xhci_mvebu_mbus_init_quirk(pdev);
4005 - if (ret)
4006 - return ret;
4007 - }
4008 -
4009 /* Initialize dma_mask and coherent_dma_mask to 32-bits */
4010 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
4011 if (ret)
4012 @@ -129,6 +120,15 @@ static int xhci_plat_probe(struct platform_device *pdev)
4013 goto put_hcd;
4014 }
4015
4016 + if (of_device_is_compatible(pdev->dev.of_node,
4017 + "marvell,armada-375-xhci") ||
4018 + of_device_is_compatible(pdev->dev.of_node,
4019 + "marvell,armada-380-xhci")) {
4020 + ret = xhci_mvebu_mbus_init_quirk(pdev);
4021 + if (ret)
4022 + goto disable_clk;
4023 + }
4024 +
4025 ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
4026 if (ret)
4027 goto disable_clk;
4028 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
4029 index 06433aec81d7..338f19cc0973 100644
4030 --- a/drivers/usb/host/xhci-ring.c
4031 +++ b/drivers/usb/host/xhci-ring.c
4032 @@ -1957,7 +1957,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
4033 if (event_trb != ep_ring->dequeue) {
4034 /* The event was for the status stage */
4035 if (event_trb == td->last_trb) {
4036 - if (td->urb->actual_length != 0) {
4037 + if (td->urb_length_set) {
4038 /* Don't overwrite a previously set error code
4039 */
4040 if ((*status == -EINPROGRESS || *status == 0) &&
4041 @@ -1971,7 +1971,13 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
4042 td->urb->transfer_buffer_length;
4043 }
4044 } else {
4045 - /* Maybe the event was for the data stage? */
4046 + /*
4047 + * Maybe the event was for the data stage? If so, update
4048 + * the actual_length of the URB now and flag it as
4049 + * set, so that it is not overwritten in the event for
4050 + * the last TRB.
4051 + */
4052 + td->urb_length_set = true;
4053 td->urb->actual_length =
4054 td->urb->transfer_buffer_length -
4055 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
4056 diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
4057 index d745715a1e2f..94251141adae 100644
4058 --- a/drivers/usb/host/xhci.h
4059 +++ b/drivers/usb/host/xhci.h
4060 @@ -1,3 +1,4 @@
4061 +
4062 /*
4063 * xHCI host controller driver
4064 *
4065 @@ -88,9 +89,10 @@ struct xhci_cap_regs {
4066 #define HCS_IST(p) (((p) >> 0) & 0xf)
4067 /* bits 4:7, max number of Event Ring segments */
4068 #define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
4069 +/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
4070 /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
4071 -/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */
4072 -#define HCS_MAX_SCRATCHPAD(p) (((p) >> 27) & 0x1f)
4073 +/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
4074 +#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
4075
4076 /* HCSPARAMS3 - hcs_params3 - bitmasks */
4077 /* bits 0:7, Max U1 to U0 latency for the roothub ports */
4078 @@ -1288,6 +1290,8 @@ struct xhci_td {
4079 struct xhci_segment *start_seg;
4080 union xhci_trb *first_trb;
4081 union xhci_trb *last_trb;
4082 + /* actual_length of the URB has already been set */
4083 + bool urb_length_set;
4084 };
4085
4086 /* xHCI command default timeout value */
4087 @@ -1560,6 +1564,7 @@ struct xhci_hcd {
4088 #define XHCI_SPURIOUS_WAKEUP (1 << 18)
4089 /* For controllers with a broken beyond repair streams implementation */
4090 #define XHCI_BROKEN_STREAMS (1 << 19)
4091 +#define XHCI_PME_STUCK_QUIRK (1 << 20)
4092 unsigned int num_active_eps;
4093 unsigned int limit_active_eps;
4094 /* There are two roothubs to keep track of bus suspend info for */
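
The HCS_MAX_SCRATCHPAD() fix reassembles the scratchpad buffer count from two fields of HCSPARAMS2: bits 21:25 hold the high five bits and bits 27:31 the low five. Worked through with a made-up register value:

#include <stdio.h>
#include <stdint.h>

/* Patched macro: (p >> 16) & 0x3e0 moves bits 21:25 down to positions
 * 5..9; (p >> 27) & 0x1f supplies the low five bits. */
#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))

int main(void)
{
    /* hypothetical HCSPARAMS2 with hi = 2 and lo = 3:
     * expected count = (2 << 5) | 3 = 67 */
    uint32_t p = (2u << 21) | (3u << 27);

    printf("scratchpad buffers = %u\n", HCS_MAX_SCRATCHPAD(p));
    return 0;
}
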
4095 diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
4096 index 9374bd2aba20..6f91eb9ae81a 100644
4097 --- a/drivers/usb/serial/bus.c
4098 +++ b/drivers/usb/serial/bus.c
4099 @@ -51,6 +51,7 @@ static int usb_serial_device_probe(struct device *dev)
4100 {
4101 struct usb_serial_driver *driver;
4102 struct usb_serial_port *port;
4103 + struct device *tty_dev;
4104 int retval = 0;
4105 int minor;
4106
4107 @@ -75,12 +76,20 @@ static int usb_serial_device_probe(struct device *dev)
4108 retval = device_create_file(dev, &dev_attr_port_number);
4109 if (retval) {
4110 if (driver->port_remove)
4111 - retval = driver->port_remove(port);
4112 + driver->port_remove(port);
4113 goto exit_with_autopm;
4114 }
4115
4116 minor = port->minor;
4117 - tty_register_device(usb_serial_tty_driver, minor, dev);
4118 + tty_dev = tty_register_device(usb_serial_tty_driver, minor, dev);
4119 + if (IS_ERR(tty_dev)) {
4120 + retval = PTR_ERR(tty_dev);
4121 + device_remove_file(dev, &dev_attr_port_number);
4122 + if (driver->port_remove)
4123 + driver->port_remove(port);
4124 + goto exit_with_autopm;
4125 + }
4126 +
4127 dev_info(&port->serial->dev->dev,
4128 "%s converter now attached to ttyUSB%d\n",
4129 driver->description, minor);
4130 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
4131 index f40c856ff758..84ce2d74894c 100644
4132 --- a/drivers/usb/serial/cp210x.c
4133 +++ b/drivers/usb/serial/cp210x.c
4134 @@ -147,6 +147,8 @@ static const struct usb_device_id id_table[] = {
4135 { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */
4136 { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */
4137 { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */
4138 + { USB_DEVICE(0x16C0, 0x09B0) }, /* Lunatico Seletek */
4139 + { USB_DEVICE(0x16C0, 0x09B1) }, /* Lunatico Seletek */
4140 { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
4141 { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
4142 { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
4143 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
4144 index 1ebb351b9e9a..3086dec0ef53 100644
4145 --- a/drivers/usb/serial/ftdi_sio.c
4146 +++ b/drivers/usb/serial/ftdi_sio.c
4147 @@ -799,6 +799,8 @@ static const struct usb_device_id id_table_combined[] = {
4148 { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
4149 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
4150 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
4151 + { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID),
4152 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
4153 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
4154 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
4155 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
4156 @@ -978,6 +980,23 @@ static const struct usb_device_id id_table_combined[] = {
4157 { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
4158 /* GE Healthcare devices */
4159 { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
4160 + /* Active Research (Actisense) devices */
4161 + { USB_DEVICE(FTDI_VID, ACTISENSE_NDC_PID) },
4162 + { USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) },
4163 + { USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) },
4164 + { USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) },
4165 + { USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) },
4166 + { USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) },
4167 + { USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) },
4168 + { USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) },
4169 + { USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) },
4170 + { USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) },
4171 + { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_NMEA2000_PID) },
4172 + { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ETHERNET_PID) },
4173 + { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_WIFI_PID) },
4174 + { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
4175 + { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
4176 + { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
4177 { } /* Terminating entry */
4178 };
4179
4180 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
4181 index e52409c9be99..56b1b55c4751 100644
4182 --- a/drivers/usb/serial/ftdi_sio_ids.h
4183 +++ b/drivers/usb/serial/ftdi_sio_ids.h
4184 @@ -38,6 +38,9 @@
4185
4186 #define FTDI_LUMEL_PD12_PID 0x6002
4187
4188 +/* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */
4189 +#define CYBER_CORTEX_AV_PID 0x8698
4190 +
4191 /*
4192 * Marvell OpenRD Base, Client
4193 * http://www.open-rd.org
4194 @@ -1438,3 +1441,23 @@
4195 */
4196 #define GE_HEALTHCARE_VID 0x1901
4197 #define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015
4198 +
4199 +/*
4200 + * Active Research (Actisense) devices
4201 + */
4202 +#define ACTISENSE_NDC_PID 0xD9A8 /* NDC USB Serial Adapter */
4203 +#define ACTISENSE_USG_PID 0xD9A9 /* USG USB Serial Adapter */
4204 +#define ACTISENSE_NGT_PID 0xD9AA /* NGT NMEA2000 Interface */
4205 +#define ACTISENSE_NGW_PID 0xD9AB /* NGW NMEA2000 Gateway */
4206 +#define ACTISENSE_D9AC_PID 0xD9AC /* Actisense Reserved */
4207 +#define ACTISENSE_D9AD_PID 0xD9AD /* Actisense Reserved */
4208 +#define ACTISENSE_D9AE_PID 0xD9AE /* Actisense Reserved */
4209 +#define ACTISENSE_D9AF_PID 0xD9AF /* Actisense Reserved */
4210 +#define CHETCO_SEAGAUGE_PID 0xA548 /* SeaGauge USB Adapter */
4211 +#define CHETCO_SEASWITCH_PID 0xA549 /* SeaSwitch USB Adapter */
4212 +#define CHETCO_SEASMART_NMEA2000_PID 0xA54A /* SeaSmart NMEA2000 Gateway */
4213 +#define CHETCO_SEASMART_ETHERNET_PID 0xA54B /* SeaSmart Ethernet Gateway */
4214 +#define CHETCO_SEASMART_WIFI_PID 0xA5AC /* SeaSmart Wifi Gateway */
4215 +#define CHETCO_SEASMART_DISPLAY_PID 0xA5AD /* SeaSmart NMEA2000 Display */
4216 +#define CHETCO_SEASMART_LITE_PID 0xA5AE /* SeaSmart Lite USB Adapter */
4217 +#define CHETCO_SEASMART_ANALOG_PID 0xA5AF /* SeaSmart Analog Adapter */
4218 diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
4219 index 1bd192290b08..904ab353ecf2 100644
4220 --- a/drivers/usb/serial/generic.c
4221 +++ b/drivers/usb/serial/generic.c
4222 @@ -258,7 +258,8 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
4223 * character or at least one jiffy.
4224 */
4225 period = max_t(unsigned long, (10 * HZ / bps), 1);
4226 - period = min_t(unsigned long, period, timeout);
4227 + if (timeout)
4228 + period = min_t(unsigned long, period, timeout);
4229
4230 dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n",
4231 __func__, jiffies_to_msecs(timeout),
4232 @@ -268,7 +269,7 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
4233 schedule_timeout_interruptible(period);
4234 if (signal_pending(current))
4235 break;
4236 - if (time_after(jiffies, expire))
4237 + if (timeout && time_after(jiffies, expire))
4238 break;
4239 }
4240 }
4241 diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
4242 index ab1d690274ae..460a40669967 100644
4243 --- a/drivers/usb/serial/mxuport.c
4244 +++ b/drivers/usb/serial/mxuport.c
4245 @@ -1284,7 +1284,8 @@ static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port)
4246 }
4247
4248 /* Initial port termios */
4249 - mxuport_set_termios(tty, port, NULL);
4250 + if (tty)
4251 + mxuport_set_termios(tty, port, NULL);
4252
4253 /*
4254 * TODO: use RQ_VENDOR_GET_MSR, once we know what it
4255 diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
4256 index 475723c006f9..19842370a07f 100644
4257 --- a/drivers/usb/serial/usb-serial.c
4258 +++ b/drivers/usb/serial/usb-serial.c
4259 @@ -940,8 +940,9 @@ static int usb_serial_probe(struct usb_interface *interface,
4260 port = serial->port[i];
4261 if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
4262 goto probe_error;
4263 - buffer_size = max_t(int, serial->type->bulk_out_size,
4264 - usb_endpoint_maxp(endpoint));
4265 + buffer_size = serial->type->bulk_out_size;
4266 + if (!buffer_size)
4267 + buffer_size = usb_endpoint_maxp(endpoint);
4268 port->bulk_out_size = buffer_size;
4269 port->bulk_out_endpointAddress = endpoint->bEndpointAddress;
4270
4271 diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
4272 index 2706a434fdbb..cd047d0cc7a6 100644
4273 --- a/drivers/usb/storage/unusual_uas.h
4274 +++ b/drivers/usb/storage/unusual_uas.h
4275 @@ -103,6 +103,13 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
4276 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
4277 US_FL_NO_ATA_1X),
4278
4279 +/* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
4280 +UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
4281 + "JMicron",
4282 + "JMS539",
4283 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
4284 + US_FL_NO_REPORT_OPCODES),
4285 +
4286 /* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */
4287 UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
4288 "JMicron",
4289 diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
4290 index aaf96cb25452..ac7d921ed984 100644
4291 --- a/fs/autofs4/dev-ioctl.c
4292 +++ b/fs/autofs4/dev-ioctl.c
4293 @@ -95,7 +95,7 @@ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param)
4294 */
4295 static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in)
4296 {
4297 - struct autofs_dev_ioctl tmp;
4298 + struct autofs_dev_ioctl tmp, *res;
4299
4300 if (copy_from_user(&tmp, in, sizeof(tmp)))
4301 return ERR_PTR(-EFAULT);
4302 @@ -106,7 +106,11 @@ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *i
4303 if (tmp.size > (PATH_MAX + sizeof(tmp)))
4304 return ERR_PTR(-ENAMETOOLONG);
4305
4306 - return memdup_user(in, tmp.size);
4307 + res = memdup_user(in, tmp.size);
4308 + if (!IS_ERR(res))
4309 + res->size = tmp.size;
4310 +
4311 + return res;
4312 }
4313
4314 static inline void free_dev_ioctl(struct autofs_dev_ioctl *param)
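
The copy_dev_ioctl() fix re-stamps the validated size into the duplicated buffer, closing a double-fetch window: userspace could otherwise change 'size' between the validation read and the memdup_user() copy. A userspace analogue of the pattern:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dev_ioctl { unsigned int size; char path[32]; };

/* Fetch and validate the size once, duplicate the buffer, then write
 * the validated value back so later code never trusts the second,
 * possibly racing, fetch of 'size'. */
static struct dev_ioctl *copy_dev_ioctl(const struct dev_ioctl *in)
{
    unsigned int size = in->size; /* first fetch: the one we validate */
    struct dev_ioctl *res;

    if (size != sizeof(*in)) /* the demo keeps the size exact */
        return NULL;

    res = malloc(size);
    if (!res)
        return NULL;
    memcpy(res, in, size);   /* second fetch of *in, incl. its size */
    res->size = size;        /* re-stamp the checked value */
    return res;
}

int main(void)
{
    struct dev_ioctl in = { sizeof(in), "/mnt/auto" };
    struct dev_ioctl *res = copy_dev_ioctl(&in);

    if (res) {
        printf("size=%u path=%s\n", res->size, res->path);
        free(res);
    }
    return 0;
}
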
4315 diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
4316 index a18ceabd99a8..5193c7844315 100644
4317 --- a/fs/btrfs/file.c
4318 +++ b/fs/btrfs/file.c
4319 @@ -1803,22 +1803,10 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
4320 mutex_unlock(&inode->i_mutex);
4321
4322 /*
4323 - * we want to make sure fsync finds this change
4324 - * but we haven't joined a transaction running right now.
4325 - *
4326 - * Later on, someone is sure to update the inode and get the
4327 - * real transid recorded.
4328 - *
4329 - * We set last_trans now to the fs_info generation + 1,
4330 - * this will either be one more than the running transaction
4331 - * or the generation used for the next transaction if there isn't
4332 - * one running right now.
4333 - *
4334 * We also have to set last_sub_trans to the current log transid,
4335 * otherwise subsequent syncs to a file that's been synced in this
4336 * transaction will appear to have already occurred.
4337 */
4338 - BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
4339 BTRFS_I(inode)->last_sub_trans = root->log_transid;
4340 if (num_written > 0) {
4341 err = generic_write_sync(file, pos, num_written);
4342 @@ -1954,25 +1942,37 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
4343 atomic_inc(&root->log_batch);
4344
4345 /*
4346 - * check the transaction that last modified this inode
4347 - * and see if its already been committed
4348 - */
4349 - if (!BTRFS_I(inode)->last_trans) {
4350 - mutex_unlock(&inode->i_mutex);
4351 - goto out;
4352 - }
4353 -
4354 - /*
4355 - * if the last transaction that changed this file was before
4356 - * the current transaction, we can bail out now without any
4357 - * syncing
4358 + * If the last transaction that changed this file was before the current
4359 + * transaction and we have the full sync flag set in our inode, we can
4360 + * bail out now without any syncing.
4361 + *
4362 + * Note that we can't bail out if the full sync flag isn't set. This is
4363 + * because when the full sync flag is set we start all ordered extents
4364 + * and wait for them to fully complete - when they complete they update
4365 + * the inode's last_trans field through:
4366 + *
4367 + * btrfs_finish_ordered_io() ->
4368 + * btrfs_update_inode_fallback() ->
4369 + * btrfs_update_inode() ->
4370 + * btrfs_set_inode_last_trans()
4371 + *
4372 + * So we are sure that last_trans is up to date and can do this check to
4373 + * bail out safely. For the fast path, when the full sync flag is not
4374 + * set in our inode, we cannot do it because we only start our ordered
4375 + * extents and don't wait for them to complete (that is when
4376 + * btrfs_finish_ordered_io runs), so here at this point their last_trans
4377 + * value might be less than or equal to fs_info->last_trans_committed,
4378 + * and setting a speculative last_trans for an inode when a buffered
4379 + * write is made (such as fs_info->generation + 1 for example) would not
4380 + * be reliable since after setting the value and before fsync is called
4381 + * any number of transactions can start and commit (transaction kthread
4382 + * commits the current transaction periodically), and a transaction
4383 + * commit neither starts nor waits for ordered extents to complete.
4384 */
4385 smp_mb();
4386 if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
4387 - BTRFS_I(inode)->last_trans <=
4388 - root->fs_info->last_trans_committed) {
4389 - BTRFS_I(inode)->last_trans = 0;
4390 -
4391 + (full_sync && BTRFS_I(inode)->last_trans <=
4392 + root->fs_info->last_trans_committed)) {
4393 /*
4394 * We've had everything committed since the last time we were
4395 * modified so clear this flag in case it was set for whatever
4396 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
4397 index d23362f4464e..edaa6178b4ec 100644
4398 --- a/fs/btrfs/inode.c
4399 +++ b/fs/btrfs/inode.c
4400 @@ -7151,7 +7151,6 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
4401 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
4402 em->block_start != EXTENT_MAP_HOLE)) {
4403 int type;
4404 - int ret;
4405 u64 block_start, orig_start, orig_block_len, ram_bytes;
4406
4407 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4408 diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
4409 index 269e21dd1506..b23d024c0234 100644
4410 --- a/fs/btrfs/ordered-data.c
4411 +++ b/fs/btrfs/ordered-data.c
4412 @@ -442,9 +442,7 @@ void btrfs_get_logged_extents(struct inode *inode,
4413 spin_lock_irq(&tree->lock);
4414 for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
4415 ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
4416 - if (!list_empty(&ordered->log_list))
4417 - continue;
4418 - if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
4419 + if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
4420 continue;
4421 list_add_tail(&ordered->log_list, logged_list);
4422 atomic_inc(&ordered->refs);
4423 @@ -501,8 +499,7 @@ void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
4424 wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
4425 &ordered->flags));
4426
4427 - if (!test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
4428 - list_add_tail(&ordered->trans_list, &trans->ordered);
4429 + list_add_tail(&ordered->trans_list, &trans->ordered);
4430 spin_lock_irq(&log->log_extents_lock[index]);
4431 }
4432 spin_unlock_irq(&log->log_extents_lock[index]);
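
The ordered-data change collapses a racy "check the flag, then add to the list" sequence into one test_and_set_bit(), so exactly one caller claims each ordered extent for logging. A userspace analogue built on C11 atomics:

#include <stdio.h>
#include <stdatomic.h>

/* Analogue of the kernel's test_and_set_bit(): atomically set the bit
 * and report whether it was already set, so only one thread wins. */
static int test_and_set_bit(int nr, atomic_ulong *word)
{
    unsigned long mask = 1ul << nr;

    return (atomic_fetch_or(word, mask) & mask) != 0;
}

int main(void)
{
    atomic_ulong flags = 0;

    if (!test_and_set_bit(3, &flags))
        puts("first caller: claimed, added to log list");
    if (test_and_set_bit(3, &flags))
        puts("second caller: already claimed, skipped");
    return 0;
}
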
4433 diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
4434 index ee1c60454a5f..8b40b35e5e0e 100644
4435 --- a/fs/btrfs/tree-log.c
4436 +++ b/fs/btrfs/tree-log.c
4437 @@ -1010,7 +1010,7 @@ again:
4438 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
4439
4440 while (cur_offset < item_size) {
4441 - extref = (struct btrfs_inode_extref *)base + cur_offset;
4442 + extref = (struct btrfs_inode_extref *)(base + cur_offset);
4443
4444 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
4445
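
The one-character tree-log fix is a precedence bug worth spelling out: a cast binds tighter than '+', so (struct btrfs_inode_extref *)base + cur_offset advances in units of the struct size instead of bytes. A small demonstration with a stand-in struct:

#include <stdio.h>

struct extref { unsigned short name_len; char name[6]; }; /* 8 bytes */

int main(void)
{
    char base[64] = {0};
    unsigned long cur_offset = 4;

    /* buggy form: cast first, then pointer arithmetic scaled by the
     * struct size => base + 4 * 8 = base + 32 */
    struct extref *wrong = (struct extref *)base + cur_offset;

    /* fixed form: add the byte offset first, then cast => base + 4 */
    struct extref *right = (struct extref *)(base + cur_offset);

    printf("wrong: +%td bytes, right: +%td bytes\n",
           (char *)wrong - base, (char *)right - base);
    return 0;
}
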
4446 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
4447 index 05f2960ed7c3..6f0ce531e221 100644
4448 --- a/fs/debugfs/inode.c
4449 +++ b/fs/debugfs/inode.c
4450 @@ -246,10 +246,19 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
4451 return 0;
4452 }
4453
4454 +static void debugfs_evict_inode(struct inode *inode)
4455 +{
4456 + truncate_inode_pages_final(&inode->i_data);
4457 + clear_inode(inode);
4458 + if (S_ISLNK(inode->i_mode))
4459 + kfree(inode->i_private);
4460 +}
4461 +
4462 static const struct super_operations debugfs_super_operations = {
4463 .statfs = simple_statfs,
4464 .remount_fs = debugfs_remount,
4465 .show_options = debugfs_show_options,
4466 + .evict_inode = debugfs_evict_inode,
4467 };
4468
4469 static int debug_fill_super(struct super_block *sb, void *data, int silent)
4470 @@ -466,23 +475,14 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
4471 int ret = 0;
4472
4473 if (debugfs_positive(dentry)) {
4474 - if (dentry->d_inode) {
4475 - dget(dentry);
4476 - switch (dentry->d_inode->i_mode & S_IFMT) {
4477 - case S_IFDIR:
4478 - ret = simple_rmdir(parent->d_inode, dentry);
4479 - break;
4480 - case S_IFLNK:
4481 - kfree(dentry->d_inode->i_private);
4482 - /* fall through */
4483 - default:
4484 - simple_unlink(parent->d_inode, dentry);
4485 - break;
4486 - }
4487 - if (!ret)
4488 - d_delete(dentry);
4489 - dput(dentry);
4490 - }
4491 + dget(dentry);
4492 + if (S_ISDIR(dentry->d_inode->i_mode))
4493 + ret = simple_rmdir(parent->d_inode, dentry);
4494 + else
4495 + simple_unlink(parent->d_inode, dentry);
4496 + if (!ret)
4497 + d_delete(dentry);
4498 + dput(dentry);
4499 }
4500 return ret;
4501 }
4502 diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
4503 index 54742f9a67a8..77a3db3791c7 100644
4504 --- a/fs/ecryptfs/file.c
4505 +++ b/fs/ecryptfs/file.c
4506 @@ -303,9 +303,22 @@ ecryptfs_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4507 struct file *lower_file = ecryptfs_file_to_lower(file);
4508 long rc = -ENOTTY;
4509
4510 - if (lower_file->f_op->unlocked_ioctl)
4511 + if (!lower_file->f_op->unlocked_ioctl)
4512 + return rc;
4513 +
4514 + switch (cmd) {
4515 + case FITRIM:
4516 + case FS_IOC_GETFLAGS:
4517 + case FS_IOC_SETFLAGS:
4518 + case FS_IOC_GETVERSION:
4519 + case FS_IOC_SETVERSION:
4520 rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
4521 - return rc;
4522 + fsstack_copy_attr_all(file_inode(file), file_inode(lower_file));
4523 +
4524 + return rc;
4525 + default:
4526 + return rc;
4527 + }
4528 }
4529
4530 #ifdef CONFIG_COMPAT
4531 @@ -315,9 +328,22 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4532 struct file *lower_file = ecryptfs_file_to_lower(file);
4533 long rc = -ENOIOCTLCMD;
4534
4535 - if (lower_file->f_op->compat_ioctl)
4536 + if (!lower_file->f_op->compat_ioctl)
4537 + return rc;
4538 +
4539 + switch (cmd) {
4540 + case FITRIM:
4541 + case FS_IOC32_GETFLAGS:
4542 + case FS_IOC32_SETFLAGS:
4543 + case FS_IOC32_GETVERSION:
4544 + case FS_IOC32_SETVERSION:
4545 rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
4546 - return rc;
4547 + fsstack_copy_attr_all(file_inode(file), file_inode(lower_file));
4548 +
4549 + return rc;
4550 + default:
4551 + return rc;
4552 + }
4553 }
4554 #endif
4555
4556 diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
4557 index 3088e2a38e30..7b3143064af1 100644
4558 --- a/fs/gfs2/acl.c
4559 +++ b/fs/gfs2/acl.c
4560 @@ -73,7 +73,7 @@ int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
4561
4562 BUG_ON(name == NULL);
4563
4564 - if (acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode)))
4565 + if (acl && acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode)))
4566 return -E2BIG;
4567
4568 if (type == ACL_TYPE_ACCESS) {
4569 diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
4570 index 7f3f60641344..4030b558b07e 100644
4571 --- a/fs/nfs/delegation.c
4572 +++ b/fs/nfs/delegation.c
4573 @@ -177,8 +177,8 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
4574 &delegation->flags);
4575 NFS_I(inode)->delegation_state = delegation->type;
4576 spin_unlock(&delegation->lock);
4577 - put_rpccred(oldcred);
4578 rcu_read_unlock();
4579 + put_rpccred(oldcred);
4580 trace_nfs4_reclaim_delegation(inode, res->delegation_type);
4581 } else {
4582 /* We appear to have raced with a delegation return. */
4583 diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
4584 index 6e62155abf26..7a8d67cd823d 100644
4585 --- a/fs/nfs/dir.c
4586 +++ b/fs/nfs/dir.c
4587 @@ -469,6 +469,8 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
4588 struct inode *inode;
4589 int status;
4590
4591 + if (!(entry->fattr->valid & NFS_ATTR_FATTR_FSID))
4592 + return;
4593 if (filename.name[0] == '.') {
4594 if (filename.len == 1)
4595 return;
4596 @@ -479,6 +481,10 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
4597
4598 dentry = d_lookup(parent, &filename);
4599 if (dentry != NULL) {
4600 + /* Is there a mountpoint here? If so, just exit */
4601 + if (!nfs_fsid_equal(&NFS_SB(dentry->d_sb)->fsid,
4602 + &entry->fattr->fsid))
4603 + goto out;
4604 if (nfs_same_file(dentry, entry)) {
4605 nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
4606 status = nfs_refresh_inode(dentry->d_inode, entry->fattr);
4607 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
4608 index d66e3ad1de48..5c9c13ee72f9 100644
4609 --- a/fs/nfsd/nfs4state.c
4610 +++ b/fs/nfsd/nfs4state.c
4611 @@ -1650,7 +1650,7 @@ __destroy_client(struct nfs4_client *clp)
4612 nfs4_put_stid(&dp->dl_stid);
4613 }
4614 while (!list_empty(&clp->cl_revoked)) {
4615 - dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
4616 + dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
4617 list_del_init(&dp->dl_recall_lru);
4618 nfs4_put_stid(&dp->dl_stid);
4619 }
4620 diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
4621 index b2e3ff347620..ecdbae19a766 100644
4622 --- a/fs/nilfs2/btree.c
4623 +++ b/fs/nilfs2/btree.c
4624 @@ -31,6 +31,8 @@
4625 #include "alloc.h"
4626 #include "dat.h"
4627
4628 +static void __nilfs_btree_init(struct nilfs_bmap *bmap);
4629 +
4630 static struct nilfs_btree_path *nilfs_btree_alloc_path(void)
4631 {
4632 struct nilfs_btree_path *path;
4633 @@ -368,6 +370,34 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
4634 return ret;
4635 }
4636
4637 +/**
4638 + * nilfs_btree_root_broken - verify consistency of btree root node
4639 + * @node: btree root node to be examined
4640 + * @ino: inode number
4641 + *
4642 + * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
4643 + */
4644 +static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
4645 + unsigned long ino)
4646 +{
4647 + int level, flags, nchildren;
4648 + int ret = 0;
4649 +
4650 + level = nilfs_btree_node_get_level(node);
4651 + flags = nilfs_btree_node_get_flags(node);
4652 + nchildren = nilfs_btree_node_get_nchildren(node);
4653 +
4654 + if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
4655 + level > NILFS_BTREE_LEVEL_MAX ||
4656 + nchildren < 0 ||
4657 + nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
4658 + pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
4659 + ino, level, flags, nchildren);
4660 + ret = 1;
4661 + }
4662 + return ret;
4663 +}
4664 +
4665 int nilfs_btree_broken_node_block(struct buffer_head *bh)
4666 {
4667 int ret;
4668 @@ -1713,7 +1743,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree,
4669
4670 /* convert and insert */
4671 dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;
4672 - nilfs_btree_init(btree);
4673 + __nilfs_btree_init(btree);
4674 if (nreq != NULL) {
4675 nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);
4676 nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);
4677 @@ -2294,12 +2324,23 @@ static const struct nilfs_bmap_operations nilfs_btree_ops_gc = {
4678 .bop_gather_data = NULL,
4679 };
4680
4681 -int nilfs_btree_init(struct nilfs_bmap *bmap)
4682 +static void __nilfs_btree_init(struct nilfs_bmap *bmap)
4683 {
4684 bmap->b_ops = &nilfs_btree_ops;
4685 bmap->b_nchildren_per_block =
4686 NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap));
4687 - return 0;
4688 +}
4689 +
4690 +int nilfs_btree_init(struct nilfs_bmap *bmap)
4691 +{
4692 + int ret = 0;
4693 +
4694 + __nilfs_btree_init(bmap);
4695 +
4696 + if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap),
4697 + bmap->b_inode->i_ino))
4698 + ret = -EIO;
4699 + return ret;
4700 }
4701
4702 void nilfs_btree_init_gc(struct nilfs_bmap *bmap)
4703 diff --git a/fs/proc/generic.c b/fs/proc/generic.c
4704 index 317b72641ebf..228cc4eeeb4a 100644
4705 --- a/fs/proc/generic.c
4706 +++ b/fs/proc/generic.c
4707 @@ -19,7 +19,6 @@
4708 #include <linux/mount.h>
4709 #include <linux/init.h>
4710 #include <linux/idr.h>
4711 -#include <linux/namei.h>
4712 #include <linux/bitops.h>
4713 #include <linux/spinlock.h>
4714 #include <linux/completion.h>
4715 @@ -162,17 +161,6 @@ void proc_free_inum(unsigned int inum)
4716 spin_unlock_irqrestore(&proc_inum_lock, flags);
4717 }
4718
4719 -static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
4720 -{
4721 - nd_set_link(nd, __PDE_DATA(dentry->d_inode));
4722 - return NULL;
4723 -}
4724 -
4725 -static const struct inode_operations proc_link_inode_operations = {
4726 - .readlink = generic_readlink,
4727 - .follow_link = proc_follow_link,
4728 -};
4729 -
4730 /*
4731 * Don't create negative dentries here, return -ENOENT by hand
4732 * instead.
4733 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
4734 index 333080d7a671..54ed31cfb398 100644
4735 --- a/fs/proc/inode.c
4736 +++ b/fs/proc/inode.c
4737 @@ -23,6 +23,7 @@
4738 #include <linux/slab.h>
4739 #include <linux/mount.h>
4740 #include <linux/magic.h>
4741 +#include <linux/namei.h>
4742
4743 #include <asm/uaccess.h>
4744
4745 @@ -401,6 +402,26 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
4746 };
4747 #endif
4748
4749 +static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
4750 +{
4751 + struct proc_dir_entry *pde = PDE(dentry->d_inode);
4752 + if (unlikely(!use_pde(pde)))
4753 + return ERR_PTR(-EINVAL);
4754 + nd_set_link(nd, pde->data);
4755 + return pde;
4756 +}
4757 +
4758 +static void proc_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
4759 +{
4760 + unuse_pde(p);
4761 +}
4762 +
4763 +const struct inode_operations proc_link_inode_operations = {
4764 + .readlink = generic_readlink,
4765 + .follow_link = proc_follow_link,
4766 + .put_link = proc_put_link,
4767 +};
4768 +
4769 struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
4770 {
4771 struct inode *inode = new_inode_pseudo(sb);
4772 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
4773 index aa7a0ee182e1..73f8190de795 100644
4774 --- a/fs/proc/internal.h
4775 +++ b/fs/proc/internal.h
4776 @@ -199,6 +199,7 @@ struct pde_opener {
4777 int closing;
4778 struct completion *c;
4779 };
4780 +extern const struct inode_operations proc_link_inode_operations;
4781
4782 extern const struct inode_operations proc_pid_link_inode_operations;
4783
4784 diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
4785 index a70d45647898..47116c87ab10 100644
4786 --- a/include/drm/i915_pciids.h
4787 +++ b/include/drm/i915_pciids.h
4788 @@ -214,9 +214,9 @@
4789 INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
4790
4791 #define _INTEL_BDW_M_IDS(gt, info) \
4792 - _INTEL_BDW_M(gt, 0x1602, info), /* ULT */ \
4793 + _INTEL_BDW_M(gt, 0x1602, info), /* Halo */ \
4794 _INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \
4795 - _INTEL_BDW_M(gt, 0x160B, info), /* Iris */ \
4796 + _INTEL_BDW_M(gt, 0x160B, info), /* ULT */ \
4797 _INTEL_BDW_M(gt, 0x160E, info) /* ULX */
4798
4799 #define _INTEL_BDW_D_IDS(gt, info) \
4800 diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
4801 index 9bb547c7bce7..704a1ab8240c 100644
4802 --- a/include/linux/usb/serial.h
4803 +++ b/include/linux/usb/serial.h
4804 @@ -190,8 +190,7 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data)
4805 * @num_ports: the number of different ports this device will have.
4806 * @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer
4807 * (0 = end-point size)
4808 - * @bulk_out_size: minimum number of bytes to allocate for bulk-out buffer
4809 - * (0 = end-point size)
4810 + * @bulk_out_size: bytes to allocate for bulk-out buffer (0 = end-point size)
4811 * @calc_num_ports: pointer to a function to determine how many ports this
4812 * device has dynamically. It will be called after the probe()
4813 * callback is called, but before attach()
4814 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
4815 index 23c518a0340c..1fbd69cfd0b7 100644
4816 --- a/include/target/target_core_base.h
4817 +++ b/include/target/target_core_base.h
4818 @@ -409,7 +409,7 @@ struct t10_reservation {
4819 /* Activate Persistence across Target Power Loss enabled
4820 * for SCSI device */
4821 int pr_aptpl_active;
4822 -#define PR_APTPL_BUF_LEN 8192
4823 +#define PR_APTPL_BUF_LEN 262144
4824 u32 pr_generation;
4825 spinlock_t registration_lock;
4826 spinlock_t aptpl_reg_lock;
4827 diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
4828 index aece1346ceb7..4ad10baecd4d 100644
4829 --- a/include/trace/events/kmem.h
4830 +++ b/include/trace/events/kmem.h
4831 @@ -268,11 +268,11 @@ TRACE_EVENT(mm_page_alloc_extfrag,
4832
4833 TP_PROTO(struct page *page,
4834 int alloc_order, int fallback_order,
4835 - int alloc_migratetype, int fallback_migratetype, int new_migratetype),
4836 + int alloc_migratetype, int fallback_migratetype),
4837
4838 TP_ARGS(page,
4839 alloc_order, fallback_order,
4840 - alloc_migratetype, fallback_migratetype, new_migratetype),
4841 + alloc_migratetype, fallback_migratetype),
4842
4843 TP_STRUCT__entry(
4844 __field( struct page *, page )
4845 @@ -289,7 +289,8 @@ TRACE_EVENT(mm_page_alloc_extfrag,
4846 __entry->fallback_order = fallback_order;
4847 __entry->alloc_migratetype = alloc_migratetype;
4848 __entry->fallback_migratetype = fallback_migratetype;
4849 - __entry->change_ownership = (new_migratetype == alloc_migratetype);
4850 + __entry->change_ownership = (alloc_migratetype ==
4851 + get_pageblock_migratetype(page));
4852 ),
4853
4854 TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
4855 diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
4856 index 7c98873a3077..41d53e515914 100644
4857 --- a/kernel/locking/rtmutex.c
4858 +++ b/kernel/locking/rtmutex.c
4859 @@ -1193,7 +1193,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
4860 set_current_state(TASK_RUNNING);
4861
4862 if (unlikely(ret)) {
4863 - remove_waiter(lock, &waiter);
4864 + if (rt_mutex_has_waiters(lock))
4865 + remove_waiter(lock, &waiter);
4866 rt_mutex_handle_deadlock(ret, chwalk, &waiter);
4867 }
4868
4869 diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
4870 index 8a2e230fb86a..eae160dd669d 100644
4871 --- a/kernel/sched/auto_group.c
4872 +++ b/kernel/sched/auto_group.c
4873 @@ -87,8 +87,7 @@ static inline struct autogroup *autogroup_create(void)
4874 * so we don't have to move tasks around upon policy change,
4875 * or flail around trying to allocate bandwidth on the fly.
4876 * A bandwidth exception in __sched_setscheduler() allows
4877 - * the policy change to proceed. Thereafter, task_group()
4878 - * returns &root_task_group, so zero bandwidth is required.
4879 + * the policy change to proceed.
4880 */
4881 free_rt_sched_group(tg);
4882 tg->rt_se = root_task_group.rt_se;
4883 @@ -115,9 +114,6 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
4884 if (tg != &root_task_group)
4885 return false;
4886
4887 - if (p->sched_class != &fair_sched_class)
4888 - return false;
4889 -
4890 /*
4891 * We can only assume the task group can't go away on us if
4892 * autogroup_move_group() can see us on ->thread_group list.
4893 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
4894 index efdca2f08222..9f5ed5e70eaa 100644
4895 --- a/kernel/sched/core.c
4896 +++ b/kernel/sched/core.c
4897 @@ -490,6 +490,11 @@ static __init void init_hrtick(void)
4898 */
4899 void hrtick_start(struct rq *rq, u64 delay)
4900 {
4901 + /*
4902 + * Don't schedule slices shorter than 10000ns; that just
4903 + * doesn't make sense. Rely on vruntime for fairness.
4904 + */
4905 + delay = max_t(u64, delay, 10000LL);
4906 __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
4907 HRTIMER_MODE_REL_PINNED, 0);
4908 }
4909 @@ -7465,6 +7470,12 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
4910 {
4911 struct task_struct *g, *p;
4912
4913 + /*
4914 + * Autogroups do not have RT tasks; see autogroup_create().
4915 + */
4916 + if (task_group_is_autogroup(tg))
4917 + return 0;
4918 +
4919 for_each_process_thread(g, p) {
4920 if (rt_task(p) && task_group(p) == tg)
4921 return 1;
4922 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
4923 index 15f2511a1b7c..cd0e835ecb85 100644
4924 --- a/kernel/sysctl.c
4925 +++ b/kernel/sysctl.c
4926 @@ -1232,7 +1232,6 @@ static struct ctl_table vm_table[] = {
4927 .maxlen = sizeof(unsigned long),
4928 .mode = 0644,
4929 .proc_handler = hugetlb_sysctl_handler,
4930 - .extra1 = &zero,
4931 },
4932 #ifdef CONFIG_NUMA
4933 {
4934 @@ -1241,7 +1240,6 @@ static struct ctl_table vm_table[] = {
4935 .maxlen = sizeof(unsigned long),
4936 .mode = 0644,
4937 .proc_handler = &hugetlb_mempolicy_sysctl_handler,
4938 - .extra1 = &zero,
4939 },
4940 #endif
4941 {
4942 @@ -1264,7 +1262,6 @@ static struct ctl_table vm_table[] = {
4943 .maxlen = sizeof(unsigned long),
4944 .mode = 0644,
4945 .proc_handler = hugetlb_overcommit_handler,
4946 - .extra1 = &zero,
4947 },
4948 #endif
4949 {
4950 diff --git a/mm/compaction.c b/mm/compaction.c
4951 index f9792ba3537c..b47f08e159d4 100644
4952 --- a/mm/compaction.c
4953 +++ b/mm/compaction.c
4954 @@ -1027,8 +1027,10 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
4955 low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
4956 isolate_mode);
4957
4958 - if (!low_pfn || cc->contended)
4959 + if (!low_pfn || cc->contended) {
4960 + acct_isolated(zone, cc);
4961 return ISOLATE_ABORT;
4962 + }
4963
4964 /*
4965 * Either we isolated something and proceed with migration. Or
4966 @@ -1100,7 +1102,7 @@ static int compact_finished(struct zone *zone, struct compact_control *cc,
4967 return COMPACT_PARTIAL;
4968
4969 /* Job done if allocation would set block type */
4970 - if (cc->order >= pageblock_order && area->nr_free)
4971 + if (order >= pageblock_order && area->nr_free)
4972 return COMPACT_PARTIAL;
4973 }
4974
4975 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
4976 index f08fec71ec5a..4cacc6a8a6c1 100644
4977 --- a/mm/hugetlb.c
4978 +++ b/mm/hugetlb.c
4979 @@ -2653,9 +2653,10 @@ again:
4980 goto unlock;
4981
4982 /*
4983 - * HWPoisoned hugepage is already unmapped and dropped reference
4984 + * A migrating or HWPoisoned hugepage is already
4985 + * unmapped and its refcount is dropped, so just clear the pte here.
4986 */
4987 - if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
4988 + if (unlikely(!pte_present(pte))) {
4989 huge_pte_clear(mm, address, ptep);
4990 goto unlock;
4991 }
4992 @@ -3128,6 +3129,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
4993 struct page *pagecache_page = NULL;
4994 struct hstate *h = hstate_vma(vma);
4995 struct address_space *mapping;
4996 + int need_wait_lock = 0;
4997
4998 address &= huge_page_mask(h);
4999
5000 @@ -3166,6 +3168,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
5001 ret = 0;
5002
5003 /*
5004 + * entry could be a migration/hwpoison entry at this point, so this
5005 + * check prevents the kernel from proceeding below assuming that we have
5006 + * an active hugepage in the pagecache. This goto expects the 2nd page fault,
5007 + * and the is_hugetlb_entry_(migration|hwpoisoned) check will properly
5008 + * handle it.
5009 + */
5010 + if (!pte_present(entry))
5011 + goto out_mutex;
5012 +
5013 + /*
5014 * If we are going to COW the mapping later, we examine the pending
5015 * reservations for this page now. This will ensure that any
5016 * allocations necessary to record that reservation occur outside the
5017 @@ -3184,30 +3196,31 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
5018 vma, address);
5019 }
5020
5021 + ptl = huge_pte_lock(h, mm, ptep);
5022 +
5023 + /* Check for a racing update before calling hugetlb_cow */
5024 + if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
5025 + goto out_ptl;
5026 +
5027 /*
5028 * hugetlb_cow() requires page locks of pte_page(entry) and
5029 * pagecache_page, so here we need take the former one
5030 * when page != pagecache_page or !pagecache_page.
5031 - * Note that locking order is always pagecache_page -> page,
5032 - * so no worry about deadlock.
5033 */
5034 page = pte_page(entry);
5035 - get_page(page);
5036 if (page != pagecache_page)
5037 - lock_page(page);
5038 -
5039 - ptl = huge_pte_lockptr(h, mm, ptep);
5040 - spin_lock(ptl);
5041 - /* Check for a racing update before calling hugetlb_cow */
5042 - if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
5043 - goto out_ptl;
5044 + if (!trylock_page(page)) {
5045 + need_wait_lock = 1;
5046 + goto out_ptl;
5047 + }
5048
5049 + get_page(page);
5050
5051 if (flags & FAULT_FLAG_WRITE) {
5052 if (!huge_pte_write(entry)) {
5053 ret = hugetlb_cow(mm, vma, address, ptep, entry,
5054 pagecache_page, ptl);
5055 - goto out_ptl;
5056 + goto out_put_page;
5057 }
5058 entry = huge_pte_mkdirty(entry);
5059 }
5060 @@ -3215,7 +3228,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
5061 if (huge_ptep_set_access_flags(vma, address, ptep, entry,
5062 flags & FAULT_FLAG_WRITE))
5063 update_mmu_cache(vma, address, ptep);
5064 -
5065 +out_put_page:
5066 + if (page != pagecache_page)
5067 + unlock_page(page);
5068 + put_page(page);
5069 out_ptl:
5070 spin_unlock(ptl);
5071
5072 @@ -3223,12 +3239,17 @@ out_ptl:
5073 unlock_page(pagecache_page);
5074 put_page(pagecache_page);
5075 }
5076 - if (page != pagecache_page)
5077 - unlock_page(page);
5078 - put_page(page);
5079 -
5080 out_mutex:
5081 mutex_unlock(&htlb_fault_mutex_table[hash]);
5082 + /*
5083 + * Generally it's safe to hold a refcount while waiting for a page
5084 + * lock. But here we just wait to defer the next page fault and avoid a
5085 + * busy loop; the page is not used after it is unlocked and before the
5086 + * current page fault returns. So we are safe from accessing a freed
5087 + * page, even if we wait here without taking a refcount.
5088 + */
5089 + if (need_wait_lock)
5090 + wait_on_page_locked(page);
5091 return ret;
5092 }
5093
5094 @@ -3358,7 +3379,26 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
5095 spin_unlock(ptl);
5096 continue;
5097 }
5098 - if (!huge_pte_none(huge_ptep_get(ptep))) {
5099 + pte = huge_ptep_get(ptep);
5100 + if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
5101 + spin_unlock(ptl);
5102 + continue;
5103 + }
5104 + if (unlikely(is_hugetlb_entry_migration(pte))) {
5105 + swp_entry_t entry = pte_to_swp_entry(pte);
5106 +
5107 + if (is_write_migration_entry(entry)) {
5108 + pte_t newpte;
5109 +
5110 + make_migration_entry_read(&entry);
5111 + newpte = swp_entry_to_pte(entry);
5112 + set_huge_pte_at(mm, address, ptep, newpte);
5113 + pages++;
5114 + }
5115 + spin_unlock(ptl);
5116 + continue;
5117 + }
5118 + if (!huge_pte_none(pte)) {
5119 pte = huge_ptep_get_and_clear(mm, address, ptep);
5120 pte = pte_mkhuge(huge_pte_modify(pte, newprot));
5121 pte = arch_make_huge_pte(pte, vma, NULL, 0);
5122 diff --git a/mm/memory.c b/mm/memory.c
5123 index d442584fd281..4ffa7b571fb8 100644
5124 --- a/mm/memory.c
5125 +++ b/mm/memory.c
5126 @@ -3558,7 +3558,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
5127 if (follow_phys(vma, addr, write, &prot, &phys_addr))
5128 return -EINVAL;
5129
5130 - maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
5131 + maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
5132 if (write)
5133 memcpy_toio(maddr + offset, buf, len);
5134 else
5135 diff --git a/mm/mmap.c b/mm/mmap.c
5136 index 1620adbbd77f..3c83bec2274c 100644
5137 --- a/mm/mmap.c
5138 +++ b/mm/mmap.c
5139 @@ -152,7 +152,7 @@ EXPORT_SYMBOL_GPL(vm_memory_committed);
5140 */
5141 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
5142 {
5143 - unsigned long free, allowed, reserve;
5144 + long free, allowed, reserve;
5145
5146 VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
5147 -(s64)vm_committed_as_batch * num_online_cpus(),
5148 @@ -220,7 +220,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
5149 */
5150 if (mm) {
5151 reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
5152 - allowed -= min(mm->total_vm / 32, reserve);
5153 + allowed -= min_t(long, mm->total_vm / 32, reserve);
5154 }
5155
5156 if (percpu_counter_read_positive(&vm_committed_as) < allowed)
5157 diff --git a/mm/nommu.c b/mm/nommu.c
5158 index bd1808e194a7..b5ba5bc02e4b 100644
5159 --- a/mm/nommu.c
5160 +++ b/mm/nommu.c
5161 @@ -1905,7 +1905,7 @@ EXPORT_SYMBOL(unmap_mapping_range);
5162 */
5163 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
5164 {
5165 - unsigned long free, allowed, reserve;
5166 + long free, allowed, reserve;
5167
5168 vm_acct_memory(pages);
5169
5170 @@ -1969,7 +1969,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
5171 */
5172 if (mm) {
5173 reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
5174 - allowed -= min(mm->total_vm / 32, reserve);
5175 + allowed -= min_t(long, mm->total_vm / 32, reserve);
5176 }
5177
5178 if (percpu_counter_read_positive(&vm_committed_as) < allowed)
5179 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
5180 index 616a2c956b4b..c32cb64a1277 100644
5181 --- a/mm/page_alloc.c
5182 +++ b/mm/page_alloc.c
5183 @@ -1073,8 +1073,8 @@ static void change_pageblock_range(struct page *pageblock_page,
5184 * nor move CMA pages to different free lists. We don't want unmovable pages
5185 * to be allocated from MIGRATE_CMA areas.
5186 *
5187 - * Returns the new migratetype of the pageblock (or the same old migratetype
5188 - * if it was unchanged).
5189 + * Returns the allocation migratetype if free pages were stolen, or the
5190 + * fallback migratetype if it was decided not to steal.
5191 */
5192 static int try_to_steal_freepages(struct zone *zone, struct page *page,
5193 int start_type, int fallback_type)
5194 @@ -1105,12 +1105,10 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page,
5195
5196 /* Claim the whole block if over half of it is free */
5197 if (pages >= (1 << (pageblock_order-1)) ||
5198 - page_group_by_mobility_disabled) {
5199 -
5200 + page_group_by_mobility_disabled)
5201 set_pageblock_migratetype(page, start_type);
5202 - return start_type;
5203 - }
5204
5205 + return start_type;
5206 }
5207
5208 return fallback_type;
5209 @@ -1162,7 +1160,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
5210 set_freepage_migratetype(page, new_type);
5211
5212 trace_mm_page_alloc_extfrag(page, order, current_order,
5213 - start_migratetype, migratetype, new_type);
5214 + start_migratetype, migratetype);
5215
5216 return page;
5217 }
5218 diff --git a/mm/vmstat.c b/mm/vmstat.c
5219 index 1b12d390dc68..4590aa42b6cd 100644
5220 --- a/mm/vmstat.c
5221 +++ b/mm/vmstat.c
5222 @@ -1348,7 +1348,7 @@ static void __init start_shepherd_timer(void)
5223 int cpu;
5224
5225 for_each_possible_cpu(cpu)
5226 - INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
5227 + INIT_DELAYED_WORK(per_cpu_ptr(&vmstat_work, cpu),
5228 vmstat_update);
5229
5230 if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL))
5231 diff --git a/net/compat.c b/net/compat.c
5232 index bc8aeefddf3f..c48930373e65 100644
5233 --- a/net/compat.c
5234 +++ b/net/compat.c
5235 @@ -738,24 +738,18 @@ static unsigned char nas[21] = {
5236
5237 COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
5238 {
5239 - if (flags & MSG_CMSG_COMPAT)
5240 - return -EINVAL;
5241 return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
5242 }
5243
5244 COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
5245 unsigned int, vlen, unsigned int, flags)
5246 {
5247 - if (flags & MSG_CMSG_COMPAT)
5248 - return -EINVAL;
5249 return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
5250 flags | MSG_CMSG_COMPAT);
5251 }
5252
5253 COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
5254 {
5255 - if (flags & MSG_CMSG_COMPAT)
5256 - return -EINVAL;
5257 return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
5258 }
5259
5260 @@ -778,9 +772,6 @@ COMPAT_SYSCALL_DEFINE5(recvmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
5261 int datagrams;
5262 struct timespec ktspec;
5263
5264 - if (flags & MSG_CMSG_COMPAT)
5265 - return -EINVAL;
5266 -
5267 if (timeout == NULL)
5268 return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
5269 flags | MSG_CMSG_COMPAT, NULL);
5270 diff --git a/net/core/dev.c b/net/core/dev.c
5271 index 9704a5c1103e..5db3a3f96198 100644
5272 --- a/net/core/dev.c
5273 +++ b/net/core/dev.c
5274 @@ -943,7 +943,7 @@ bool dev_valid_name(const char *name)
5275 return false;
5276
5277 while (*name) {
5278 - if (*name == '/' || isspace(*name))
5279 + if (*name == '/' || *name == ':' || isspace(*name))
5280 return false;
5281 name++;
5282 }
5283 diff --git a/net/core/flow.c b/net/core/flow.c
5284 index a0348fde1fdf..1033725be40b 100644
5285 --- a/net/core/flow.c
5286 +++ b/net/core/flow.c
5287 @@ -379,7 +379,7 @@ done:
5288 static void flow_cache_flush_task(struct work_struct *work)
5289 {
5290 struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
5291 - flow_cache_gc_work);
5292 + flow_cache_flush_work);
5293 struct net *net = container_of(xfrm, struct net, xfrm);
5294
5295 flow_cache_flush(net);
5296 diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
5297 index 0c08062d1796..1e2f46a69d50 100644
5298 --- a/net/core/gen_stats.c
5299 +++ b/net/core/gen_stats.c
5300 @@ -32,6 +32,9 @@ gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
5301 return 0;
5302
5303 nla_put_failure:
5304 + kfree(d->xstats);
5305 + d->xstats = NULL;
5306 + d->xstats_len = 0;
5307 spin_unlock_bh(d->lock);
5308 return -1;
5309 }
5310 @@ -305,7 +308,9 @@ int
5311 gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
5312 {
5313 if (d->compat_xstats) {
5314 - d->xstats = st;
5315 + d->xstats = kmemdup(st, len, GFP_ATOMIC);
5316 + if (!d->xstats)
5317 + goto err_out;
5318 d->xstats_len = len;
5319 }
5320
5321 @@ -313,6 +318,11 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
5322 return gnet_stats_copy(d, TCA_STATS_APP, st, len);
5323
5324 return 0;
5325 +
5326 +err_out:
5327 + d->xstats_len = 0;
5328 + spin_unlock_bh(d->lock);
5329 + return -1;
5330 }
5331 EXPORT_SYMBOL(gnet_stats_copy_app);
5332
5333 @@ -345,6 +355,9 @@ gnet_stats_finish_copy(struct gnet_dump *d)
5334 return -1;
5335 }
5336
5337 + kfree(d->xstats);
5338 + d->xstats = NULL;
5339 + d->xstats_len = 0;
5340 spin_unlock_bh(d->lock);
5341 return 0;
5342 }
5343 diff --git a/net/core/pktgen.c b/net/core/pktgen.c
5344 index 443256bdcddc..0b320d93fb56 100644
5345 --- a/net/core/pktgen.c
5346 +++ b/net/core/pktgen.c
5347 @@ -1134,6 +1134,9 @@ static ssize_t pktgen_if_write(struct file *file,
5348 return len;
5349
5350 i += len;
5351 + if ((value > 1) &&
5352 + (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
5353 + return -ENOTSUPP;
5354 pkt_dev->burst = value < 1 ? 1 : value;
5355 sprintf(pg_result, "OK: burst=%d", pkt_dev->burst);
5356 return count;
5357 @@ -2842,25 +2845,25 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
5358 skb->dev = odev;
5359 skb->pkt_type = PACKET_HOST;
5360
5361 + pktgen_finalize_skb(pkt_dev, skb, datalen);
5362 +
5363 if (!(pkt_dev->flags & F_UDPCSUM)) {
5364 skb->ip_summed = CHECKSUM_NONE;
5365 } else if (odev->features & NETIF_F_V4_CSUM) {
5366 skb->ip_summed = CHECKSUM_PARTIAL;
5367 skb->csum = 0;
5368 - udp4_hwcsum(skb, udph->source, udph->dest);
5369 + udp4_hwcsum(skb, iph->saddr, iph->daddr);
5370 } else {
5371 - __wsum csum = udp_csum(skb);
5372 + __wsum csum = skb_checksum(skb, skb_transport_offset(skb), datalen + 8, 0);
5373
5374 /* add protocol-dependent pseudo-header */
5375 - udph->check = csum_tcpudp_magic(udph->source, udph->dest,
5376 + udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
5377 datalen + 8, IPPROTO_UDP, csum);
5378
5379 if (udph->check == 0)
5380 udph->check = CSUM_MANGLED_0;
5381 }
5382
5383 - pktgen_finalize_skb(pkt_dev, skb, datalen);
5384 -
5385 #ifdef CONFIG_XFRM
5386 if (!process_ipsec(pkt_dev, skb, protocol))
5387 return NULL;
5388 @@ -2976,6 +2979,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
5389 skb->dev = odev;
5390 skb->pkt_type = PACKET_HOST;
5391
5392 + pktgen_finalize_skb(pkt_dev, skb, datalen);
5393 +
5394 if (!(pkt_dev->flags & F_UDPCSUM)) {
5395 skb->ip_summed = CHECKSUM_NONE;
5396 } else if (odev->features & NETIF_F_V6_CSUM) {
5397 @@ -2984,7 +2989,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
5398 skb->csum_offset = offsetof(struct udphdr, check);
5399 udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0);
5400 } else {
5401 - __wsum csum = udp_csum(skb);
5402 + __wsum csum = skb_checksum(skb, skb_transport_offset(skb), udplen, 0);
5403
5404 /* add protocol-dependent pseudo-header */
5405 udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum);
5406 @@ -2993,8 +2998,6 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
5407 udph->check = CSUM_MANGLED_0;
5408 }
5409
5410 - pktgen_finalize_skb(pkt_dev, skb, datalen);
5411 -
5412 return skb;
5413 }
5414
5415 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
5416 index ca82629de0b2..c522f7a00eab 100644
5417 --- a/net/core/rtnetlink.c
5418 +++ b/net/core/rtnetlink.c
5419 @@ -1212,18 +1212,12 @@ static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
5420 };
5421
5422 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
5423 - [IFLA_VF_MAC] = { .type = NLA_BINARY,
5424 - .len = sizeof(struct ifla_vf_mac) },
5425 - [IFLA_VF_VLAN] = { .type = NLA_BINARY,
5426 - .len = sizeof(struct ifla_vf_vlan) },
5427 - [IFLA_VF_TX_RATE] = { .type = NLA_BINARY,
5428 - .len = sizeof(struct ifla_vf_tx_rate) },
5429 - [IFLA_VF_SPOOFCHK] = { .type = NLA_BINARY,
5430 - .len = sizeof(struct ifla_vf_spoofchk) },
5431 - [IFLA_VF_RATE] = { .type = NLA_BINARY,
5432 - .len = sizeof(struct ifla_vf_rate) },
5433 - [IFLA_VF_LINK_STATE] = { .type = NLA_BINARY,
5434 - .len = sizeof(struct ifla_vf_link_state) },
5435 + [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
5436 + [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
5437 + [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) },
5438 + [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
5439 + [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
5440 + [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
5441 };
5442
5443 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
5444 @@ -1255,7 +1249,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
5445 s_h = cb->args[0];
5446 s_idx = cb->args[1];
5447
5448 - rcu_read_lock();
5449 cb->seq = net->dev_base_seq;
5450
5451 /* A hack to preserve kernel<->userspace interface.
5452 @@ -1277,7 +1270,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
5453 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5454 idx = 0;
5455 head = &net->dev_index_head[h];
5456 - hlist_for_each_entry_rcu(dev, head, index_hlist) {
5457 + hlist_for_each_entry(dev, head, index_hlist) {
5458 if (idx < s_idx)
5459 goto cont;
5460 err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
5461 @@ -1299,7 +1292,6 @@ cont:
5462 }
5463 }
5464 out:
5465 - rcu_read_unlock();
5466 cb->args[1] = idx;
5467 cb->args[0] = h;
5468
5469 @@ -2105,8 +2097,16 @@ replay:
5470 }
5471 }
5472 err = rtnl_configure_link(dev, ifm);
5473 - if (err < 0)
5474 - unregister_netdevice(dev);
5475 + if (err < 0) {
5476 + if (ops->newlink) {
5477 + LIST_HEAD(list_kill);
5478 +
5479 + ops->dellink(dev, &list_kill);
5480 + unregister_netdevice_many(&list_kill);
5481 + } else {
5482 + unregister_netdevice(dev);
5483 + }
5484 + }
5485 out:
5486 put_net(dest_net);
5487 return err;
5488 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
5489 index d7543d0fd744..79589ae84a5d 100644
5490 --- a/net/core/skbuff.c
5491 +++ b/net/core/skbuff.c
5492 @@ -3515,13 +3515,14 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
5493 {
5494 struct sk_buff_head *q = &sk->sk_error_queue;
5495 struct sk_buff *skb, *skb_next;
5496 + unsigned long flags;
5497 int err = 0;
5498
5499 - spin_lock_bh(&q->lock);
5500 + spin_lock_irqsave(&q->lock, flags);
5501 skb = __skb_dequeue(q);
5502 if (skb && (skb_next = skb_peek(q)))
5503 err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
5504 - spin_unlock_bh(&q->lock);
5505 + spin_unlock_irqrestore(&q->lock, flags);
5506
5507 sk->sk_err = err;
5508 if (err)
5509 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
5510 index 2811cc18701a..b48e03cd6656 100644
5511 --- a/net/ipv4/ip_fragment.c
5512 +++ b/net/ipv4/ip_fragment.c
5513 @@ -660,27 +660,30 @@ EXPORT_SYMBOL(ip_defrag);
5514 struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
5515 {
5516 struct iphdr iph;
5517 + int netoff;
5518 u32 len;
5519
5520 if (skb->protocol != htons(ETH_P_IP))
5521 return skb;
5522
5523 - if (!skb_copy_bits(skb, 0, &iph, sizeof(iph)))
5524 + netoff = skb_network_offset(skb);
5525 +
5526 + if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
5527 return skb;
5528
5529 if (iph.ihl < 5 || iph.version != 4)
5530 return skb;
5531
5532 len = ntohs(iph.tot_len);
5533 - if (skb->len < len || len < (iph.ihl * 4))
5534 + if (skb->len < netoff + len || len < (iph.ihl * 4))
5535 return skb;
5536
5537 if (ip_is_fragment(&iph)) {
5538 skb = skb_share_check(skb, GFP_ATOMIC);
5539 if (skb) {
5540 - if (!pskb_may_pull(skb, iph.ihl*4))
5541 + if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
5542 return skb;
5543 - if (pskb_trim_rcsum(skb, len))
5544 + if (pskb_trim_rcsum(skb, netoff + len))
5545 return skb;
5546 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
5547 if (ip_defrag(skb, user))
5548 diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
5549 index 4a2a074bfb4a..357c2a914e77 100644
5550 --- a/net/ipv4/ip_output.c
5551 +++ b/net/ipv4/ip_output.c
5552 @@ -890,7 +890,8 @@ static int __ip_append_data(struct sock *sk,
5553 cork->length += length;
5554 if (((length > mtu) || (skb && skb_is_gso(skb))) &&
5555 (sk->sk_protocol == IPPROTO_UDP) &&
5556 - (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
5557 + (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
5558 + (sk->sk_type == SOCK_DGRAM)) {
5559 err = ip_ufo_append_data(sk, queue, getfrag, from, length,
5560 hh_len, fragheaderlen, transhdrlen,
5561 maxfraglen, flags);
5562 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
5563 index 5638b179b355..a5c49d657ab1 100644
5564 --- a/net/ipv4/ping.c
5565 +++ b/net/ipv4/ping.c
5566 @@ -259,6 +259,9 @@ int ping_init_sock(struct sock *sk)
5567 kgid_t low, high;
5568 int ret = 0;
5569
5570 + if (sk->sk_family == AF_INET6)
5571 + sk->sk_ipv6only = 1;
5572 +
5573 inet_get_ping_group_range_net(net, &low, &high);
5574 if (gid_lte(low, group) && gid_lte(group, high))
5575 return 0;
5576 @@ -305,6 +308,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
5577 if (addr_len < sizeof(*addr))
5578 return -EINVAL;
5579
5580 + if (addr->sin_family != AF_INET &&
5581 + !(addr->sin_family == AF_UNSPEC &&
5582 + addr->sin_addr.s_addr == htonl(INADDR_ANY)))
5583 + return -EAFNOSUPPORT;
5584 +
5585 pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
5586 sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
5587
5588 @@ -330,7 +338,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
5589 return -EINVAL;
5590
5591 if (addr->sin6_family != AF_INET6)
5592 - return -EINVAL;
5593 + return -EAFNOSUPPORT;
5594
5595 pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n",
5596 sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port));
5597 @@ -716,7 +724,7 @@ static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
5598 if (msg->msg_namelen < sizeof(*usin))
5599 return -EINVAL;
5600 if (usin->sin_family != AF_INET)
5601 - return -EINVAL;
5602 + return -EAFNOSUPPORT;
5603 daddr = usin->sin_addr.s_addr;
5604 /* no remote port */
5605 } else {
5606 diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
5607 index 815c85e3b1e0..c73077280ad4 100644
5608 --- a/net/ipv4/tcp_fastopen.c
5609 +++ b/net/ipv4/tcp_fastopen.c
5610 @@ -134,6 +134,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
5611 struct tcp_sock *tp;
5612 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
5613 struct sock *child;
5614 + u32 end_seq;
5615
5616 req->num_retrans = 0;
5617 req->num_timeout = 0;
5618 @@ -185,20 +186,35 @@ static bool tcp_fastopen_create_child(struct sock *sk,
5619
5620 /* Queue the data carried in the SYN packet. We need to first
5621 * bump skb's refcnt because the caller will attempt to free it.
5622 + * Note that IPv6 might also have used the skb_get() trick
5623 + * in tcp_v6_conn_request() to keep this SYN around (treq->pktopts),
5624 + * so we need to eventually get a clone of the packet
5625 + * before inserting it in sk_receive_queue.
5626 *
5627 * XXX (TFO) - we honor a zero-payload TFO request for now,
5628 * (any reason not to?) but no need to queue the skb since
5629 * there is no data. How about SYN+FIN?
5630 */
5631 - if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1) {
5632 - skb = skb_get(skb);
5633 - skb_dst_drop(skb);
5634 - __skb_pull(skb, tcp_hdr(skb)->doff * 4);
5635 - skb_set_owner_r(skb, child);
5636 - __skb_queue_tail(&child->sk_receive_queue, skb);
5637 - tp->syn_data_acked = 1;
5638 + end_seq = TCP_SKB_CB(skb)->end_seq;
5639 + if (end_seq != TCP_SKB_CB(skb)->seq + 1) {
5640 + struct sk_buff *skb2;
5641 +
5642 + if (unlikely(skb_shared(skb)))
5643 + skb2 = skb_clone(skb, GFP_ATOMIC);
5644 + else
5645 + skb2 = skb_get(skb);
5646 +
5647 + if (likely(skb2)) {
5648 + skb_dst_drop(skb2);
5649 + __skb_pull(skb2, tcp_hdrlen(skb));
5650 + skb_set_owner_r(skb2, child);
5651 + __skb_queue_tail(&child->sk_receive_queue, skb2);
5652 + tp->syn_data_acked = 1;
5653 + } else {
5654 + end_seq = TCP_SKB_CB(skb)->seq + 1;
5655 + }
5656 }
5657 - tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
5658 + tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = end_seq;
5659 sk->sk_data_ready(sk);
5660 bh_unlock_sock(child);
5661 sock_put(child);
5662 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
5663 index 0169ccf5aa4f..17a025847999 100644
5664 --- a/net/ipv6/addrconf.c
5665 +++ b/net/ipv6/addrconf.c
5666 @@ -4536,6 +4536,22 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
5667 return 0;
5668 }
5669
5670 +static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
5671 + [IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 },
5672 + [IFLA_INET6_TOKEN] = { .len = sizeof(struct in6_addr) },
5673 +};
5674 +
5675 +static int inet6_validate_link_af(const struct net_device *dev,
5676 + const struct nlattr *nla)
5677 +{
5678 + struct nlattr *tb[IFLA_INET6_MAX + 1];
5679 +
5680 + if (dev && !__in6_dev_get(dev))
5681 + return -EAFNOSUPPORT;
5682 +
5683 + return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy);
5684 +}
5685 +
5686 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
5687 {
5688 int err = -EINVAL;
5689 @@ -5351,6 +5367,7 @@ static struct rtnl_af_ops inet6_ops = {
5690 .family = AF_INET6,
5691 .fill_link_af = inet6_fill_link_af,
5692 .get_link_af_size = inet6_get_link_af_size,
5693 + .validate_link_af = inet6_validate_link_af,
5694 .set_link_af = inet6_set_link_af,
5695 };
5696
5697 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
5698 index 8e950c250ada..51add023b723 100644
5699 --- a/net/ipv6/ip6_output.c
5700 +++ b/net/ipv6/ip6_output.c
5701 @@ -1296,7 +1296,8 @@ emsgsize:
5702 if (((length > mtu) ||
5703 (skb && skb_is_gso(skb))) &&
5704 (sk->sk_protocol == IPPROTO_UDP) &&
5705 - (rt->dst.dev->features & NETIF_F_UFO)) {
5706 + (rt->dst.dev->features & NETIF_F_UFO) &&
5707 + (sk->sk_type == SOCK_DGRAM)) {
5708 err = ip6_ufo_append_data(sk, getfrag, from, length,
5709 hh_len, fragheaderlen,
5710 transhdrlen, mtu, flags, rt);
5711 diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
5712 index 5b7a1ed2aba9..2d452a382128 100644
5713 --- a/net/ipv6/ping.c
5714 +++ b/net/ipv6/ping.c
5715 @@ -102,9 +102,10 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
5716
5717 if (msg->msg_name) {
5718 DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name);
5719 - if (msg->msg_namelen < sizeof(struct sockaddr_in6) ||
5720 - u->sin6_family != AF_INET6) {
5721 + if (msg->msg_namelen < sizeof(*u))
5722 return -EINVAL;
5723 + if (u->sin6_family != AF_INET6) {
5724 + return -EAFNOSUPPORT;
5725 }
5726 if (sk->sk_bound_dev_if &&
5727 sk->sk_bound_dev_if != u->sin6_scope_id) {
5728 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
5729 index d02ee019382e..2d9aca57e7c7 100644
5730 --- a/net/ipv6/route.c
5731 +++ b/net/ipv6/route.c
5732 @@ -141,7 +141,7 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
5733 u32 *p = NULL;
5734
5735 if (!(rt->dst.flags & DST_HOST))
5736 - return NULL;
5737 + return dst_cow_metrics_generic(dst, old);
5738
5739 peer = rt6_get_peer_create(rt);
5740 if (peer) {
5741 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
5742 index 61ceb4cdb4a2..23ad419361fb 100644
5743 --- a/net/irda/ircomm/ircomm_tty.c
5744 +++ b/net/irda/ircomm/ircomm_tty.c
5745 @@ -816,7 +816,9 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout)
5746 orig_jiffies = jiffies;
5747
5748 /* Set poll time to 200 ms */
5749 - poll_time = IRDA_MIN(timeout, msecs_to_jiffies(200));
5750 + poll_time = msecs_to_jiffies(200);
5751 + if (timeout)
5752 + poll_time = min_t(unsigned long, timeout, poll_time);
5753
5754 spin_lock_irqsave(&self->spinlock, flags);
5755 while (self->tx_skb && self->tx_skb->len) {
5756 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
5757 index 900632a250ec..80ce44f6693d 100644
5758 --- a/net/mac80211/tx.c
5759 +++ b/net/mac80211/tx.c
5760 @@ -563,6 +563,7 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
5761 if (tx->sdata->control_port_no_encrypt)
5762 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
5763 info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
5764 + info->flags |= IEEE80211_TX_CTL_USE_MINRATE;
5765 }
5766
5767 return TX_CONTINUE;
5768 diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
5769 index f9e556b56086..68ccddb5e2c4 100644
5770 --- a/net/openvswitch/datapath.c
5771 +++ b/net/openvswitch/datapath.c
5772 @@ -2060,14 +2060,55 @@ static int __net_init ovs_init_net(struct net *net)
5773 return 0;
5774 }
5775
5776 -static void __net_exit ovs_exit_net(struct net *net)
5777 +static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
5778 + struct list_head *head)
5779 {
5780 - struct datapath *dp, *dp_next;
5781 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
5782 + struct datapath *dp;
5783 +
5784 + list_for_each_entry(dp, &ovs_net->dps, list_node) {
5785 + int i;
5786 +
5787 + for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
5788 + struct vport *vport;
5789 +
5790 + hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
5791 + struct netdev_vport *netdev_vport;
5792 +
5793 + if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
5794 + continue;
5795 +
5796 + netdev_vport = netdev_vport_priv(vport);
5797 + if (dev_net(netdev_vport->dev) == dnet)
5798 + list_add(&vport->detach_list, head);
5799 + }
5800 + }
5801 + }
5802 +}
5803 +
5804 +static void __net_exit ovs_exit_net(struct net *dnet)
5805 +{
5806 + struct datapath *dp, *dp_next;
5807 + struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
5808 + struct vport *vport, *vport_next;
5809 + struct net *net;
5810 + LIST_HEAD(head);
5811
5812 ovs_lock();
5813 list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
5814 __dp_destroy(dp);
5815 +
5816 + rtnl_lock();
5817 + for_each_net(net)
5818 + list_vports_from_net(net, dnet, &head);
5819 + rtnl_unlock();
5820 +
5821 + /* Detach all vports from the given namespace. */
5822 + list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
5823 + list_del(&vport->detach_list);
5824 + ovs_dp_detach_port(vport);
5825 + }
5826 +
5827 ovs_unlock();
5828
5829 cancel_work_sync(&ovs_net->dp_notify_work);
5830 diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
5831 index 8942125de3a6..ab01c65fb3de 100644
5832 --- a/net/openvswitch/vport.h
5833 +++ b/net/openvswitch/vport.h
5834 @@ -93,6 +93,7 @@ struct vport_portids {
5835 * @ops: Class structure.
5836 * @percpu_stats: Points to per-CPU statistics used and maintained by vport
5837 * @err_stats: Points to error statistics used and maintained by vport
5838 + * @detach_list: list used for detaching the vport in the net-exit call.
5839 */
5840 struct vport {
5841 struct rcu_head rcu;
5842 @@ -107,6 +108,7 @@ struct vport {
5843 struct pcpu_sw_netstats __percpu *percpu_stats;
5844
5845 struct vport_err_stats err_stats;
5846 + struct list_head detach_list;
5847 };
5848
5849 /**
5850 diff --git a/net/sched/ematch.c b/net/sched/ematch.c
5851 index 6742200b1307..fbb7ebfc58c6 100644
5852 --- a/net/sched/ematch.c
5853 +++ b/net/sched/ematch.c
5854 @@ -228,6 +228,7 @@ static int tcf_em_validate(struct tcf_proto *tp,
5855 * to replay the request.
5856 */
5857 module_put(em->ops->owner);
5858 + em->ops = NULL;
5859 err = -EAGAIN;
5860 }
5861 #endif
5862 diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
5863 index 9761a0da964d..1bb4d26fbd6b 100644
5864 --- a/net/sunrpc/backchannel_rqst.c
5865 +++ b/net/sunrpc/backchannel_rqst.c
5866 @@ -309,12 +309,15 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
5867 struct rpc_xprt *xprt = req->rq_xprt;
5868 struct svc_serv *bc_serv = xprt->bc_serv;
5869
5870 + spin_lock(&xprt->bc_pa_lock);
5871 + list_del(&req->rq_bc_pa_list);
5872 + spin_unlock(&xprt->bc_pa_lock);
5873 +
5874 req->rq_private_buf.len = copied;
5875 set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
5876
5877 dprintk("RPC: add callback request to list\n");
5878 spin_lock(&bc_serv->sv_cb_lock);
5879 - list_del(&req->rq_bc_pa_list);
5880 list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
5881 wake_up(&bc_serv->sv_cb_waitq);
5882 spin_unlock(&bc_serv->sv_cb_lock);
5883 diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
5884 index 066362141133..48f14003af10 100644
5885 --- a/net/sunrpc/cache.c
5886 +++ b/net/sunrpc/cache.c
5887 @@ -920,7 +920,7 @@ static unsigned int cache_poll(struct file *filp, poll_table *wait,
5888 poll_wait(filp, &queue_wait, wait);
5889
5890 /* alway allow write */
5891 - mask = POLL_OUT | POLLWRNORM;
5892 + mask = POLLOUT | POLLWRNORM;
5893
5894 if (!rp)
5895 return mask;
5896 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
5897 index 166d59cdc86b..9c823cfdfff0 100644
5898 --- a/sound/core/pcm_native.c
5899 +++ b/sound/core/pcm_native.c
5900 @@ -1523,6 +1523,8 @@ static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
5901 if (! snd_pcm_playback_empty(substream)) {
5902 snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
5903 snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
5904 + } else {
5905 + runtime->status->state = SNDRV_PCM_STATE_SETUP;
5906 }
5907 break;
5908 case SNDRV_PCM_STATE_RUNNING:
5909 diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
5910 index 8337645aa7a5..84c94301bfaf 100644
5911 --- a/sound/pci/hda/hda_controller.c
5912 +++ b/sound/pci/hda/hda_controller.c
5913 @@ -957,7 +957,6 @@ static int azx_alloc_cmd_io(struct azx *chip)
5914 dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
5915 return err;
5916 }
5917 -EXPORT_SYMBOL_GPL(azx_alloc_cmd_io);
5918
5919 static void azx_init_cmd_io(struct azx *chip)
5920 {
5921 @@ -1022,7 +1021,6 @@ static void azx_init_cmd_io(struct azx *chip)
5922 azx_writeb(chip, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN);
5923 spin_unlock_irq(&chip->reg_lock);
5924 }
5925 -EXPORT_SYMBOL_GPL(azx_init_cmd_io);
5926
5927 static void azx_free_cmd_io(struct azx *chip)
5928 {
5929 @@ -1032,7 +1030,6 @@ static void azx_free_cmd_io(struct azx *chip)
5930 azx_writeb(chip, CORBCTL, 0);
5931 spin_unlock_irq(&chip->reg_lock);
5932 }
5933 -EXPORT_SYMBOL_GPL(azx_free_cmd_io);
5934
5935 static unsigned int azx_command_addr(u32 cmd)
5936 {
5937 @@ -1312,7 +1309,6 @@ static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
5938 else
5939 return azx_corb_send_cmd(bus, val);
5940 }
5941 -EXPORT_SYMBOL_GPL(azx_send_cmd);
5942
5943 /* get a response */
5944 static unsigned int azx_get_response(struct hda_bus *bus,
5945 @@ -1326,7 +1322,6 @@ static unsigned int azx_get_response(struct hda_bus *bus,
5946 else
5947 return azx_rirb_get_response(bus, addr);
5948 }
5949 -EXPORT_SYMBOL_GPL(azx_get_response);
5950
5951 #ifdef CONFIG_SND_HDA_DSP_LOADER
5952 /*
5953 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
5954 index 48b6c5a3884f..8413797ba38d 100644
5955 --- a/sound/pci/hda/hda_intel.c
5956 +++ b/sound/pci/hda/hda_intel.c
5957 @@ -1995,7 +1995,7 @@ static const struct pci_device_id azx_ids[] = {
5958 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
5959 /* Panther Point */
5960 { PCI_DEVICE(0x8086, 0x1e20),
5961 - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
5962 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
5963 /* Lynx Point */
5964 { PCI_DEVICE(0x8086, 0x8c20),
5965 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
5966 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5967 index 50762cf62b2d..8375bc424e2d 100644
5968 --- a/sound/pci/hda/patch_realtek.c
5969 +++ b/sound/pci/hda/patch_realtek.c
5970 @@ -5074,6 +5074,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5971 {0x17, 0x40000000},
5972 {0x1d, 0x40700001},
5973 {0x21, 0x02211040}),
5974 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5975 + ALC255_STANDARD_PINS,
5976 + {0x12, 0x90a60170},
5977 + {0x14, 0x90170140},
5978 + {0x17, 0x40000000},
5979 + {0x1d, 0x40700001},
5980 + {0x21, 0x02211050}),
5981 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
5982 {0x12, 0x90a60130},
5983 {0x13, 0x40000000},
5984 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
5985 index 6d36c5b78805..87eff3173ce9 100644
5986 --- a/sound/pci/hda/patch_sigmatel.c
5987 +++ b/sound/pci/hda/patch_sigmatel.c
5988 @@ -79,6 +79,7 @@ enum {
5989 STAC_ALIENWARE_M17X,
5990 STAC_92HD89XX_HP_FRONT_JACK,
5991 STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK,
5992 + STAC_92HD73XX_ASUS_MOBO,
5993 STAC_92HD73XX_MODELS
5994 };
5995
5996 @@ -1911,7 +1912,18 @@ static const struct hda_fixup stac92hd73xx_fixups[] = {
5997 [STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK] = {
5998 .type = HDA_FIXUP_PINS,
5999 .v.pins = stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs,
6000 - }
6001 + },
6002 + [STAC_92HD73XX_ASUS_MOBO] = {
6003 + .type = HDA_FIXUP_PINS,
6004 + .v.pins = (const struct hda_pintbl[]) {
6005 + /* enable 5.1 and SPDIF out */
6006 + { 0x0c, 0x01014411 },
6007 + { 0x0d, 0x01014410 },
6008 + { 0x0e, 0x01014412 },
6009 + { 0x22, 0x014b1180 },
6010 + { }
6011 + }
6012 + },
6013 };
6014
6015 static const struct hda_model_fixup stac92hd73xx_models[] = {
6016 @@ -1923,6 +1935,7 @@ static const struct hda_model_fixup stac92hd73xx_models[] = {
6017 { .id = STAC_DELL_M6_BOTH, .name = "dell-m6" },
6018 { .id = STAC_DELL_EQ, .name = "dell-eq" },
6019 { .id = STAC_ALIENWARE_M17X, .name = "alienware" },
6020 + { .id = STAC_92HD73XX_ASUS_MOBO, .name = "asus-mobo" },
6021 {}
6022 };
6023
6024 @@ -1975,6 +1988,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
6025 "HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK),
6026 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
6027 "unknown HP", STAC_92HD89XX_HP_FRONT_JACK),
6028 + SND_PCI_QUIRK(PCI_VENDOR_ID_ASUSTEK, 0x83f8, "ASUS AT4NM10",
6029 + STAC_92HD73XX_ASUS_MOBO),
6030 {} /* terminator */
6031 };
6032
6033 diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
6034 index 7134f9ebf2f3..a8a9c1f36b2e 100644
6035 --- a/sound/soc/codecs/rt5670.c
6036 +++ b/sound/soc/codecs/rt5670.c
6037 @@ -222,7 +222,6 @@ static bool rt5670_volatile_register(struct device *dev, unsigned int reg)
6038 case RT5670_ADC_EQ_CTRL1:
6039 case RT5670_EQ_CTRL1:
6040 case RT5670_ALC_CTRL_1:
6041 - case RT5670_IRQ_CTRL1:
6042 case RT5670_IRQ_CTRL2:
6043 case RT5670_INT_IRQ_ST:
6044 case RT5670_IL_CMD:
6045 diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
6046 index f4b05bc23e4b..1343ecbf0bd5 100644
6047 --- a/sound/soc/omap/omap-pcm.c
6048 +++ b/sound/soc/omap/omap-pcm.c
6049 @@ -201,7 +201,7 @@ static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd)
6050 struct snd_pcm *pcm = rtd->pcm;
6051 int ret;
6052
6053 - ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(64));
6054 + ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
6055 if (ret)
6056 return ret;
6057