Contents of /trunk/kernel-magellan/patches-4.0/0101-4.0.2-all-fixes.patch
Parent Directory | Revision Log
Revision 2564 -
(show annotations)
(download)
Tue Nov 25 22:54:20 2014 UTC (9 years, 10 months ago) by niro
File size: 287284 byte(s)
-linux-4.0.2
1 | diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt |
2 | index 99ca40e8e810..5c204df6b689 100644 |
3 | --- a/Documentation/networking/scaling.txt |
4 | +++ b/Documentation/networking/scaling.txt |
5 | @@ -282,7 +282,7 @@ following is true: |
6 | |
7 | - The current CPU's queue head counter >= the recorded tail counter |
8 | value in rps_dev_flow[i] |
9 | -- The current CPU is unset (equal to RPS_NO_CPU) |
10 | +- The current CPU is unset (>= nr_cpu_ids) |
11 | - The current CPU is offline |
12 | |
13 | After this check, the packet is sent to the (possibly updated) current |
14 | diff --git a/Documentation/virtual/kvm/devices/s390_flic.txt b/Documentation/virtual/kvm/devices/s390_flic.txt |
15 | index 4ceef53164b0..d1ad9d5cae46 100644 |
16 | --- a/Documentation/virtual/kvm/devices/s390_flic.txt |
17 | +++ b/Documentation/virtual/kvm/devices/s390_flic.txt |
18 | @@ -27,6 +27,9 @@ Groups: |
19 | Copies all floating interrupts into a buffer provided by userspace. |
20 | When the buffer is too small it returns -ENOMEM, which is the indication |
21 | for userspace to try again with a bigger buffer. |
22 | + -ENOBUFS is returned when the allocation of a kernelspace buffer has |
23 | + failed. |
24 | + -EFAULT is returned when copying data to userspace failed. |
25 | All interrupts remain pending, i.e. are not deleted from the list of |
26 | currently pending interrupts. |
27 | attr->addr contains the userspace address of the buffer into which all |
28 | diff --git a/Makefile b/Makefile |
29 | index f499cd2f5738..0649a6011a76 100644 |
30 | --- a/Makefile |
31 | +++ b/Makefile |
32 | @@ -1,6 +1,6 @@ |
33 | VERSION = 4 |
34 | PATCHLEVEL = 0 |
35 | -SUBLEVEL = 1 |
36 | +SUBLEVEL = 2 |
37 | EXTRAVERSION = |
38 | NAME = Hurr durr I'ma sheep |
39 | |
40 | diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts |
41 | index fec1fca2ad66..6c4bc53cbf4e 100644 |
42 | --- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts |
43 | +++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts |
44 | @@ -167,7 +167,13 @@ |
45 | |
46 | macb1: ethernet@f802c000 { |
47 | phy-mode = "rmii"; |
48 | + #address-cells = <1>; |
49 | + #size-cells = <0>; |
50 | status = "okay"; |
51 | + |
52 | + ethernet-phy@1 { |
53 | + reg = <0x1>; |
54 | + }; |
55 | }; |
56 | |
57 | dbgu: serial@ffffee00 { |
58 | diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi |
59 | index a5441d5482a6..3cc8b8320345 100644 |
60 | --- a/arch/arm/boot/dts/dove.dtsi |
61 | +++ b/arch/arm/boot/dts/dove.dtsi |
62 | @@ -154,7 +154,7 @@ |
63 | |
64 | uart2: serial@12200 { |
65 | compatible = "ns16550a"; |
66 | - reg = <0x12000 0x100>; |
67 | + reg = <0x12200 0x100>; |
68 | reg-shift = <2>; |
69 | interrupts = <9>; |
70 | clocks = <&core_clk 0>; |
71 | @@ -163,7 +163,7 @@ |
72 | |
73 | uart3: serial@12300 { |
74 | compatible = "ns16550a"; |
75 | - reg = <0x12100 0x100>; |
76 | + reg = <0x12300 0x100>; |
77 | reg-shift = <2>; |
78 | interrupts = <10>; |
79 | clocks = <&core_clk 0>; |
80 | diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts |
81 | index f02775487cd4..c41600e587e0 100644 |
82 | --- a/arch/arm/boot/dts/exynos5250-spring.dts |
83 | +++ b/arch/arm/boot/dts/exynos5250-spring.dts |
84 | @@ -429,7 +429,6 @@ |
85 | &mmc_0 { |
86 | status = "okay"; |
87 | num-slots = <1>; |
88 | - supports-highspeed; |
89 | broken-cd; |
90 | card-detect-delay = <200>; |
91 | samsung,dw-mshc-ciu-div = <3>; |
92 | @@ -437,11 +436,8 @@ |
93 | samsung,dw-mshc-ddr-timing = <1 2>; |
94 | pinctrl-names = "default"; |
95 | pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_cd &sd0_bus4 &sd0_bus8>; |
96 | - |
97 | - slot@0 { |
98 | - reg = <0>; |
99 | - bus-width = <8>; |
100 | - }; |
101 | + bus-width = <8>; |
102 | + cap-mmc-highspeed; |
103 | }; |
104 | |
105 | /* |
106 | @@ -451,7 +447,6 @@ |
107 | &mmc_1 { |
108 | status = "okay"; |
109 | num-slots = <1>; |
110 | - supports-highspeed; |
111 | broken-cd; |
112 | card-detect-delay = <200>; |
113 | samsung,dw-mshc-ciu-div = <3>; |
114 | @@ -459,11 +454,8 @@ |
115 | samsung,dw-mshc-ddr-timing = <1 2>; |
116 | pinctrl-names = "default"; |
117 | pinctrl-0 = <&sd1_clk &sd1_cmd &sd1_cd &sd1_bus4>; |
118 | - |
119 | - slot@0 { |
120 | - reg = <0>; |
121 | - bus-width = <4>; |
122 | - }; |
123 | + bus-width = <4>; |
124 | + cap-sd-highspeed; |
125 | }; |
126 | |
127 | &pinctrl_0 { |
128 | diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h |
129 | index afb9cafd3786..674d03f4ba15 100644 |
130 | --- a/arch/arm/include/asm/elf.h |
131 | +++ b/arch/arm/include/asm/elf.h |
132 | @@ -115,7 +115,7 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); |
133 | the loader. We need to make sure that it is out of the way of the program |
134 | that it will "exec", and that there is sufficient room for the brk. */ |
135 | |
136 | -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) |
137 | +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) |
138 | |
139 | /* When the program starts, a1 contains a pointer to a function to be |
140 | registered with atexit, as per the SVR4 ABI. A value of 0 means we |
141 | diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h |
142 | index 0db25bc32864..3a42ac646885 100644 |
143 | --- a/arch/arm/include/uapi/asm/kvm.h |
144 | +++ b/arch/arm/include/uapi/asm/kvm.h |
145 | @@ -195,8 +195,14 @@ struct kvm_arch_memory_slot { |
146 | #define KVM_ARM_IRQ_CPU_IRQ 0 |
147 | #define KVM_ARM_IRQ_CPU_FIQ 1 |
148 | |
149 | -/* Highest supported SPI, from VGIC_NR_IRQS */ |
150 | +/* |
151 | + * This used to hold the highest supported SPI, but it is now obsolete |
152 | + * and only here to provide source code level compatibility with older |
153 | + * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS. |
154 | + */ |
155 | +#ifndef __KERNEL__ |
156 | #define KVM_ARM_IRQ_GIC_MAX 127 |
157 | +#endif |
158 | |
159 | /* PSCI interface */ |
160 | #define KVM_PSCI_FN_BASE 0x95c1ba5e |
161 | diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c |
162 | index c4cc50e58c13..cfb354ff2a60 100644 |
163 | --- a/arch/arm/kernel/hibernate.c |
164 | +++ b/arch/arm/kernel/hibernate.c |
165 | @@ -22,6 +22,7 @@ |
166 | #include <asm/suspend.h> |
167 | #include <asm/memory.h> |
168 | #include <asm/sections.h> |
169 | +#include "reboot.h" |
170 | |
171 | int pfn_is_nosave(unsigned long pfn) |
172 | { |
173 | @@ -61,7 +62,7 @@ static int notrace arch_save_image(unsigned long unused) |
174 | |
175 | ret = swsusp_save(); |
176 | if (ret == 0) |
177 | - soft_restart(virt_to_phys(cpu_resume)); |
178 | + _soft_restart(virt_to_phys(cpu_resume), false); |
179 | return ret; |
180 | } |
181 | |
182 | @@ -86,7 +87,7 @@ static void notrace arch_restore_image(void *unused) |
183 | for (pbe = restore_pblist; pbe; pbe = pbe->next) |
184 | copy_page(pbe->orig_address, pbe->address); |
185 | |
186 | - soft_restart(virt_to_phys(cpu_resume)); |
187 | + _soft_restart(virt_to_phys(cpu_resume), false); |
188 | } |
189 | |
190 | static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata; |
191 | diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c |
192 | index fdfa3a78ec8c..2bf1a162defb 100644 |
193 | --- a/arch/arm/kernel/process.c |
194 | +++ b/arch/arm/kernel/process.c |
195 | @@ -41,6 +41,7 @@ |
196 | #include <asm/system_misc.h> |
197 | #include <asm/mach/time.h> |
198 | #include <asm/tls.h> |
199 | +#include "reboot.h" |
200 | |
201 | #ifdef CONFIG_CC_STACKPROTECTOR |
202 | #include <linux/stackprotector.h> |
203 | @@ -95,7 +96,7 @@ static void __soft_restart(void *addr) |
204 | BUG(); |
205 | } |
206 | |
207 | -void soft_restart(unsigned long addr) |
208 | +void _soft_restart(unsigned long addr, bool disable_l2) |
209 | { |
210 | u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack); |
211 | |
212 | @@ -104,7 +105,7 @@ void soft_restart(unsigned long addr) |
213 | local_fiq_disable(); |
214 | |
215 | /* Disable the L2 if we're the last man standing. */ |
216 | - if (num_online_cpus() == 1) |
217 | + if (disable_l2) |
218 | outer_disable(); |
219 | |
220 | /* Change to the new stack and continue with the reset. */ |
221 | @@ -114,6 +115,11 @@ void soft_restart(unsigned long addr) |
222 | BUG(); |
223 | } |
224 | |
225 | +void soft_restart(unsigned long addr) |
226 | +{ |
227 | + _soft_restart(addr, num_online_cpus() == 1); |
228 | +} |
229 | + |
230 | /* |
231 | * Function pointers to optional machine specific functions |
232 | */ |
233 | diff --git a/arch/arm/kernel/reboot.h b/arch/arm/kernel/reboot.h |
234 | new file mode 100644 |
235 | index 000000000000..c87f05816d6b |
236 | --- /dev/null |
237 | +++ b/arch/arm/kernel/reboot.h |
238 | @@ -0,0 +1,6 @@ |
239 | +#ifndef REBOOT_H |
240 | +#define REBOOT_H |
241 | + |
242 | +extern void _soft_restart(unsigned long addr, bool disable_l2); |
243 | + |
244 | +#endif |
245 | diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c |
246 | index 5560f74f9eee..b652af50fda7 100644 |
247 | --- a/arch/arm/kvm/arm.c |
248 | +++ b/arch/arm/kvm/arm.c |
249 | @@ -651,8 +651,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, |
250 | if (!irqchip_in_kernel(kvm)) |
251 | return -ENXIO; |
252 | |
253 | - if (irq_num < VGIC_NR_PRIVATE_IRQS || |
254 | - irq_num > KVM_ARM_IRQ_GIC_MAX) |
255 | + if (irq_num < VGIC_NR_PRIVATE_IRQS) |
256 | return -EINVAL; |
257 | |
258 | return kvm_vgic_inject_irq(kvm, 0, irq_num, level); |
259 | diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c |
260 | index 8b9f5e202ccf..4f4e22206ae5 100644 |
261 | --- a/arch/arm/mach-mvebu/pmsu.c |
262 | +++ b/arch/arm/mach-mvebu/pmsu.c |
263 | @@ -415,6 +415,9 @@ static __init int armada_38x_cpuidle_init(void) |
264 | void __iomem *mpsoc_base; |
265 | u32 reg; |
266 | |
267 | + pr_warn("CPU idle is currently broken on Armada 38x: disabling"); |
268 | + return 0; |
269 | + |
270 | np = of_find_compatible_node(NULL, NULL, |
271 | "marvell,armada-380-coherency-fabric"); |
272 | if (!np) |
273 | @@ -476,6 +479,16 @@ static int __init mvebu_v7_cpu_pm_init(void) |
274 | return 0; |
275 | of_node_put(np); |
276 | |
277 | + /* |
278 | + * Currently the CPU idle support for Armada 38x is broken, as |
279 | + * the CPU hotplug uses some of the CPU idle functions it is |
280 | + * broken too, so let's disable it |
281 | + */ |
282 | + if (of_machine_is_compatible("marvell,armada380")) { |
283 | + cpu_hotplug_disable(); |
284 | + pr_warn("CPU hotplug support is currently broken on Armada 38x: disabling"); |
285 | + } |
286 | + |
287 | if (of_machine_is_compatible("marvell,armadaxp")) |
288 | ret = armada_xp_cpuidle_init(); |
289 | else if (of_machine_is_compatible("marvell,armada370")) |
290 | @@ -489,7 +502,8 @@ static int __init mvebu_v7_cpu_pm_init(void) |
291 | return ret; |
292 | |
293 | mvebu_v7_pmsu_enable_l2_powerdown_onidle(); |
294 | - platform_device_register(&mvebu_v7_cpuidle_device); |
295 | + if (mvebu_v7_cpuidle_device.name) |
296 | + platform_device_register(&mvebu_v7_cpuidle_device); |
297 | cpu_pm_register_notifier(&mvebu_v7_cpu_pm_notifier); |
298 | |
299 | return 0; |
300 | diff --git a/arch/arm/mach-s3c64xx/crag6410.h b/arch/arm/mach-s3c64xx/crag6410.h |
301 | index 7bc66682687e..dcbe17f5e5f8 100644 |
302 | --- a/arch/arm/mach-s3c64xx/crag6410.h |
303 | +++ b/arch/arm/mach-s3c64xx/crag6410.h |
304 | @@ -14,6 +14,7 @@ |
305 | #include <mach/gpio-samsung.h> |
306 | |
307 | #define GLENFARCLAS_PMIC_IRQ_BASE IRQ_BOARD_START |
308 | +#define BANFF_PMIC_IRQ_BASE (IRQ_BOARD_START + 64) |
309 | |
310 | #define PCA935X_GPIO_BASE GPIO_BOARD_START |
311 | #define CODEC_GPIO_BASE (GPIO_BOARD_START + 8) |
312 | diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c |
313 | index 10b913baab28..65c426bc45f7 100644 |
314 | --- a/arch/arm/mach-s3c64xx/mach-crag6410.c |
315 | +++ b/arch/arm/mach-s3c64xx/mach-crag6410.c |
316 | @@ -554,6 +554,7 @@ static struct wm831x_touch_pdata touch_pdata = { |
317 | |
318 | static struct wm831x_pdata crag_pmic_pdata = { |
319 | .wm831x_num = 1, |
320 | + .irq_base = BANFF_PMIC_IRQ_BASE, |
321 | .gpio_base = BANFF_PMIC_GPIO_BASE, |
322 | .soft_shutdown = true, |
323 | |
324 | diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig |
325 | index 1b8e97331ffb..a6186c24ca47 100644 |
326 | --- a/arch/arm64/Kconfig |
327 | +++ b/arch/arm64/Kconfig |
328 | @@ -361,6 +361,27 @@ config ARM64_ERRATUM_832075 |
329 | |
330 | If unsure, say Y. |
331 | |
332 | +config ARM64_ERRATUM_845719 |
333 | + bool "Cortex-A53: 845719: a load might read incorrect data" |
334 | + depends on COMPAT |
335 | + default y |
336 | + help |
337 | + This option adds an alternative code sequence to work around ARM |
338 | + erratum 845719 on Cortex-A53 parts up to r0p4. |
339 | + |
340 | + When running a compat (AArch32) userspace on an affected Cortex-A53 |
341 | + part, a load at EL0 from a virtual address that matches the bottom 32 |
342 | + bits of the virtual address used by a recent load at (AArch64) EL1 |
343 | + might return incorrect data. |
344 | + |
345 | + The workaround is to write the contextidr_el1 register on exception |
346 | + return to a 32-bit task. |
347 | + Please note that this does not necessarily enable the workaround, |
348 | + as it depends on the alternative framework, which will only patch |
349 | + the kernel if an affected CPU is detected. |
350 | + |
351 | + If unsure, say Y. |
352 | + |
353 | endmenu |
354 | |
355 | |
356 | @@ -470,6 +491,10 @@ config HOTPLUG_CPU |
357 | |
358 | source kernel/Kconfig.preempt |
359 | |
360 | +config UP_LATE_INIT |
361 | + def_bool y |
362 | + depends on !SMP |
363 | + |
364 | config HZ |
365 | int |
366 | default 100 |
367 | diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile |
368 | index 69ceedc982a5..4d2a925998f9 100644 |
369 | --- a/arch/arm64/Makefile |
370 | +++ b/arch/arm64/Makefile |
371 | @@ -48,7 +48,7 @@ core-$(CONFIG_KVM) += arch/arm64/kvm/ |
372 | core-$(CONFIG_XEN) += arch/arm64/xen/ |
373 | core-$(CONFIG_CRYPTO) += arch/arm64/crypto/ |
374 | libs-y := arch/arm64/lib/ $(libs-y) |
375 | -libs-$(CONFIG_EFI_STUB) += drivers/firmware/efi/libstub/ |
376 | +core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a |
377 | |
378 | # Default target when executing plain make |
379 | KBUILD_IMAGE := Image.gz |
380 | diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h |
381 | index b6c16d5f622f..3f0c53c45771 100644 |
382 | --- a/arch/arm64/include/asm/cpufeature.h |
383 | +++ b/arch/arm64/include/asm/cpufeature.h |
384 | @@ -23,8 +23,9 @@ |
385 | |
386 | #define ARM64_WORKAROUND_CLEAN_CACHE 0 |
387 | #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1 |
388 | +#define ARM64_WORKAROUND_845719 2 |
389 | |
390 | -#define ARM64_NCAPS 2 |
391 | +#define ARM64_NCAPS 3 |
392 | |
393 | #ifndef __ASSEMBLY__ |
394 | |
395 | diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h |
396 | index 59e282311b58..8dcd61e32176 100644 |
397 | --- a/arch/arm64/include/asm/smp_plat.h |
398 | +++ b/arch/arm64/include/asm/smp_plat.h |
399 | @@ -40,4 +40,6 @@ static inline u32 mpidr_hash_size(void) |
400 | extern u64 __cpu_logical_map[NR_CPUS]; |
401 | #define cpu_logical_map(cpu) __cpu_logical_map[cpu] |
402 | |
403 | +void __init do_post_cpus_up_work(void); |
404 | + |
405 | #endif /* __ASM_SMP_PLAT_H */ |
406 | diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h |
407 | index 3ef77a466018..bc49a1886b61 100644 |
408 | --- a/arch/arm64/include/uapi/asm/kvm.h |
409 | +++ b/arch/arm64/include/uapi/asm/kvm.h |
410 | @@ -188,8 +188,14 @@ struct kvm_arch_memory_slot { |
411 | #define KVM_ARM_IRQ_CPU_IRQ 0 |
412 | #define KVM_ARM_IRQ_CPU_FIQ 1 |
413 | |
414 | -/* Highest supported SPI, from VGIC_NR_IRQS */ |
415 | +/* |
416 | + * This used to hold the highest supported SPI, but it is now obsolete |
417 | + * and only here to provide source code level compatibility with older |
418 | + * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS. |
419 | + */ |
420 | +#ifndef __KERNEL__ |
421 | #define KVM_ARM_IRQ_GIC_MAX 127 |
422 | +#endif |
423 | |
424 | /* PSCI interface */ |
425 | #define KVM_PSCI_FN_BASE 0x95c1ba5e |
426 | diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c |
427 | index fa62637e63a8..ad6d52392bb1 100644 |
428 | --- a/arch/arm64/kernel/cpu_errata.c |
429 | +++ b/arch/arm64/kernel/cpu_errata.c |
430 | @@ -88,7 +88,16 @@ struct arm64_cpu_capabilities arm64_errata[] = { |
431 | /* Cortex-A57 r0p0 - r1p2 */ |
432 | .desc = "ARM erratum 832075", |
433 | .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, |
434 | - MIDR_RANGE(MIDR_CORTEX_A57, 0x00, 0x12), |
435 | + MIDR_RANGE(MIDR_CORTEX_A57, 0x00, |
436 | + (1 << MIDR_VARIANT_SHIFT) | 2), |
437 | + }, |
438 | +#endif |
439 | +#ifdef CONFIG_ARM64_ERRATUM_845719 |
440 | + { |
441 | + /* Cortex-A53 r0p[01234] */ |
442 | + .desc = "ARM erratum 845719", |
443 | + .capability = ARM64_WORKAROUND_845719, |
444 | + MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04), |
445 | }, |
446 | #endif |
447 | { |
448 | diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S |
449 | index cf21bb3bf752..959fe8733560 100644 |
450 | --- a/arch/arm64/kernel/entry.S |
451 | +++ b/arch/arm64/kernel/entry.S |
452 | @@ -21,8 +21,10 @@ |
453 | #include <linux/init.h> |
454 | #include <linux/linkage.h> |
455 | |
456 | +#include <asm/alternative-asm.h> |
457 | #include <asm/assembler.h> |
458 | #include <asm/asm-offsets.h> |
459 | +#include <asm/cpufeature.h> |
460 | #include <asm/errno.h> |
461 | #include <asm/esr.h> |
462 | #include <asm/thread_info.h> |
463 | @@ -120,6 +122,24 @@ |
464 | ct_user_enter |
465 | ldr x23, [sp, #S_SP] // load return stack pointer |
466 | msr sp_el0, x23 |
467 | + |
468 | +#ifdef CONFIG_ARM64_ERRATUM_845719 |
469 | + alternative_insn \ |
470 | + "nop", \ |
471 | + "tbz x22, #4, 1f", \ |
472 | + ARM64_WORKAROUND_845719 |
473 | +#ifdef CONFIG_PID_IN_CONTEXTIDR |
474 | + alternative_insn \ |
475 | + "nop; nop", \ |
476 | + "mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:", \ |
477 | + ARM64_WORKAROUND_845719 |
478 | +#else |
479 | + alternative_insn \ |
480 | + "nop", \ |
481 | + "msr contextidr_el1, xzr; 1:", \ |
482 | + ARM64_WORKAROUND_845719 |
483 | +#endif |
484 | +#endif |
485 | .endif |
486 | msr elr_el1, x21 // set up the return data |
487 | msr spsr_el1, x22 |
488 | diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S |
489 | index 07f930540f4a..c237ffb0effe 100644 |
490 | --- a/arch/arm64/kernel/head.S |
491 | +++ b/arch/arm64/kernel/head.S |
492 | @@ -426,6 +426,7 @@ __create_page_tables: |
493 | */ |
494 | mov x0, x25 |
495 | add x1, x26, #SWAPPER_DIR_SIZE |
496 | + dmb sy |
497 | bl __inval_cache_range |
498 | |
499 | mov lr, x27 |
500 | diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c |
501 | index e8420f635bd4..781f4697dc26 100644 |
502 | --- a/arch/arm64/kernel/setup.c |
503 | +++ b/arch/arm64/kernel/setup.c |
504 | @@ -207,6 +207,18 @@ static void __init smp_build_mpidr_hash(void) |
505 | } |
506 | #endif |
507 | |
508 | +void __init do_post_cpus_up_work(void) |
509 | +{ |
510 | + apply_alternatives_all(); |
511 | +} |
512 | + |
513 | +#ifdef CONFIG_UP_LATE_INIT |
514 | +void __init up_late_init(void) |
515 | +{ |
516 | + do_post_cpus_up_work(); |
517 | +} |
518 | +#endif /* CONFIG_UP_LATE_INIT */ |
519 | + |
520 | static void __init setup_processor(void) |
521 | { |
522 | struct cpu_info *cpu_info; |
523 | diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c |
524 | index 328b8ce4b007..4257369341e4 100644 |
525 | --- a/arch/arm64/kernel/smp.c |
526 | +++ b/arch/arm64/kernel/smp.c |
527 | @@ -309,7 +309,7 @@ void cpu_die(void) |
528 | void __init smp_cpus_done(unsigned int max_cpus) |
529 | { |
530 | pr_info("SMP: Total of %d processors activated.\n", num_online_cpus()); |
531 | - apply_alternatives_all(); |
532 | + do_post_cpus_up_work(); |
533 | } |
534 | |
535 | void __init smp_prepare_boot_cpu(void) |
536 | diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c |
537 | index 356ee84cad95..04845aaf5985 100644 |
538 | --- a/arch/c6x/kernel/time.c |
539 | +++ b/arch/c6x/kernel/time.c |
540 | @@ -49,7 +49,7 @@ u64 sched_clock(void) |
541 | return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT; |
542 | } |
543 | |
544 | -void time_init(void) |
545 | +void __init time_init(void) |
546 | { |
547 | u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT; |
548 | |
549 | diff --git a/arch/mips/include/asm/asm-eva.h b/arch/mips/include/asm/asm-eva.h |
550 | index e41c56e375b1..1e38f0e1ea3e 100644 |
551 | --- a/arch/mips/include/asm/asm-eva.h |
552 | +++ b/arch/mips/include/asm/asm-eva.h |
553 | @@ -11,6 +11,36 @@ |
554 | #define __ASM_ASM_EVA_H |
555 | |
556 | #ifndef __ASSEMBLY__ |
557 | + |
558 | +/* Kernel variants */ |
559 | + |
560 | +#define kernel_cache(op, base) "cache " op ", " base "\n" |
561 | +#define kernel_ll(reg, addr) "ll " reg ", " addr "\n" |
562 | +#define kernel_sc(reg, addr) "sc " reg ", " addr "\n" |
563 | +#define kernel_lw(reg, addr) "lw " reg ", " addr "\n" |
564 | +#define kernel_lwl(reg, addr) "lwl " reg ", " addr "\n" |
565 | +#define kernel_lwr(reg, addr) "lwr " reg ", " addr "\n" |
566 | +#define kernel_lh(reg, addr) "lh " reg ", " addr "\n" |
567 | +#define kernel_lb(reg, addr) "lb " reg ", " addr "\n" |
568 | +#define kernel_lbu(reg, addr) "lbu " reg ", " addr "\n" |
569 | +#define kernel_sw(reg, addr) "sw " reg ", " addr "\n" |
570 | +#define kernel_swl(reg, addr) "swl " reg ", " addr "\n" |
571 | +#define kernel_swr(reg, addr) "swr " reg ", " addr "\n" |
572 | +#define kernel_sh(reg, addr) "sh " reg ", " addr "\n" |
573 | +#define kernel_sb(reg, addr) "sb " reg ", " addr "\n" |
574 | + |
575 | +#ifdef CONFIG_32BIT |
576 | +/* |
577 | + * No 'sd' or 'ld' instructions in 32-bit but the code will |
578 | + * do the correct thing |
579 | + */ |
580 | +#define kernel_sd(reg, addr) user_sw(reg, addr) |
581 | +#define kernel_ld(reg, addr) user_lw(reg, addr) |
582 | +#else |
583 | +#define kernel_sd(reg, addr) "sd " reg", " addr "\n" |
584 | +#define kernel_ld(reg, addr) "ld " reg", " addr "\n" |
585 | +#endif /* CONFIG_32BIT */ |
586 | + |
587 | #ifdef CONFIG_EVA |
588 | |
589 | #define __BUILD_EVA_INSN(insn, reg, addr) \ |
590 | @@ -41,37 +71,60 @@ |
591 | |
592 | #else |
593 | |
594 | -#define user_cache(op, base) "cache " op ", " base "\n" |
595 | -#define user_ll(reg, addr) "ll " reg ", " addr "\n" |
596 | -#define user_sc(reg, addr) "sc " reg ", " addr "\n" |
597 | -#define user_lw(reg, addr) "lw " reg ", " addr "\n" |
598 | -#define user_lwl(reg, addr) "lwl " reg ", " addr "\n" |
599 | -#define user_lwr(reg, addr) "lwr " reg ", " addr "\n" |
600 | -#define user_lh(reg, addr) "lh " reg ", " addr "\n" |
601 | -#define user_lb(reg, addr) "lb " reg ", " addr "\n" |
602 | -#define user_lbu(reg, addr) "lbu " reg ", " addr "\n" |
603 | -#define user_sw(reg, addr) "sw " reg ", " addr "\n" |
604 | -#define user_swl(reg, addr) "swl " reg ", " addr "\n" |
605 | -#define user_swr(reg, addr) "swr " reg ", " addr "\n" |
606 | -#define user_sh(reg, addr) "sh " reg ", " addr "\n" |
607 | -#define user_sb(reg, addr) "sb " reg ", " addr "\n" |
608 | +#define user_cache(op, base) kernel_cache(op, base) |
609 | +#define user_ll(reg, addr) kernel_ll(reg, addr) |
610 | +#define user_sc(reg, addr) kernel_sc(reg, addr) |
611 | +#define user_lw(reg, addr) kernel_lw(reg, addr) |
612 | +#define user_lwl(reg, addr) kernel_lwl(reg, addr) |
613 | +#define user_lwr(reg, addr) kernel_lwr(reg, addr) |
614 | +#define user_lh(reg, addr) kernel_lh(reg, addr) |
615 | +#define user_lb(reg, addr) kernel_lb(reg, addr) |
616 | +#define user_lbu(reg, addr) kernel_lbu(reg, addr) |
617 | +#define user_sw(reg, addr) kernel_sw(reg, addr) |
618 | +#define user_swl(reg, addr) kernel_swl(reg, addr) |
619 | +#define user_swr(reg, addr) kernel_swr(reg, addr) |
620 | +#define user_sh(reg, addr) kernel_sh(reg, addr) |
621 | +#define user_sb(reg, addr) kernel_sb(reg, addr) |
622 | |
623 | #ifdef CONFIG_32BIT |
624 | -/* |
625 | - * No 'sd' or 'ld' instructions in 32-bit but the code will |
626 | - * do the correct thing |
627 | - */ |
628 | -#define user_sd(reg, addr) user_sw(reg, addr) |
629 | -#define user_ld(reg, addr) user_lw(reg, addr) |
630 | +#define user_sd(reg, addr) kernel_sw(reg, addr) |
631 | +#define user_ld(reg, addr) kernel_lw(reg, addr) |
632 | #else |
633 | -#define user_sd(reg, addr) "sd " reg", " addr "\n" |
634 | -#define user_ld(reg, addr) "ld " reg", " addr "\n" |
635 | +#define user_sd(reg, addr) kernel_sd(reg, addr) |
636 | +#define user_ld(reg, addr) kernel_ld(reg, addr) |
637 | #endif /* CONFIG_32BIT */ |
638 | |
639 | #endif /* CONFIG_EVA */ |
640 | |
641 | #else /* __ASSEMBLY__ */ |
642 | |
643 | +#define kernel_cache(op, base) cache op, base |
644 | +#define kernel_ll(reg, addr) ll reg, addr |
645 | +#define kernel_sc(reg, addr) sc reg, addr |
646 | +#define kernel_lw(reg, addr) lw reg, addr |
647 | +#define kernel_lwl(reg, addr) lwl reg, addr |
648 | +#define kernel_lwr(reg, addr) lwr reg, addr |
649 | +#define kernel_lh(reg, addr) lh reg, addr |
650 | +#define kernel_lb(reg, addr) lb reg, addr |
651 | +#define kernel_lbu(reg, addr) lbu reg, addr |
652 | +#define kernel_sw(reg, addr) sw reg, addr |
653 | +#define kernel_swl(reg, addr) swl reg, addr |
654 | +#define kernel_swr(reg, addr) swr reg, addr |
655 | +#define kernel_sh(reg, addr) sh reg, addr |
656 | +#define kernel_sb(reg, addr) sb reg, addr |
657 | + |
658 | +#ifdef CONFIG_32BIT |
659 | +/* |
660 | + * No 'sd' or 'ld' instructions in 32-bit but the code will |
661 | + * do the correct thing |
662 | + */ |
663 | +#define kernel_sd(reg, addr) user_sw(reg, addr) |
664 | +#define kernel_ld(reg, addr) user_lw(reg, addr) |
665 | +#else |
666 | +#define kernel_sd(reg, addr) sd reg, addr |
667 | +#define kernel_ld(reg, addr) ld reg, addr |
668 | +#endif /* CONFIG_32BIT */ |
669 | + |
670 | #ifdef CONFIG_EVA |
671 | |
672 | #define __BUILD_EVA_INSN(insn, reg, addr) \ |
673 | @@ -101,31 +154,27 @@ |
674 | #define user_sd(reg, addr) user_sw(reg, addr) |
675 | #else |
676 | |
677 | -#define user_cache(op, base) cache op, base |
678 | -#define user_ll(reg, addr) ll reg, addr |
679 | -#define user_sc(reg, addr) sc reg, addr |
680 | -#define user_lw(reg, addr) lw reg, addr |
681 | -#define user_lwl(reg, addr) lwl reg, addr |
682 | -#define user_lwr(reg, addr) lwr reg, addr |
683 | -#define user_lh(reg, addr) lh reg, addr |
684 | -#define user_lb(reg, addr) lb reg, addr |
685 | -#define user_lbu(reg, addr) lbu reg, addr |
686 | -#define user_sw(reg, addr) sw reg, addr |
687 | -#define user_swl(reg, addr) swl reg, addr |
688 | -#define user_swr(reg, addr) swr reg, addr |
689 | -#define user_sh(reg, addr) sh reg, addr |
690 | -#define user_sb(reg, addr) sb reg, addr |
691 | +#define user_cache(op, base) kernel_cache(op, base) |
692 | +#define user_ll(reg, addr) kernel_ll(reg, addr) |
693 | +#define user_sc(reg, addr) kernel_sc(reg, addr) |
694 | +#define user_lw(reg, addr) kernel_lw(reg, addr) |
695 | +#define user_lwl(reg, addr) kernel_lwl(reg, addr) |
696 | +#define user_lwr(reg, addr) kernel_lwr(reg, addr) |
697 | +#define user_lh(reg, addr) kernel_lh(reg, addr) |
698 | +#define user_lb(reg, addr) kernel_lb(reg, addr) |
699 | +#define user_lbu(reg, addr) kernel_lbu(reg, addr) |
700 | +#define user_sw(reg, addr) kernel_sw(reg, addr) |
701 | +#define user_swl(reg, addr) kernel_swl(reg, addr) |
702 | +#define user_swr(reg, addr) kernel_swr(reg, addr) |
703 | +#define user_sh(reg, addr) kernel_sh(reg, addr) |
704 | +#define user_sb(reg, addr) kernel_sb(reg, addr) |
705 | |
706 | #ifdef CONFIG_32BIT |
707 | -/* |
708 | - * No 'sd' or 'ld' instructions in 32-bit but the code will |
709 | - * do the correct thing |
710 | - */ |
711 | -#define user_sd(reg, addr) user_sw(reg, addr) |
712 | -#define user_ld(reg, addr) user_lw(reg, addr) |
713 | +#define user_sd(reg, addr) kernel_sw(reg, addr) |
714 | +#define user_ld(reg, addr) kernel_lw(reg, addr) |
715 | #else |
716 | -#define user_sd(reg, addr) sd reg, addr |
717 | -#define user_ld(reg, addr) ld reg, addr |
718 | +#define user_sd(reg, addr) kernel_sd(reg, addr) |
719 | +#define user_ld(reg, addr) kernel_ld(reg, addr) |
720 | #endif /* CONFIG_32BIT */ |
721 | |
722 | #endif /* CONFIG_EVA */ |
723 | diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h |
724 | index dd083e999b08..9f26b079cc6a 100644 |
725 | --- a/arch/mips/include/asm/fpu.h |
726 | +++ b/arch/mips/include/asm/fpu.h |
727 | @@ -170,6 +170,7 @@ static inline void lose_fpu(int save) |
728 | } |
729 | disable_msa(); |
730 | clear_thread_flag(TIF_USEDMSA); |
731 | + __disable_fpu(); |
732 | } else if (is_fpu_owner()) { |
733 | if (save) |
734 | _save_fp(current); |
735 | diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h |
736 | index ac4fc716062b..f722b0528c25 100644 |
737 | --- a/arch/mips/include/asm/kvm_host.h |
738 | +++ b/arch/mips/include/asm/kvm_host.h |
739 | @@ -322,6 +322,7 @@ enum mips_mmu_types { |
740 | #define T_TRAP 13 /* Trap instruction */ |
741 | #define T_VCEI 14 /* Virtual coherency exception */ |
742 | #define T_FPE 15 /* Floating point exception */ |
743 | +#define T_MSADIS 21 /* MSA disabled exception */ |
744 | #define T_WATCH 23 /* Watch address reference */ |
745 | #define T_VCED 31 /* Virtual coherency data */ |
746 | |
747 | @@ -578,6 +579,7 @@ struct kvm_mips_callbacks { |
748 | int (*handle_syscall)(struct kvm_vcpu *vcpu); |
749 | int (*handle_res_inst)(struct kvm_vcpu *vcpu); |
750 | int (*handle_break)(struct kvm_vcpu *vcpu); |
751 | + int (*handle_msa_disabled)(struct kvm_vcpu *vcpu); |
752 | int (*vm_init)(struct kvm *kvm); |
753 | int (*vcpu_init)(struct kvm_vcpu *vcpu); |
754 | int (*vcpu_setup)(struct kvm_vcpu *vcpu); |
755 | diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c |
756 | index bbb69695a0a1..7659da224fcd 100644 |
757 | --- a/arch/mips/kernel/unaligned.c |
758 | +++ b/arch/mips/kernel/unaligned.c |
759 | @@ -109,10 +109,11 @@ static u32 unaligned_action; |
760 | extern void show_registers(struct pt_regs *regs); |
761 | |
762 | #ifdef __BIG_ENDIAN |
763 | -#define LoadHW(addr, value, res) \ |
764 | +#define _LoadHW(addr, value, res, type) \ |
765 | +do { \ |
766 | __asm__ __volatile__ (".set\tnoat\n" \ |
767 | - "1:\t"user_lb("%0", "0(%2)")"\n" \ |
768 | - "2:\t"user_lbu("$1", "1(%2)")"\n\t" \ |
769 | + "1:\t"type##_lb("%0", "0(%2)")"\n" \ |
770 | + "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\ |
771 | "sll\t%0, 0x8\n\t" \ |
772 | "or\t%0, $1\n\t" \ |
773 | "li\t%1, 0\n" \ |
774 | @@ -127,13 +128,15 @@ extern void show_registers(struct pt_regs *regs); |
775 | STR(PTR)"\t2b, 4b\n\t" \ |
776 | ".previous" \ |
777 | : "=&r" (value), "=r" (res) \ |
778 | - : "r" (addr), "i" (-EFAULT)); |
779 | + : "r" (addr), "i" (-EFAULT)); \ |
780 | +} while(0) |
781 | |
782 | #ifndef CONFIG_CPU_MIPSR6 |
783 | -#define LoadW(addr, value, res) \ |
784 | +#define _LoadW(addr, value, res, type) \ |
785 | +do { \ |
786 | __asm__ __volatile__ ( \ |
787 | - "1:\t"user_lwl("%0", "(%2)")"\n" \ |
788 | - "2:\t"user_lwr("%0", "3(%2)")"\n\t" \ |
789 | + "1:\t"type##_lwl("%0", "(%2)")"\n" \ |
790 | + "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\ |
791 | "li\t%1, 0\n" \ |
792 | "3:\n\t" \ |
793 | ".insn\n\t" \ |
794 | @@ -146,21 +149,24 @@ extern void show_registers(struct pt_regs *regs); |
795 | STR(PTR)"\t2b, 4b\n\t" \ |
796 | ".previous" \ |
797 | : "=&r" (value), "=r" (res) \ |
798 | - : "r" (addr), "i" (-EFAULT)); |
799 | + : "r" (addr), "i" (-EFAULT)); \ |
800 | +} while(0) |
801 | + |
802 | #else |
803 | /* MIPSR6 has no lwl instruction */ |
804 | -#define LoadW(addr, value, res) \ |
805 | +#define _LoadW(addr, value, res, type) \ |
806 | +do { \ |
807 | __asm__ __volatile__ ( \ |
808 | ".set\tpush\n" \ |
809 | ".set\tnoat\n\t" \ |
810 | - "1:"user_lb("%0", "0(%2)")"\n\t" \ |
811 | - "2:"user_lbu("$1", "1(%2)")"\n\t" \ |
812 | + "1:"type##_lb("%0", "0(%2)")"\n\t" \ |
813 | + "2:"type##_lbu("$1", "1(%2)")"\n\t" \ |
814 | "sll\t%0, 0x8\n\t" \ |
815 | "or\t%0, $1\n\t" \ |
816 | - "3:"user_lbu("$1", "2(%2)")"\n\t" \ |
817 | + "3:"type##_lbu("$1", "2(%2)")"\n\t" \ |
818 | "sll\t%0, 0x8\n\t" \ |
819 | "or\t%0, $1\n\t" \ |
820 | - "4:"user_lbu("$1", "3(%2)")"\n\t" \ |
821 | + "4:"type##_lbu("$1", "3(%2)")"\n\t" \ |
822 | "sll\t%0, 0x8\n\t" \ |
823 | "or\t%0, $1\n\t" \ |
824 | "li\t%1, 0\n" \ |
825 | @@ -178,14 +184,17 @@ extern void show_registers(struct pt_regs *regs); |
826 | STR(PTR)"\t4b, 11b\n\t" \ |
827 | ".previous" \ |
828 | : "=&r" (value), "=r" (res) \ |
829 | - : "r" (addr), "i" (-EFAULT)); |
830 | + : "r" (addr), "i" (-EFAULT)); \ |
831 | +} while(0) |
832 | + |
833 | #endif /* CONFIG_CPU_MIPSR6 */ |
834 | |
835 | -#define LoadHWU(addr, value, res) \ |
836 | +#define _LoadHWU(addr, value, res, type) \ |
837 | +do { \ |
838 | __asm__ __volatile__ ( \ |
839 | ".set\tnoat\n" \ |
840 | - "1:\t"user_lbu("%0", "0(%2)")"\n" \ |
841 | - "2:\t"user_lbu("$1", "1(%2)")"\n\t" \ |
842 | + "1:\t"type##_lbu("%0", "0(%2)")"\n" \ |
843 | + "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\ |
844 | "sll\t%0, 0x8\n\t" \ |
845 | "or\t%0, $1\n\t" \ |
846 | "li\t%1, 0\n" \ |
847 | @@ -201,13 +210,15 @@ extern void show_registers(struct pt_regs *regs); |
848 | STR(PTR)"\t2b, 4b\n\t" \ |
849 | ".previous" \ |
850 | : "=&r" (value), "=r" (res) \ |
851 | - : "r" (addr), "i" (-EFAULT)); |
852 | + : "r" (addr), "i" (-EFAULT)); \ |
853 | +} while(0) |
854 | |
855 | #ifndef CONFIG_CPU_MIPSR6 |
856 | -#define LoadWU(addr, value, res) \ |
857 | +#define _LoadWU(addr, value, res, type) \ |
858 | +do { \ |
859 | __asm__ __volatile__ ( \ |
860 | - "1:\t"user_lwl("%0", "(%2)")"\n" \ |
861 | - "2:\t"user_lwr("%0", "3(%2)")"\n\t" \ |
862 | + "1:\t"type##_lwl("%0", "(%2)")"\n" \ |
863 | + "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\ |
864 | "dsll\t%0, %0, 32\n\t" \ |
865 | "dsrl\t%0, %0, 32\n\t" \ |
866 | "li\t%1, 0\n" \ |
867 | @@ -222,9 +233,11 @@ extern void show_registers(struct pt_regs *regs); |
868 | STR(PTR)"\t2b, 4b\n\t" \ |
869 | ".previous" \ |
870 | : "=&r" (value), "=r" (res) \ |
871 | - : "r" (addr), "i" (-EFAULT)); |
872 | + : "r" (addr), "i" (-EFAULT)); \ |
873 | +} while(0) |
874 | |
875 | -#define LoadDW(addr, value, res) \ |
876 | +#define _LoadDW(addr, value, res) \ |
877 | +do { \ |
878 | __asm__ __volatile__ ( \ |
879 | "1:\tldl\t%0, (%2)\n" \ |
880 | "2:\tldr\t%0, 7(%2)\n\t" \ |
881 | @@ -240,21 +253,24 @@ extern void show_registers(struct pt_regs *regs); |
882 | STR(PTR)"\t2b, 4b\n\t" \ |
883 | ".previous" \ |
884 | : "=&r" (value), "=r" (res) \ |
885 | - : "r" (addr), "i" (-EFAULT)); |
886 | + : "r" (addr), "i" (-EFAULT)); \ |
887 | +} while(0) |
888 | + |
889 | #else |
890 | /* MIPSR6 has not lwl and ldl instructions */ |
891 | -#define LoadWU(addr, value, res) \ |
892 | +#define _LoadWU(addr, value, res, type) \ |
893 | +do { \ |
894 | __asm__ __volatile__ ( \ |
895 | ".set\tpush\n\t" \ |
896 | ".set\tnoat\n\t" \ |
897 | - "1:"user_lbu("%0", "0(%2)")"\n\t" \ |
898 | - "2:"user_lbu("$1", "1(%2)")"\n\t" \ |
899 | + "1:"type##_lbu("%0", "0(%2)")"\n\t" \ |
900 | + "2:"type##_lbu("$1", "1(%2)")"\n\t" \ |
901 | "sll\t%0, 0x8\n\t" \ |
902 | "or\t%0, $1\n\t" \ |
903 | - "3:"user_lbu("$1", "2(%2)")"\n\t" \ |
904 | + "3:"type##_lbu("$1", "2(%2)")"\n\t" \ |
905 | "sll\t%0, 0x8\n\t" \ |
906 | "or\t%0, $1\n\t" \ |
907 | - "4:"user_lbu("$1", "3(%2)")"\n\t" \ |
908 | + "4:"type##_lbu("$1", "3(%2)")"\n\t" \ |
909 | "sll\t%0, 0x8\n\t" \ |
910 | "or\t%0, $1\n\t" \ |
911 | "li\t%1, 0\n" \ |
912 | @@ -272,9 +288,11 @@ extern void show_registers(struct pt_regs *regs); |
913 | STR(PTR)"\t4b, 11b\n\t" \ |
914 | ".previous" \ |
915 | : "=&r" (value), "=r" (res) \ |
916 | - : "r" (addr), "i" (-EFAULT)); |
917 | + : "r" (addr), "i" (-EFAULT)); \ |
918 | +} while(0) |
919 | |
920 | -#define LoadDW(addr, value, res) \ |
921 | +#define _LoadDW(addr, value, res) \ |
922 | +do { \ |
923 | __asm__ __volatile__ ( \ |
924 | ".set\tpush\n\t" \ |
925 | ".set\tnoat\n\t" \ |
926 | @@ -319,16 +337,19 @@ extern void show_registers(struct pt_regs *regs); |
927 | STR(PTR)"\t8b, 11b\n\t" \ |
928 | ".previous" \ |
929 | : "=&r" (value), "=r" (res) \ |
930 | - : "r" (addr), "i" (-EFAULT)); |
931 | + : "r" (addr), "i" (-EFAULT)); \ |
932 | +} while(0) |
933 | + |
934 | #endif /* CONFIG_CPU_MIPSR6 */ |
935 | |
936 | |
937 | -#define StoreHW(addr, value, res) \ |
938 | +#define _StoreHW(addr, value, res, type) \ |
939 | +do { \ |
940 | __asm__ __volatile__ ( \ |
941 | ".set\tnoat\n" \ |
942 | - "1:\t"user_sb("%1", "1(%2)")"\n" \ |
943 | + "1:\t"type##_sb("%1", "1(%2)")"\n" \ |
944 | "srl\t$1, %1, 0x8\n" \ |
945 | - "2:\t"user_sb("$1", "0(%2)")"\n" \ |
946 | + "2:\t"type##_sb("$1", "0(%2)")"\n" \ |
947 | ".set\tat\n\t" \ |
948 | "li\t%0, 0\n" \ |
949 | "3:\n\t" \ |
950 | @@ -342,13 +363,15 @@ extern void show_registers(struct pt_regs *regs); |
951 | STR(PTR)"\t2b, 4b\n\t" \ |
952 | ".previous" \ |
953 | : "=r" (res) \ |
954 | - : "r" (value), "r" (addr), "i" (-EFAULT)); |
955 | + : "r" (value), "r" (addr), "i" (-EFAULT));\ |
956 | +} while(0) |
957 | |
958 | #ifndef CONFIG_CPU_MIPSR6 |
959 | -#define StoreW(addr, value, res) \ |
960 | +#define _StoreW(addr, value, res, type) \ |
961 | +do { \ |
962 | __asm__ __volatile__ ( \ |
963 | - "1:\t"user_swl("%1", "(%2)")"\n" \ |
964 | - "2:\t"user_swr("%1", "3(%2)")"\n\t" \ |
965 | + "1:\t"type##_swl("%1", "(%2)")"\n" \ |
966 | + "2:\t"type##_swr("%1", "3(%2)")"\n\t"\ |
967 | "li\t%0, 0\n" \ |
968 | "3:\n\t" \ |
969 | ".insn\n\t" \ |
970 | @@ -361,9 +384,11 @@ extern void show_registers(struct pt_regs *regs); |
971 | STR(PTR)"\t2b, 4b\n\t" \ |
972 | ".previous" \ |
973 | : "=r" (res) \ |
974 | - : "r" (value), "r" (addr), "i" (-EFAULT)); |
975 | + : "r" (value), "r" (addr), "i" (-EFAULT)); \ |
976 | +} while(0) |
977 | |
978 | -#define StoreDW(addr, value, res) \ |
979 | +#define _StoreDW(addr, value, res) \ |
980 | +do { \ |
981 | __asm__ __volatile__ ( \ |
982 | "1:\tsdl\t%1,(%2)\n" \ |
983 | "2:\tsdr\t%1, 7(%2)\n\t" \ |
984 | @@ -379,20 +404,23 @@ extern void show_registers(struct pt_regs *regs); |
985 | STR(PTR)"\t2b, 4b\n\t" \ |
986 | ".previous" \ |
987 | : "=r" (res) \ |
988 | - : "r" (value), "r" (addr), "i" (-EFAULT)); |
989 | + : "r" (value), "r" (addr), "i" (-EFAULT)); \ |
990 | +} while(0) |
991 | + |
992 | #else |
993 | /* MIPSR6 has no swl and sdl instructions */ |
994 | -#define StoreW(addr, value, res) \ |
995 | +#define _StoreW(addr, value, res, type) \ |
996 | +do { \ |
997 | __asm__ __volatile__ ( \ |
998 | ".set\tpush\n\t" \ |
999 | ".set\tnoat\n\t" \ |
1000 | - "1:"user_sb("%1", "3(%2)")"\n\t" \ |
1001 | + "1:"type##_sb("%1", "3(%2)")"\n\t" \ |
1002 | "srl\t$1, %1, 0x8\n\t" \ |
1003 | - "2:"user_sb("$1", "2(%2)")"\n\t" \ |
1004 | + "2:"type##_sb("$1", "2(%2)")"\n\t" \ |
1005 | "srl\t$1, $1, 0x8\n\t" \ |
1006 | - "3:"user_sb("$1", "1(%2)")"\n\t" \ |
1007 | + "3:"type##_sb("$1", "1(%2)")"\n\t" \ |
1008 | "srl\t$1, $1, 0x8\n\t" \ |
1009 | - "4:"user_sb("$1", "0(%2)")"\n\t" \ |
1010 | + "4:"type##_sb("$1", "0(%2)")"\n\t" \ |
1011 | ".set\tpop\n\t" \ |
1012 | "li\t%0, 0\n" \ |
1013 | "10:\n\t" \ |
1014 | @@ -409,9 +437,11 @@ extern void show_registers(struct pt_regs *regs); |
1015 | ".previous" \ |
1016 | : "=&r" (res) \ |
1017 | : "r" (value), "r" (addr), "i" (-EFAULT) \ |
1018 | - : "memory"); |
1019 | + : "memory"); \ |
1020 | +} while(0) |
1021 | |
1022 | #define StoreDW(addr, value, res) \ |
1023 | +do { \ |
1024 | __asm__ __volatile__ ( \ |
1025 | ".set\tpush\n\t" \ |
1026 | ".set\tnoat\n\t" \ |
1027 | @@ -451,15 +481,18 @@ extern void show_registers(struct pt_regs *regs); |
1028 | ".previous" \ |
1029 | : "=&r" (res) \ |
1030 | : "r" (value), "r" (addr), "i" (-EFAULT) \ |
1031 | - : "memory"); |
1032 | + : "memory"); \ |
1033 | +} while(0) |
1034 | + |
1035 | #endif /* CONFIG_CPU_MIPSR6 */ |
1036 | |
1037 | #else /* __BIG_ENDIAN */ |
1038 | |
1039 | -#define LoadHW(addr, value, res) \ |
1040 | +#define _LoadHW(addr, value, res, type) \ |
1041 | +do { \ |
1042 | __asm__ __volatile__ (".set\tnoat\n" \ |
1043 | - "1:\t"user_lb("%0", "1(%2)")"\n" \ |
1044 | - "2:\t"user_lbu("$1", "0(%2)")"\n\t" \ |
1045 | + "1:\t"type##_lb("%0", "1(%2)")"\n" \ |
1046 | + "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\ |
1047 | "sll\t%0, 0x8\n\t" \ |
1048 | "or\t%0, $1\n\t" \ |
1049 | "li\t%1, 0\n" \ |
1050 | @@ -474,13 +507,15 @@ extern void show_registers(struct pt_regs *regs); |
1051 | STR(PTR)"\t2b, 4b\n\t" \ |
1052 | ".previous" \ |
1053 | : "=&r" (value), "=r" (res) \ |
1054 | - : "r" (addr), "i" (-EFAULT)); |
1055 | + : "r" (addr), "i" (-EFAULT)); \ |
1056 | +} while(0) |
1057 | |
1058 | #ifndef CONFIG_CPU_MIPSR6 |
1059 | -#define LoadW(addr, value, res) \ |
1060 | +#define _LoadW(addr, value, res, type) \ |
1061 | +do { \ |
1062 | __asm__ __volatile__ ( \ |
1063 | - "1:\t"user_lwl("%0", "3(%2)")"\n" \ |
1064 | - "2:\t"user_lwr("%0", "(%2)")"\n\t" \ |
1065 | + "1:\t"type##_lwl("%0", "3(%2)")"\n" \ |
1066 | + "2:\t"type##_lwr("%0", "(%2)")"\n\t"\ |
1067 | "li\t%1, 0\n" \ |
1068 | "3:\n\t" \ |
1069 | ".insn\n\t" \ |
1070 | @@ -493,21 +528,24 @@ extern void show_registers(struct pt_regs *regs); |
1071 | STR(PTR)"\t2b, 4b\n\t" \ |
1072 | ".previous" \ |
1073 | : "=&r" (value), "=r" (res) \ |
1074 | - : "r" (addr), "i" (-EFAULT)); |
1075 | + : "r" (addr), "i" (-EFAULT)); \ |
1076 | +} while(0) |
1077 | + |
1078 | #else |
1079 | /* MIPSR6 has no lwl instruction */ |
1080 | -#define LoadW(addr, value, res) \ |
1081 | +#define _LoadW(addr, value, res, type) \ |
1082 | +do { \ |
1083 | __asm__ __volatile__ ( \ |
1084 | ".set\tpush\n" \ |
1085 | ".set\tnoat\n\t" \ |
1086 | - "1:"user_lb("%0", "3(%2)")"\n\t" \ |
1087 | - "2:"user_lbu("$1", "2(%2)")"\n\t" \ |
1088 | + "1:"type##_lb("%0", "3(%2)")"\n\t" \ |
1089 | + "2:"type##_lbu("$1", "2(%2)")"\n\t" \ |
1090 | "sll\t%0, 0x8\n\t" \ |
1091 | "or\t%0, $1\n\t" \ |
1092 | - "3:"user_lbu("$1", "1(%2)")"\n\t" \ |
1093 | + "3:"type##_lbu("$1", "1(%2)")"\n\t" \ |
1094 | "sll\t%0, 0x8\n\t" \ |
1095 | "or\t%0, $1\n\t" \ |
1096 | - "4:"user_lbu("$1", "0(%2)")"\n\t" \ |
1097 | + "4:"type##_lbu("$1", "0(%2)")"\n\t" \ |
1098 | "sll\t%0, 0x8\n\t" \ |
1099 | "or\t%0, $1\n\t" \ |
1100 | "li\t%1, 0\n" \ |
1101 | @@ -525,15 +563,18 @@ extern void show_registers(struct pt_regs *regs); |
1102 | STR(PTR)"\t4b, 11b\n\t" \ |
1103 | ".previous" \ |
1104 | : "=&r" (value), "=r" (res) \ |
1105 | - : "r" (addr), "i" (-EFAULT)); |
1106 | + : "r" (addr), "i" (-EFAULT)); \ |
1107 | +} while(0) |
1108 | + |
1109 | #endif /* CONFIG_CPU_MIPSR6 */ |
1110 | |
1111 | |
1112 | -#define LoadHWU(addr, value, res) \ |
1113 | +#define _LoadHWU(addr, value, res, type) \ |
1114 | +do { \ |
1115 | __asm__ __volatile__ ( \ |
1116 | ".set\tnoat\n" \ |
1117 | - "1:\t"user_lbu("%0", "1(%2)")"\n" \ |
1118 | - "2:\t"user_lbu("$1", "0(%2)")"\n\t" \ |
1119 | + "1:\t"type##_lbu("%0", "1(%2)")"\n" \ |
1120 | + "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\ |
1121 | "sll\t%0, 0x8\n\t" \ |
1122 | "or\t%0, $1\n\t" \ |
1123 | "li\t%1, 0\n" \ |
1124 | @@ -549,13 +590,15 @@ extern void show_registers(struct pt_regs *regs); |
1125 | STR(PTR)"\t2b, 4b\n\t" \ |
1126 | ".previous" \ |
1127 | : "=&r" (value), "=r" (res) \ |
1128 | - : "r" (addr), "i" (-EFAULT)); |
1129 | + : "r" (addr), "i" (-EFAULT)); \ |
1130 | +} while(0) |
1131 | |
1132 | #ifndef CONFIG_CPU_MIPSR6 |
1133 | -#define LoadWU(addr, value, res) \ |
1134 | +#define _LoadWU(addr, value, res, type) \ |
1135 | +do { \ |
1136 | __asm__ __volatile__ ( \ |
1137 | - "1:\t"user_lwl("%0", "3(%2)")"\n" \ |
1138 | - "2:\t"user_lwr("%0", "(%2)")"\n\t" \ |
1139 | + "1:\t"type##_lwl("%0", "3(%2)")"\n" \ |
1140 | + "2:\t"type##_lwr("%0", "(%2)")"\n\t"\ |
1141 | "dsll\t%0, %0, 32\n\t" \ |
1142 | "dsrl\t%0, %0, 32\n\t" \ |
1143 | "li\t%1, 0\n" \ |
1144 | @@ -570,9 +613,11 @@ extern void show_registers(struct pt_regs *regs); |
1145 | STR(PTR)"\t2b, 4b\n\t" \ |
1146 | ".previous" \ |
1147 | : "=&r" (value), "=r" (res) \ |
1148 | - : "r" (addr), "i" (-EFAULT)); |
1149 | + : "r" (addr), "i" (-EFAULT)); \ |
1150 | +} while(0) |
1151 | |
1152 | -#define LoadDW(addr, value, res) \ |
1153 | +#define _LoadDW(addr, value, res) \ |
1154 | +do { \ |
1155 | __asm__ __volatile__ ( \ |
1156 | "1:\tldl\t%0, 7(%2)\n" \ |
1157 | "2:\tldr\t%0, (%2)\n\t" \ |
1158 | @@ -588,21 +633,24 @@ extern void show_registers(struct pt_regs *regs); |
1159 | STR(PTR)"\t2b, 4b\n\t" \ |
1160 | ".previous" \ |
1161 | : "=&r" (value), "=r" (res) \ |
1162 | - : "r" (addr), "i" (-EFAULT)); |
1163 | + : "r" (addr), "i" (-EFAULT)); \ |
1164 | +} while(0) |
1165 | + |
1166 | #else |
1167 | /* MIPSR6 has not lwl and ldl instructions */ |
1168 | -#define LoadWU(addr, value, res) \ |
1169 | +#define _LoadWU(addr, value, res, type) \ |
1170 | +do { \ |
1171 | __asm__ __volatile__ ( \ |
1172 | ".set\tpush\n\t" \ |
1173 | ".set\tnoat\n\t" \ |
1174 | - "1:"user_lbu("%0", "3(%2)")"\n\t" \ |
1175 | - "2:"user_lbu("$1", "2(%2)")"\n\t" \ |
1176 | + "1:"type##_lbu("%0", "3(%2)")"\n\t" \ |
1177 | + "2:"type##_lbu("$1", "2(%2)")"\n\t" \ |
1178 | "sll\t%0, 0x8\n\t" \ |
1179 | "or\t%0, $1\n\t" \ |
1180 | - "3:"user_lbu("$1", "1(%2)")"\n\t" \ |
1181 | + "3:"type##_lbu("$1", "1(%2)")"\n\t" \ |
1182 | "sll\t%0, 0x8\n\t" \ |
1183 | "or\t%0, $1\n\t" \ |
1184 | - "4:"user_lbu("$1", "0(%2)")"\n\t" \ |
1185 | + "4:"type##_lbu("$1", "0(%2)")"\n\t" \ |
1186 | "sll\t%0, 0x8\n\t" \ |
1187 | "or\t%0, $1\n\t" \ |
1188 | "li\t%1, 0\n" \ |
1189 | @@ -620,9 +668,11 @@ extern void show_registers(struct pt_regs *regs); |
1190 | STR(PTR)"\t4b, 11b\n\t" \ |
1191 | ".previous" \ |
1192 | : "=&r" (value), "=r" (res) \ |
1193 | - : "r" (addr), "i" (-EFAULT)); |
1194 | + : "r" (addr), "i" (-EFAULT)); \ |
1195 | +} while(0) |
1196 | |
1197 | -#define LoadDW(addr, value, res) \ |
1198 | +#define _LoadDW(addr, value, res) \ |
1199 | +do { \ |
1200 | __asm__ __volatile__ ( \ |
1201 | ".set\tpush\n\t" \ |
1202 | ".set\tnoat\n\t" \ |
1203 | @@ -667,15 +717,17 @@ extern void show_registers(struct pt_regs *regs); |
1204 | STR(PTR)"\t8b, 11b\n\t" \ |
1205 | ".previous" \ |
1206 | : "=&r" (value), "=r" (res) \ |
1207 | - : "r" (addr), "i" (-EFAULT)); |
1208 | + : "r" (addr), "i" (-EFAULT)); \ |
1209 | +} while(0) |
1210 | #endif /* CONFIG_CPU_MIPSR6 */ |
1211 | |
1212 | -#define StoreHW(addr, value, res) \ |
1213 | +#define _StoreHW(addr, value, res, type) \ |
1214 | +do { \ |
1215 | __asm__ __volatile__ ( \ |
1216 | ".set\tnoat\n" \ |
1217 | - "1:\t"user_sb("%1", "0(%2)")"\n" \ |
1218 | + "1:\t"type##_sb("%1", "0(%2)")"\n" \ |
1219 | "srl\t$1,%1, 0x8\n" \ |
1220 | - "2:\t"user_sb("$1", "1(%2)")"\n" \ |
1221 | + "2:\t"type##_sb("$1", "1(%2)")"\n" \ |
1222 | ".set\tat\n\t" \ |
1223 | "li\t%0, 0\n" \ |
1224 | "3:\n\t" \ |
1225 | @@ -689,12 +741,15 @@ extern void show_registers(struct pt_regs *regs); |
1226 | STR(PTR)"\t2b, 4b\n\t" \ |
1227 | ".previous" \ |
1228 | : "=r" (res) \ |
1229 | - : "r" (value), "r" (addr), "i" (-EFAULT)); |
1230 | + : "r" (value), "r" (addr), "i" (-EFAULT));\ |
1231 | +} while(0) |
1232 | + |
1233 | #ifndef CONFIG_CPU_MIPSR6 |
1234 | -#define StoreW(addr, value, res) \ |
1235 | +#define _StoreW(addr, value, res, type) \ |
1236 | +do { \ |
1237 | __asm__ __volatile__ ( \ |
1238 | - "1:\t"user_swl("%1", "3(%2)")"\n" \ |
1239 | - "2:\t"user_swr("%1", "(%2)")"\n\t" \ |
1240 | + "1:\t"type##_swl("%1", "3(%2)")"\n" \ |
1241 | + "2:\t"type##_swr("%1", "(%2)")"\n\t"\ |
1242 | "li\t%0, 0\n" \ |
1243 | "3:\n\t" \ |
1244 | ".insn\n\t" \ |
1245 | @@ -707,9 +762,11 @@ extern void show_registers(struct pt_regs *regs); |
1246 | STR(PTR)"\t2b, 4b\n\t" \ |
1247 | ".previous" \ |
1248 | : "=r" (res) \ |
1249 | - : "r" (value), "r" (addr), "i" (-EFAULT)); |
1250 | + : "r" (value), "r" (addr), "i" (-EFAULT)); \ |
1251 | +} while(0) |
1252 | |
1253 | -#define StoreDW(addr, value, res) \ |
1254 | +#define _StoreDW(addr, value, res) \ |
1255 | +do { \ |
1256 | __asm__ __volatile__ ( \ |
1257 | "1:\tsdl\t%1, 7(%2)\n" \ |
1258 | "2:\tsdr\t%1, (%2)\n\t" \ |
1259 | @@ -725,20 +782,23 @@ extern void show_registers(struct pt_regs *regs); |
1260 | STR(PTR)"\t2b, 4b\n\t" \ |
1261 | ".previous" \ |
1262 | : "=r" (res) \ |
1263 | - : "r" (value), "r" (addr), "i" (-EFAULT)); |
1264 | + : "r" (value), "r" (addr), "i" (-EFAULT)); \ |
1265 | +} while(0) |
1266 | + |
1267 | #else |
1268 | /* MIPSR6 has no swl and sdl instructions */ |
1269 | -#define StoreW(addr, value, res) \ |
1270 | +#define _StoreW(addr, value, res, type) \ |
1271 | +do { \ |
1272 | __asm__ __volatile__ ( \ |
1273 | ".set\tpush\n\t" \ |
1274 | ".set\tnoat\n\t" \ |
1275 | - "1:"user_sb("%1", "0(%2)")"\n\t" \ |
1276 | + "1:"type##_sb("%1", "0(%2)")"\n\t" \ |
1277 | "srl\t$1, %1, 0x8\n\t" \ |
1278 | - "2:"user_sb("$1", "1(%2)")"\n\t" \ |
1279 | + "2:"type##_sb("$1", "1(%2)")"\n\t" \ |
1280 | "srl\t$1, $1, 0x8\n\t" \ |
1281 | - "3:"user_sb("$1", "2(%2)")"\n\t" \ |
1282 | + "3:"type##_sb("$1", "2(%2)")"\n\t" \ |
1283 | "srl\t$1, $1, 0x8\n\t" \ |
1284 | - "4:"user_sb("$1", "3(%2)")"\n\t" \ |
1285 | + "4:"type##_sb("$1", "3(%2)")"\n\t" \ |
1286 | ".set\tpop\n\t" \ |
1287 | "li\t%0, 0\n" \ |
1288 | "10:\n\t" \ |
1289 | @@ -755,9 +815,11 @@ extern void show_registers(struct pt_regs *regs); |
1290 | ".previous" \ |
1291 | : "=&r" (res) \ |
1292 | : "r" (value), "r" (addr), "i" (-EFAULT) \ |
1293 | - : "memory"); |
1294 | + : "memory"); \ |
1295 | +} while(0) |
1296 | |
1297 | -#define StoreDW(addr, value, res) \ |
1298 | +#define _StoreDW(addr, value, res) \ |
1299 | +do { \ |
1300 | __asm__ __volatile__ ( \ |
1301 | ".set\tpush\n\t" \ |
1302 | ".set\tnoat\n\t" \ |
1303 | @@ -797,10 +859,28 @@ extern void show_registers(struct pt_regs *regs); |
1304 | ".previous" \ |
1305 | : "=&r" (res) \ |
1306 | : "r" (value), "r" (addr), "i" (-EFAULT) \ |
1307 | - : "memory"); |
1308 | + : "memory"); \ |
1309 | +} while(0) |
1310 | + |
1311 | #endif /* CONFIG_CPU_MIPSR6 */ |
1312 | #endif |
1313 | |
1314 | +#define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel) |
1315 | +#define LoadHWUE(addr, value, res) _LoadHWU(addr, value, res, user) |
1316 | +#define LoadWU(addr, value, res) _LoadWU(addr, value, res, kernel) |
1317 | +#define LoadWUE(addr, value, res) _LoadWU(addr, value, res, user) |
1318 | +#define LoadHW(addr, value, res) _LoadHW(addr, value, res, kernel) |
1319 | +#define LoadHWE(addr, value, res) _LoadHW(addr, value, res, user) |
1320 | +#define LoadW(addr, value, res) _LoadW(addr, value, res, kernel) |
1321 | +#define LoadWE(addr, value, res) _LoadW(addr, value, res, user) |
1322 | +#define LoadDW(addr, value, res) _LoadDW(addr, value, res) |
1323 | + |
1324 | +#define StoreHW(addr, value, res) _StoreHW(addr, value, res, kernel) |
1325 | +#define StoreHWE(addr, value, res) _StoreHW(addr, value, res, user) |
1326 | +#define StoreW(addr, value, res) _StoreW(addr, value, res, kernel) |
1327 | +#define StoreWE(addr, value, res) _StoreW(addr, value, res, user) |
1328 | +#define StoreDW(addr, value, res) _StoreDW(addr, value, res) |
1329 | + |
1330 | static void emulate_load_store_insn(struct pt_regs *regs, |
1331 | void __user *addr, unsigned int __user *pc) |
1332 | { |
1333 | @@ -872,7 +952,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, |
1334 | set_fs(seg); |
1335 | goto sigbus; |
1336 | } |
1337 | - LoadHW(addr, value, res); |
1338 | + LoadHWE(addr, value, res); |
1339 | if (res) { |
1340 | set_fs(seg); |
1341 | goto fault; |
1342 | @@ -885,7 +965,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, |
1343 | set_fs(seg); |
1344 | goto sigbus; |
1345 | } |
1346 | - LoadW(addr, value, res); |
1347 | + LoadWE(addr, value, res); |
1348 | if (res) { |
1349 | set_fs(seg); |
1350 | goto fault; |
1351 | @@ -898,7 +978,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, |
1352 | set_fs(seg); |
1353 | goto sigbus; |
1354 | } |
1355 | - LoadHWU(addr, value, res); |
1356 | + LoadHWUE(addr, value, res); |
1357 | if (res) { |
1358 | set_fs(seg); |
1359 | goto fault; |
1360 | @@ -913,7 +993,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, |
1361 | } |
1362 | compute_return_epc(regs); |
1363 | value = regs->regs[insn.spec3_format.rt]; |
1364 | - StoreHW(addr, value, res); |
1365 | + StoreHWE(addr, value, res); |
1366 | if (res) { |
1367 | set_fs(seg); |
1368 | goto fault; |
1369 | @@ -926,7 +1006,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, |
1370 | } |
1371 | compute_return_epc(regs); |
1372 | value = regs->regs[insn.spec3_format.rt]; |
1373 | - StoreW(addr, value, res); |
1374 | + StoreWE(addr, value, res); |
1375 | if (res) { |
1376 | set_fs(seg); |
1377 | goto fault; |
1378 | @@ -943,7 +1023,15 @@ static void emulate_load_store_insn(struct pt_regs *regs, |
1379 | if (!access_ok(VERIFY_READ, addr, 2)) |
1380 | goto sigbus; |
1381 | |
1382 | - LoadHW(addr, value, res); |
1383 | + if (config_enabled(CONFIG_EVA)) { |
1384 | + if (segment_eq(get_fs(), get_ds())) |
1385 | + LoadHW(addr, value, res); |
1386 | + else |
1387 | + LoadHWE(addr, value, res); |
1388 | + } else { |
1389 | + LoadHW(addr, value, res); |
1390 | + } |
1391 | + |
1392 | if (res) |
1393 | goto fault; |
1394 | compute_return_epc(regs); |
1395 | @@ -954,7 +1042,15 @@ static void emulate_load_store_insn(struct pt_regs *regs, |
1396 | if (!access_ok(VERIFY_READ, addr, 4)) |
1397 | goto sigbus; |
1398 | |
1399 | - LoadW(addr, value, res); |
1400 | + if (config_enabled(CONFIG_EVA)) { |
1401 | + if (segment_eq(get_fs(), get_ds())) |
1402 | + LoadW(addr, value, res); |
1403 | + else |
1404 | + LoadWE(addr, value, res); |
1405 | + } else { |
1406 | + LoadW(addr, value, res); |
1407 | + } |
1408 | + |
1409 | if (res) |
1410 | goto fault; |
1411 | compute_return_epc(regs); |
1412 | @@ -965,7 +1061,15 @@ static void emulate_load_store_insn(struct pt_regs *regs, |
1413 | if (!access_ok(VERIFY_READ, addr, 2)) |
1414 | goto sigbus; |
1415 | |
1416 | - LoadHWU(addr, value, res); |
1417 | + if (config_enabled(CONFIG_EVA)) { |
1418 | + if (segment_eq(get_fs(), get_ds())) |
1419 | + LoadHWU(addr, value, res); |
1420 | + else |
1421 | + LoadHWUE(addr, value, res); |
1422 | + } else { |
1423 | + LoadHWU(addr, value, res); |
1424 | + } |
1425 | + |
1426 | if (res) |
1427 | goto fault; |
1428 | compute_return_epc(regs); |
1429 | @@ -1024,7 +1128,16 @@ static void emulate_load_store_insn(struct pt_regs *regs, |
1430 | |
1431 | compute_return_epc(regs); |
1432 | value = regs->regs[insn.i_format.rt]; |
1433 | - StoreHW(addr, value, res); |
1434 | + |
1435 | + if (config_enabled(CONFIG_EVA)) { |
1436 | + if (segment_eq(get_fs(), get_ds())) |
1437 | + StoreHW(addr, value, res); |
1438 | + else |
1439 | + StoreHWE(addr, value, res); |
1440 | + } else { |
1441 | + StoreHW(addr, value, res); |
1442 | + } |
1443 | + |
1444 | if (res) |
1445 | goto fault; |
1446 | break; |
1447 | @@ -1035,7 +1148,16 @@ static void emulate_load_store_insn(struct pt_regs *regs, |
1448 | |
1449 | compute_return_epc(regs); |
1450 | value = regs->regs[insn.i_format.rt]; |
1451 | - StoreW(addr, value, res); |
1452 | + |
1453 | + if (config_enabled(CONFIG_EVA)) { |
1454 | + if (segment_eq(get_fs(), get_ds())) |
1455 | + StoreW(addr, value, res); |
1456 | + else |
1457 | + StoreWE(addr, value, res); |
1458 | + } else { |
1459 | + StoreW(addr, value, res); |
1460 | + } |
1461 | + |
1462 | if (res) |
1463 | goto fault; |
1464 | break; |
1465 | diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c |
1466 | index fb3e8dfd1ff6..838d3a6a5b7d 100644 |
1467 | --- a/arch/mips/kvm/emulate.c |
1468 | +++ b/arch/mips/kvm/emulate.c |
1469 | @@ -2176,6 +2176,7 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause, |
1470 | case T_SYSCALL: |
1471 | case T_BREAK: |
1472 | case T_RES_INST: |
1473 | + case T_MSADIS: |
1474 | break; |
1475 | |
1476 | case T_COP_UNUSABLE: |
1477 | diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c |
1478 | index c9eccf5df912..f5e7ddab02f7 100644 |
1479 | --- a/arch/mips/kvm/mips.c |
1480 | +++ b/arch/mips/kvm/mips.c |
1481 | @@ -1119,6 +1119,10 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) |
1482 | ret = kvm_mips_callbacks->handle_break(vcpu); |
1483 | break; |
1484 | |
1485 | + case T_MSADIS: |
1486 | + ret = kvm_mips_callbacks->handle_msa_disabled(vcpu); |
1487 | + break; |
1488 | + |
1489 | default: |
1490 | kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", |
1491 | exccode, opc, kvm_get_inst(opc, vcpu), badvaddr, |
1492 | diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c |
1493 | index fd7257b70e65..4372cc86650c 100644 |
1494 | --- a/arch/mips/kvm/trap_emul.c |
1495 | +++ b/arch/mips/kvm/trap_emul.c |
1496 | @@ -330,6 +330,33 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) |
1497 | return ret; |
1498 | } |
1499 | |
1500 | +static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu) |
1501 | +{ |
1502 | + struct kvm_run *run = vcpu->run; |
1503 | + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; |
1504 | + unsigned long cause = vcpu->arch.host_cp0_cause; |
1505 | + enum emulation_result er = EMULATE_DONE; |
1506 | + int ret = RESUME_GUEST; |
1507 | + |
1508 | + /* No MSA supported in guest, guest reserved instruction exception */ |
1509 | + er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); |
1510 | + |
1511 | + switch (er) { |
1512 | + case EMULATE_DONE: |
1513 | + ret = RESUME_GUEST; |
1514 | + break; |
1515 | + |
1516 | + case EMULATE_FAIL: |
1517 | + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
1518 | + ret = RESUME_HOST; |
1519 | + break; |
1520 | + |
1521 | + default: |
1522 | + BUG(); |
1523 | + } |
1524 | + return ret; |
1525 | +} |
1526 | + |
1527 | static int kvm_trap_emul_vm_init(struct kvm *kvm) |
1528 | { |
1529 | return 0; |
1530 | @@ -470,6 +497,7 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { |
1531 | .handle_syscall = kvm_trap_emul_handle_syscall, |
1532 | .handle_res_inst = kvm_trap_emul_handle_res_inst, |
1533 | .handle_break = kvm_trap_emul_handle_break, |
1534 | + .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled, |
1535 | |
1536 | .vm_init = kvm_trap_emul_vm_init, |
1537 | .vcpu_init = kvm_trap_emul_vcpu_init, |
1538 | diff --git a/arch/mips/loongson/loongson-3/irq.c b/arch/mips/loongson/loongson-3/irq.c |
1539 | index 21221edda7a9..0f75b6b3d218 100644 |
1540 | --- a/arch/mips/loongson/loongson-3/irq.c |
1541 | +++ b/arch/mips/loongson/loongson-3/irq.c |
1542 | @@ -44,6 +44,7 @@ void mach_irq_dispatch(unsigned int pending) |
1543 | |
1544 | static struct irqaction cascade_irqaction = { |
1545 | .handler = no_action, |
1546 | + .flags = IRQF_NO_SUSPEND, |
1547 | .name = "cascade", |
1548 | }; |
1549 | |
1550 | diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c |
1551 | index 8fddd2cdbff7..efe366d618b1 100644 |
1552 | --- a/arch/mips/mti-malta/malta-memory.c |
1553 | +++ b/arch/mips/mti-malta/malta-memory.c |
1554 | @@ -53,6 +53,12 @@ fw_memblock_t * __init fw_getmdesc(int eva) |
1555 | pr_warn("memsize not set in YAMON, set to default (32Mb)\n"); |
1556 | physical_memsize = 0x02000000; |
1557 | } else { |
1558 | + if (memsize > (256 << 20)) { /* memsize should be capped to 256M */ |
1559 | + pr_warn("Unsupported memsize value (0x%lx) detected! " |
1560 | + "Using 0x10000000 (256M) instead\n", |
1561 | + memsize); |
1562 | + memsize = 256 << 20; |
1563 | + } |
1564 | /* If ememsize is set, then set physical_memsize to that */ |
1565 | physical_memsize = ememsize ? : memsize; |
1566 | } |
1567 | diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S |
1568 | index 32a7c828f073..e7567c8a9e79 100644 |
1569 | --- a/arch/mips/power/hibernate.S |
1570 | +++ b/arch/mips/power/hibernate.S |
1571 | @@ -30,6 +30,8 @@ LEAF(swsusp_arch_suspend) |
1572 | END(swsusp_arch_suspend) |
1573 | |
1574 | LEAF(swsusp_arch_resume) |
1575 | + /* Avoid TLB mismatch during and after kernel resume */ |
1576 | + jal local_flush_tlb_all |
1577 | PTR_L t0, restore_pblist |
1578 | 0: |
1579 | PTR_L t1, PBE_ADDRESS(t0) /* source */ |
1580 | @@ -43,7 +45,6 @@ LEAF(swsusp_arch_resume) |
1581 | bne t1, t3, 1b |
1582 | PTR_L t0, PBE_NEXT(t0) |
1583 | bnez t0, 0b |
1584 | - jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */ |
1585 | PTR_LA t0, saved_regs |
1586 | PTR_L ra, PT_R31(t0) |
1587 | PTR_L sp, PT_R29(t0) |
1588 | diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c |
1589 | index ae77b7e59889..c641983bbdd6 100644 |
1590 | --- a/arch/powerpc/kernel/cacheinfo.c |
1591 | +++ b/arch/powerpc/kernel/cacheinfo.c |
1592 | @@ -61,12 +61,22 @@ struct cache_type_info { |
1593 | }; |
1594 | |
1595 | /* These are used to index the cache_type_info array. */ |
1596 | -#define CACHE_TYPE_UNIFIED 0 |
1597 | -#define CACHE_TYPE_INSTRUCTION 1 |
1598 | -#define CACHE_TYPE_DATA 2 |
1599 | +#define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */ |
1600 | +#define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */ |
1601 | +#define CACHE_TYPE_INSTRUCTION 2 |
1602 | +#define CACHE_TYPE_DATA 3 |
1603 | |
1604 | static const struct cache_type_info cache_type_info[] = { |
1605 | { |
1606 | + /* Embedded systems that use cache-size, cache-block-size, |
1607 | + * etc. for the Unified (typically L2) cache. */ |
1608 | + .name = "Unified", |
1609 | + .size_prop = "cache-size", |
1610 | + .line_size_props = { "cache-line-size", |
1611 | + "cache-block-size", }, |
1612 | + .nr_sets_prop = "cache-sets", |
1613 | + }, |
1614 | + { |
1615 | /* PowerPC Processor binding says the [di]-cache-* |
1616 | * must be equal on unified caches, so just use |
1617 | * d-cache properties. */ |
1618 | @@ -293,7 +303,8 @@ static struct cache *cache_find_first_sibling(struct cache *cache) |
1619 | { |
1620 | struct cache *iter; |
1621 | |
1622 | - if (cache->type == CACHE_TYPE_UNIFIED) |
1623 | + if (cache->type == CACHE_TYPE_UNIFIED || |
1624 | + cache->type == CACHE_TYPE_UNIFIED_D) |
1625 | return cache; |
1626 | |
1627 | list_for_each_entry(iter, &cache_list, list) |
1628 | @@ -324,16 +335,29 @@ static bool cache_node_is_unified(const struct device_node *np) |
1629 | return of_get_property(np, "cache-unified", NULL); |
1630 | } |
1631 | |
1632 | -static struct cache *cache_do_one_devnode_unified(struct device_node *node, |
1633 | - int level) |
1634 | +/* |
1635 | + * Unified caches can have two different sets of tags. Most embedded |
1636 | + * use cache-size, etc. for the unified cache size, but open firmware systems |
1637 | + * use d-cache-size, etc. Check on initialization for which type we have, and |
1638 | + * return the appropriate structure type. Assume it's embedded if it isn't |
1639 | + * open firmware. If it's yet a 3rd type, then there will be missing entries |
1640 | + * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need |
1641 | + * to be extended further. |
1642 | + */ |
1643 | +static int cache_is_unified_d(const struct device_node *np) |
1644 | { |
1645 | - struct cache *cache; |
1646 | + return of_get_property(np, |
1647 | + cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ? |
1648 | + CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED; |
1649 | +} |
1650 | |
1651 | +/* |
1652 | + */ |
1653 | +static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level) |
1654 | +{ |
1655 | pr_debug("creating L%d ucache for %s\n", level, node->full_name); |
1656 | |
1657 | - cache = new_cache(CACHE_TYPE_UNIFIED, level, node); |
1658 | - |
1659 | - return cache; |
1660 | + return new_cache(cache_is_unified_d(node), level, node); |
1661 | } |
1662 | |
1663 | static struct cache *cache_do_one_devnode_split(struct device_node *node, |
1664 | diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c |
1665 | index 7e408bfc7948..cecbe00cee24 100644 |
1666 | --- a/arch/powerpc/mm/hugetlbpage.c |
1667 | +++ b/arch/powerpc/mm/hugetlbpage.c |
1668 | @@ -581,6 +581,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, |
1669 | pmd = pmd_offset(pud, start); |
1670 | pud_clear(pud); |
1671 | pmd_free_tlb(tlb, pmd, start); |
1672 | + mm_dec_nr_pmds(tlb->mm); |
1673 | } |
1674 | |
1675 | static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, |
1676 | diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c |
1677 | index 2396dda282cd..ead55351b254 100644 |
1678 | --- a/arch/powerpc/perf/callchain.c |
1679 | +++ b/arch/powerpc/perf/callchain.c |
1680 | @@ -243,7 +243,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry, |
1681 | sp = regs->gpr[1]; |
1682 | perf_callchain_store(entry, next_ip); |
1683 | |
1684 | - for (;;) { |
1685 | + while (entry->nr < PERF_MAX_STACK_DEPTH) { |
1686 | fp = (unsigned long __user *) sp; |
1687 | if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp)) |
1688 | return; |
1689 | diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c |
1690 | index 4c11421847be..3af8324c122e 100644 |
1691 | --- a/arch/powerpc/platforms/cell/interrupt.c |
1692 | +++ b/arch/powerpc/platforms/cell/interrupt.c |
1693 | @@ -163,7 +163,7 @@ static unsigned int iic_get_irq(void) |
1694 | |
1695 | void iic_setup_cpu(void) |
1696 | { |
1697 | - out_be64(this_cpu_ptr(&cpu_iic.regs->prio), 0xff); |
1698 | + out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff); |
1699 | } |
1700 | |
1701 | u8 iic_get_target_id(int cpu) |
1702 | diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c |
1703 | index c7c8720aa39f..63db1b03e756 100644 |
1704 | --- a/arch/powerpc/platforms/cell/iommu.c |
1705 | +++ b/arch/powerpc/platforms/cell/iommu.c |
1706 | @@ -197,7 +197,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages, |
1707 | |
1708 | io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); |
1709 | |
1710 | - for (i = 0; i < npages; i++, uaddr += tbl->it_page_shift) |
1711 | + for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift)) |
1712 | io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask); |
1713 | |
1714 | mb(); |
1715 | diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c |
1716 | index 6c9ff2b95119..1d9369e1e0f4 100644 |
1717 | --- a/arch/powerpc/platforms/powernv/pci-ioda.c |
1718 | +++ b/arch/powerpc/platforms/powernv/pci-ioda.c |
1719 | @@ -1777,7 +1777,8 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose, |
1720 | region.start += phb->ioda.io_segsize; |
1721 | index++; |
1722 | } |
1723 | - } else if (res->flags & IORESOURCE_MEM) { |
1724 | + } else if ((res->flags & IORESOURCE_MEM) && |
1725 | + !pnv_pci_is_mem_pref_64(res->flags)) { |
1726 | region.start = res->start - |
1727 | hose->mem_offset[0] - |
1728 | phb->ioda.m32_pci_base; |
1729 | diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c |
1730 | index 1c4c5accd220..d3236c9e226b 100644 |
1731 | --- a/arch/s390/kernel/suspend.c |
1732 | +++ b/arch/s390/kernel/suspend.c |
1733 | @@ -138,6 +138,8 @@ int pfn_is_nosave(unsigned long pfn) |
1734 | { |
1735 | unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); |
1736 | unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end)); |
1737 | + unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1; |
1738 | + unsigned long stext_pfn = PFN_DOWN(__pa(&_stext)); |
1739 | |
1740 | /* Always save lowcore pages (LC protection might be enabled). */ |
1741 | if (pfn <= LC_PAGES) |
1742 | @@ -145,6 +147,8 @@ int pfn_is_nosave(unsigned long pfn) |
1743 | if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn) |
1744 | return 1; |
1745 | /* Skip memory holes and read-only pages (NSS, DCSS, ...). */ |
1746 | + if (pfn >= stext_pfn && pfn <= eshared_pfn) |
1747 | + return ipl_info.type == IPL_TYPE_NSS ? 1 : 0; |
1748 | if (tprot(PFN_PHYS(pfn))) |
1749 | return 1; |
1750 | return 0; |
1751 | diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c |
1752 | index 073b5f387d1d..e7bc2fdb6f67 100644 |
1753 | --- a/arch/s390/kvm/interrupt.c |
1754 | +++ b/arch/s390/kvm/interrupt.c |
1755 | @@ -17,6 +17,7 @@ |
1756 | #include <linux/signal.h> |
1757 | #include <linux/slab.h> |
1758 | #include <linux/bitmap.h> |
1759 | +#include <linux/vmalloc.h> |
1760 | #include <asm/asm-offsets.h> |
1761 | #include <asm/uaccess.h> |
1762 | #include <asm/sclp.h> |
1763 | @@ -1332,10 +1333,10 @@ int kvm_s390_inject_vm(struct kvm *kvm, |
1764 | return rc; |
1765 | } |
1766 | |
1767 | -void kvm_s390_reinject_io_int(struct kvm *kvm, |
1768 | +int kvm_s390_reinject_io_int(struct kvm *kvm, |
1769 | struct kvm_s390_interrupt_info *inti) |
1770 | { |
1771 | - __inject_vm(kvm, inti); |
1772 | + return __inject_vm(kvm, inti); |
1773 | } |
1774 | |
1775 | int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, |
1776 | @@ -1455,61 +1456,66 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm) |
1777 | spin_unlock(&fi->lock); |
1778 | } |
1779 | |
1780 | -static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti, |
1781 | - u8 *addr) |
1782 | +static void inti_to_irq(struct kvm_s390_interrupt_info *inti, |
1783 | + struct kvm_s390_irq *irq) |
1784 | { |
1785 | - struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr; |
1786 | - struct kvm_s390_irq irq = {0}; |
1787 | - |
1788 | - irq.type = inti->type; |
1789 | + irq->type = inti->type; |
1790 | switch (inti->type) { |
1791 | case KVM_S390_INT_PFAULT_INIT: |
1792 | case KVM_S390_INT_PFAULT_DONE: |
1793 | case KVM_S390_INT_VIRTIO: |
1794 | case KVM_S390_INT_SERVICE: |
1795 | - irq.u.ext = inti->ext; |
1796 | + irq->u.ext = inti->ext; |
1797 | break; |
1798 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: |
1799 | - irq.u.io = inti->io; |
1800 | + irq->u.io = inti->io; |
1801 | break; |
1802 | case KVM_S390_MCHK: |
1803 | - irq.u.mchk = inti->mchk; |
1804 | + irq->u.mchk = inti->mchk; |
1805 | break; |
1806 | - default: |
1807 | - return -EINVAL; |
1808 | } |
1809 | - |
1810 | - if (copy_to_user(uptr, &irq, sizeof(irq))) |
1811 | - return -EFAULT; |
1812 | - |
1813 | - return 0; |
1814 | } |
1815 | |
1816 | -static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len) |
1817 | +static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len) |
1818 | { |
1819 | struct kvm_s390_interrupt_info *inti; |
1820 | struct kvm_s390_float_interrupt *fi; |
1821 | + struct kvm_s390_irq *buf; |
1822 | + int max_irqs; |
1823 | int ret = 0; |
1824 | int n = 0; |
1825 | |
1826 | + if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0) |
1827 | + return -EINVAL; |
1828 | + |
1829 | + /* |
1830 | + * We are already using -ENOMEM to signal |
1831 | + * userspace it may retry with a bigger buffer, |
1832 | + * so we need to use something else for this case |
1833 | + */ |
1834 | + buf = vzalloc(len); |
1835 | + if (!buf) |
1836 | + return -ENOBUFS; |
1837 | + |
1838 | + max_irqs = len / sizeof(struct kvm_s390_irq); |
1839 | + |
1840 | fi = &kvm->arch.float_int; |
1841 | spin_lock(&fi->lock); |
1842 | - |
1843 | list_for_each_entry(inti, &fi->list, list) { |
1844 | - if (len < sizeof(struct kvm_s390_irq)) { |
1845 | + if (n == max_irqs) { |
1846 | /* signal userspace to try again */ |
1847 | ret = -ENOMEM; |
1848 | break; |
1849 | } |
1850 | - ret = copy_irq_to_user(inti, buf); |
1851 | - if (ret) |
1852 | - break; |
1853 | - buf += sizeof(struct kvm_s390_irq); |
1854 | - len -= sizeof(struct kvm_s390_irq); |
1855 | + inti_to_irq(inti, &buf[n]); |
1856 | n++; |
1857 | } |
1858 | - |
1859 | spin_unlock(&fi->lock); |
1860 | + if (!ret && n > 0) { |
1861 | + if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n)) |
1862 | + ret = -EFAULT; |
1863 | + } |
1864 | + vfree(buf); |
1865 | |
1866 | return ret < 0 ? ret : n; |
1867 | } |
1868 | @@ -1520,7 +1526,7 @@ static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) |
1869 | |
1870 | switch (attr->group) { |
1871 | case KVM_DEV_FLIC_GET_ALL_IRQS: |
1872 | - r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr, |
1873 | + r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr, |
1874 | attr->attr); |
1875 | break; |
1876 | default: |
1877 | diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h |
1878 | index c34109aa552d..6995a3080a0e 100644 |
1879 | --- a/arch/s390/kvm/kvm-s390.h |
1880 | +++ b/arch/s390/kvm/kvm-s390.h |
1881 | @@ -151,8 +151,8 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, |
1882 | int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); |
1883 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, |
1884 | u64 cr6, u64 schid); |
1885 | -void kvm_s390_reinject_io_int(struct kvm *kvm, |
1886 | - struct kvm_s390_interrupt_info *inti); |
1887 | +int kvm_s390_reinject_io_int(struct kvm *kvm, |
1888 | + struct kvm_s390_interrupt_info *inti); |
1889 | int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked); |
1890 | |
1891 | /* implemented in intercept.c */ |
1892 | diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c |
1893 | index 351116939ea2..b982fbca34df 100644 |
1894 | --- a/arch/s390/kvm/priv.c |
1895 | +++ b/arch/s390/kvm/priv.c |
1896 | @@ -229,18 +229,19 @@ static int handle_tpi(struct kvm_vcpu *vcpu) |
1897 | struct kvm_s390_interrupt_info *inti; |
1898 | unsigned long len; |
1899 | u32 tpi_data[3]; |
1900 | - int cc, rc; |
1901 | + int rc; |
1902 | u64 addr; |
1903 | |
1904 | - rc = 0; |
1905 | addr = kvm_s390_get_base_disp_s(vcpu); |
1906 | if (addr & 3) |
1907 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
1908 | - cc = 0; |
1909 | + |
1910 | inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0); |
1911 | - if (!inti) |
1912 | - goto no_interrupt; |
1913 | - cc = 1; |
1914 | + if (!inti) { |
1915 | + kvm_s390_set_psw_cc(vcpu, 0); |
1916 | + return 0; |
1917 | + } |
1918 | + |
1919 | tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr; |
1920 | tpi_data[1] = inti->io.io_int_parm; |
1921 | tpi_data[2] = inti->io.io_int_word; |
1922 | @@ -251,30 +252,38 @@ static int handle_tpi(struct kvm_vcpu *vcpu) |
1923 | */ |
1924 | len = sizeof(tpi_data) - 4; |
1925 | rc = write_guest(vcpu, addr, &tpi_data, len); |
1926 | - if (rc) |
1927 | - return kvm_s390_inject_prog_cond(vcpu, rc); |
1928 | + if (rc) { |
1929 | + rc = kvm_s390_inject_prog_cond(vcpu, rc); |
1930 | + goto reinject_interrupt; |
1931 | + } |
1932 | } else { |
1933 | /* |
1934 | * Store the three-word I/O interruption code into |
1935 | * the appropriate lowcore area. |
1936 | */ |
1937 | len = sizeof(tpi_data); |
1938 | - if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) |
1939 | + if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) { |
1940 | + /* failed writes to the low core are not recoverable */ |
1941 | rc = -EFAULT; |
1942 | + goto reinject_interrupt; |
1943 | + } |
1944 | } |
1945 | + |
1946 | + /* irq was successfully handed to the guest */ |
1947 | + kfree(inti); |
1948 | + kvm_s390_set_psw_cc(vcpu, 1); |
1949 | + return 0; |
1950 | +reinject_interrupt: |
1951 | /* |
1952 | * If we encounter a problem storing the interruption code, the |
1953 | * instruction is suppressed from the guest's view: reinject the |
1954 | * interrupt. |
1955 | */ |
1956 | - if (!rc) |
1957 | + if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) { |
1958 | kfree(inti); |
1959 | - else |
1960 | - kvm_s390_reinject_io_int(vcpu->kvm, inti); |
1961 | -no_interrupt: |
1962 | - /* Set condition code and we're done. */ |
1963 | - if (!rc) |
1964 | - kvm_s390_set_psw_cc(vcpu, cc); |
1965 | + rc = -EFAULT; |
1966 | + } |
1967 | + /* don't set the cc, a pgm irq was injected or we drop to user space */ |
1968 | return rc ? -EFAULT : 0; |
1969 | } |
1970 | |
1971 | @@ -467,6 +476,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) |
1972 | for (n = mem->count - 1; n > 0 ; n--) |
1973 | memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0])); |
1974 | |
1975 | + memset(&mem->vm[0], 0, sizeof(mem->vm[0])); |
1976 | mem->vm[0].cpus_total = cpus; |
1977 | mem->vm[0].cpus_configured = cpus; |
1978 | mem->vm[0].cpus_standby = 0; |
1979 | diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h |
1980 | index 47f29b1d1846..e7814b74caf8 100644 |
1981 | --- a/arch/x86/include/asm/insn.h |
1982 | +++ b/arch/x86/include/asm/insn.h |
1983 | @@ -69,7 +69,7 @@ struct insn { |
1984 | const insn_byte_t *next_byte; |
1985 | }; |
1986 | |
1987 | -#define MAX_INSN_SIZE 16 |
1988 | +#define MAX_INSN_SIZE 15 |
1989 | |
1990 | #define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6) |
1991 | #define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3) |
1992 | diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h |
1993 | index a1410db38a1a..653dfa7662e1 100644 |
1994 | --- a/arch/x86/include/asm/mwait.h |
1995 | +++ b/arch/x86/include/asm/mwait.h |
1996 | @@ -30,6 +30,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx) |
1997 | :: "a" (eax), "c" (ecx)); |
1998 | } |
1999 | |
2000 | +static inline void __sti_mwait(unsigned long eax, unsigned long ecx) |
2001 | +{ |
2002 | + trace_hardirqs_on(); |
2003 | + /* "mwait %eax, %ecx;" */ |
2004 | + asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" |
2005 | + :: "a" (eax), "c" (ecx)); |
2006 | +} |
2007 | + |
2008 | /* |
2009 | * This uses new MONITOR/MWAIT instructions on P4 processors with PNI, |
2010 | * which can obviate IPI to trigger checking of need_resched. |
2011 | diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h |
2012 | index d6b078e9fa28..25b1cc07d496 100644 |
2013 | --- a/arch/x86/include/asm/pvclock.h |
2014 | +++ b/arch/x86/include/asm/pvclock.h |
2015 | @@ -95,6 +95,7 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, |
2016 | |
2017 | struct pvclock_vsyscall_time_info { |
2018 | struct pvclock_vcpu_time_info pvti; |
2019 | + u32 migrate_count; |
2020 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); |
2021 | |
2022 | #define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info) |
2023 | diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c |
2024 | index 073983398364..666bcf14ce10 100644 |
2025 | --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c |
2026 | +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c |
2027 | @@ -557,6 +557,8 @@ struct event_constraint intel_core2_pebs_event_constraints[] = { |
2028 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */ |
2029 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */ |
2030 | INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ |
2031 | + /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ |
2032 | + INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01), |
2033 | EVENT_CONSTRAINT_END |
2034 | }; |
2035 | |
2036 | @@ -564,6 +566,8 @@ struct event_constraint intel_atom_pebs_event_constraints[] = { |
2037 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ |
2038 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */ |
2039 | INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ |
2040 | + /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ |
2041 | + INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01), |
2042 | EVENT_CONSTRAINT_END |
2043 | }; |
2044 | |
2045 | @@ -587,6 +591,8 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = { |
2046 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ |
2047 | INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ |
2048 | INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ |
2049 | + /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ |
2050 | + INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f), |
2051 | EVENT_CONSTRAINT_END |
2052 | }; |
2053 | |
2054 | @@ -602,6 +608,8 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = { |
2055 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ |
2056 | INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ |
2057 | INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ |
2058 | + /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ |
2059 | + INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f), |
2060 | EVENT_CONSTRAINT_END |
2061 | }; |
2062 | |
2063 | diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c |
2064 | index 046e2d620bbe..a388bb883128 100644 |
2065 | --- a/arch/x86/kernel/process.c |
2066 | +++ b/arch/x86/kernel/process.c |
2067 | @@ -24,6 +24,7 @@ |
2068 | #include <asm/syscalls.h> |
2069 | #include <asm/idle.h> |
2070 | #include <asm/uaccess.h> |
2071 | +#include <asm/mwait.h> |
2072 | #include <asm/i387.h> |
2073 | #include <asm/fpu-internal.h> |
2074 | #include <asm/debugreg.h> |
2075 | @@ -399,6 +400,53 @@ static void amd_e400_idle(void) |
2076 | default_idle(); |
2077 | } |
2078 | |
2079 | +/* |
2080 | + * Intel Core2 and older machines prefer MWAIT over HALT for C1. |
2081 | + * We can't rely on cpuidle installing MWAIT, because it will not load |
2082 | + * on systems that support only C1 -- so the boot default must be MWAIT. |
2083 | + * |
2084 | + * Some AMD machines are the opposite, they depend on using HALT. |
2085 | + * |
2086 | + * So for default C1, which is used during boot until cpuidle loads, |
2087 | + * use MWAIT-C1 on Intel HW that has it, else use HALT. |
2088 | + */ |
2089 | +static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c) |
2090 | +{ |
2091 | + if (c->x86_vendor != X86_VENDOR_INTEL) |
2092 | + return 0; |
2093 | + |
2094 | + if (!cpu_has(c, X86_FEATURE_MWAIT)) |
2095 | + return 0; |
2096 | + |
2097 | + return 1; |
2098 | +} |
2099 | + |
2100 | +/* |
2101 | + * MONITOR/MWAIT with no hints, used for default default C1 state. |
2102 | + * This invokes MWAIT with interrutps enabled and no flags, |
2103 | + * which is backwards compatible with the original MWAIT implementation. |
2104 | + */ |
2105 | + |
2106 | +static void mwait_idle(void) |
2107 | +{ |
2108 | + if (!current_set_polling_and_test()) { |
2109 | + if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) { |
2110 | + smp_mb(); /* quirk */ |
2111 | + clflush((void *)¤t_thread_info()->flags); |
2112 | + smp_mb(); /* quirk */ |
2113 | + } |
2114 | + |
2115 | + __monitor((void *)¤t_thread_info()->flags, 0, 0); |
2116 | + if (!need_resched()) |
2117 | + __sti_mwait(0, 0); |
2118 | + else |
2119 | + local_irq_enable(); |
2120 | + } else { |
2121 | + local_irq_enable(); |
2122 | + } |
2123 | + __current_clr_polling(); |
2124 | +} |
2125 | + |
2126 | void select_idle_routine(const struct cpuinfo_x86 *c) |
2127 | { |
2128 | #ifdef CONFIG_SMP |
2129 | @@ -412,6 +460,9 @@ void select_idle_routine(const struct cpuinfo_x86 *c) |
2130 | /* E400: APIC timer interrupt does not wake up CPU from C1e */ |
2131 | pr_info("using AMD E400 aware idle routine\n"); |
2132 | x86_idle = amd_e400_idle; |
2133 | + } else if (prefer_mwait_c1_over_halt(c)) { |
2134 | + pr_info("using mwait in idle threads\n"); |
2135 | + x86_idle = mwait_idle; |
2136 | } else |
2137 | x86_idle = default_idle; |
2138 | } |
2139 | diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c |
2140 | index 2f355d229a58..e5ecd20e72dd 100644 |
2141 | --- a/arch/x86/kernel/pvclock.c |
2142 | +++ b/arch/x86/kernel/pvclock.c |
2143 | @@ -141,7 +141,46 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock, |
2144 | set_normalized_timespec(ts, now.tv_sec, now.tv_nsec); |
2145 | } |
2146 | |
2147 | +static struct pvclock_vsyscall_time_info *pvclock_vdso_info; |
2148 | + |
2149 | +static struct pvclock_vsyscall_time_info * |
2150 | +pvclock_get_vsyscall_user_time_info(int cpu) |
2151 | +{ |
2152 | + if (!pvclock_vdso_info) { |
2153 | + BUG(); |
2154 | + return NULL; |
2155 | + } |
2156 | + |
2157 | + return &pvclock_vdso_info[cpu]; |
2158 | +} |
2159 | + |
2160 | +struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu) |
2161 | +{ |
2162 | + return &pvclock_get_vsyscall_user_time_info(cpu)->pvti; |
2163 | +} |
2164 | + |
2165 | #ifdef CONFIG_X86_64 |
2166 | +static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l, |
2167 | + void *v) |
2168 | +{ |
2169 | + struct task_migration_notifier *mn = v; |
2170 | + struct pvclock_vsyscall_time_info *pvti; |
2171 | + |
2172 | + pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu); |
2173 | + |
2174 | + /* this is NULL when pvclock vsyscall is not initialized */ |
2175 | + if (unlikely(pvti == NULL)) |
2176 | + return NOTIFY_DONE; |
2177 | + |
2178 | + pvti->migrate_count++; |
2179 | + |
2180 | + return NOTIFY_DONE; |
2181 | +} |
2182 | + |
2183 | +static struct notifier_block pvclock_migrate = { |
2184 | + .notifier_call = pvclock_task_migrate, |
2185 | +}; |
2186 | + |
2187 | /* |
2188 | * Initialize the generic pvclock vsyscall state. This will allocate |
2189 | * a/some page(s) for the per-vcpu pvclock information, set up a |
2190 | @@ -155,12 +194,17 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i, |
2191 | |
2192 | WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE); |
2193 | |
2194 | + pvclock_vdso_info = i; |
2195 | + |
2196 | for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) { |
2197 | __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx, |
2198 | __pa(i) + (idx*PAGE_SIZE), |
2199 | PAGE_KERNEL_VVAR); |
2200 | } |
2201 | |
2202 | + |
2203 | + register_task_migration_notifier(&pvclock_migrate); |
2204 | + |
2205 | return 0; |
2206 | } |
2207 | #endif |
2208 | diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
2209 | index ae4f6d35d19c..a60bd3aa0965 100644 |
2210 | --- a/arch/x86/kvm/vmx.c |
2211 | +++ b/arch/x86/kvm/vmx.c |
2212 | @@ -3621,8 +3621,16 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) |
2213 | |
2214 | static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) |
2215 | { |
2216 | - unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ? |
2217 | - KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON); |
2218 | + /* |
2219 | + * Pass through host's Machine Check Enable value to hw_cr4, which |
2220 | + * is in force while we are in guest mode. Do not let guests control |
2221 | + * this bit, even if host CR4.MCE == 0. |
2222 | + */ |
2223 | + unsigned long hw_cr4 = |
2224 | + (cr4_read_shadow() & X86_CR4_MCE) | |
2225 | + (cr4 & ~X86_CR4_MCE) | |
2226 | + (to_vmx(vcpu)->rmode.vm86_active ? |
2227 | + KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON); |
2228 | |
2229 | if (cr4 & X86_CR4_VMXE) { |
2230 | /* |
2231 | diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
2232 | index 32bf19ef3115..e222ba5d2beb 100644 |
2233 | --- a/arch/x86/kvm/x86.c |
2234 | +++ b/arch/x86/kvm/x86.c |
2235 | @@ -5775,7 +5775,6 @@ int kvm_arch_init(void *opaque) |
2236 | kvm_set_mmio_spte_mask(); |
2237 | |
2238 | kvm_x86_ops = ops; |
2239 | - kvm_init_msr_list(); |
2240 | |
2241 | kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, |
2242 | PT_DIRTY_MASK, PT64_NX_MASK, 0); |
2243 | @@ -7209,7 +7208,14 @@ void kvm_arch_hardware_disable(void) |
2244 | |
2245 | int kvm_arch_hardware_setup(void) |
2246 | { |
2247 | - return kvm_x86_ops->hardware_setup(); |
2248 | + int r; |
2249 | + |
2250 | + r = kvm_x86_ops->hardware_setup(); |
2251 | + if (r != 0) |
2252 | + return r; |
2253 | + |
2254 | + kvm_init_msr_list(); |
2255 | + return 0; |
2256 | } |
2257 | |
2258 | void kvm_arch_hardware_unsetup(void) |
2259 | diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c |
2260 | index 1313ae6b478b..85994f5d48e4 100644 |
2261 | --- a/arch/x86/lib/insn.c |
2262 | +++ b/arch/x86/lib/insn.c |
2263 | @@ -52,6 +52,13 @@ |
2264 | */ |
2265 | void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64) |
2266 | { |
2267 | + /* |
2268 | + * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid |
2269 | + * even if the input buffer is long enough to hold them. |
2270 | + */ |
2271 | + if (buf_len > MAX_INSN_SIZE) |
2272 | + buf_len = MAX_INSN_SIZE; |
2273 | + |
2274 | memset(insn, 0, sizeof(*insn)); |
2275 | insn->kaddr = kaddr; |
2276 | insn->end_kaddr = kaddr + buf_len; |
2277 | diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c |
2278 | index 1f33b3d1fd68..0a42327a59d7 100644 |
2279 | --- a/arch/x86/lib/usercopy_64.c |
2280 | +++ b/arch/x86/lib/usercopy_64.c |
2281 | @@ -82,7 +82,7 @@ copy_user_handle_tail(char *to, char *from, unsigned len) |
2282 | clac(); |
2283 | |
2284 | /* If the destination is a kernel buffer, we always clear the end */ |
2285 | - if ((unsigned long)to >= TASK_SIZE_MAX) |
2286 | + if (!__addr_ok(to)) |
2287 | memset(to, 0, len); |
2288 | return len; |
2289 | } |
2290 | diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c |
2291 | index 9793322751e0..40d2473836c9 100644 |
2292 | --- a/arch/x86/vdso/vclock_gettime.c |
2293 | +++ b/arch/x86/vdso/vclock_gettime.c |
2294 | @@ -82,18 +82,15 @@ static notrace cycle_t vread_pvclock(int *mode) |
2295 | cycle_t ret; |
2296 | u64 last; |
2297 | u32 version; |
2298 | + u32 migrate_count; |
2299 | u8 flags; |
2300 | unsigned cpu, cpu1; |
2301 | |
2302 | |
2303 | /* |
2304 | - * Note: hypervisor must guarantee that: |
2305 | - * 1. cpu ID number maps 1:1 to per-CPU pvclock time info. |
2306 | - * 2. that per-CPU pvclock time info is updated if the |
2307 | - * underlying CPU changes. |
2308 | - * 3. that version is increased whenever underlying CPU |
2309 | - * changes. |
2310 | - * |
2311 | + * When looping to get a consistent (time-info, tsc) pair, we |
2312 | + * also need to deal with the possibility we can switch vcpus, |
2313 | + * so make sure we always re-fetch time-info for the current vcpu. |
2314 | */ |
2315 | do { |
2316 | cpu = __getcpu() & VGETCPU_CPU_MASK; |
2317 | @@ -102,20 +99,27 @@ static notrace cycle_t vread_pvclock(int *mode) |
2318 | * __getcpu() calls (Gleb). |
2319 | */ |
2320 | |
2321 | - pvti = get_pvti(cpu); |
2322 | + /* Make sure migrate_count will change if we leave the VCPU. */ |
2323 | + do { |
2324 | + pvti = get_pvti(cpu); |
2325 | + migrate_count = pvti->migrate_count; |
2326 | + |
2327 | + cpu1 = cpu; |
2328 | + cpu = __getcpu() & VGETCPU_CPU_MASK; |
2329 | + } while (unlikely(cpu != cpu1)); |
2330 | |
2331 | version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags); |
2332 | |
2333 | /* |
2334 | * Test we're still on the cpu as well as the version. |
2335 | - * We could have been migrated just after the first |
2336 | - * vgetcpu but before fetching the version, so we |
2337 | - * wouldn't notice a version change. |
2338 | + * - We must read TSC of pvti's VCPU. |
2339 | + * - KVM doesn't follow the versioning protocol, so data could |
2340 | + * change before version if we left the VCPU. |
2341 | */ |
2342 | - cpu1 = __getcpu() & VGETCPU_CPU_MASK; |
2343 | - } while (unlikely(cpu != cpu1 || |
2344 | - (pvti->pvti.version & 1) || |
2345 | - pvti->pvti.version != version)); |
2346 | + smp_rmb(); |
2347 | + } while (unlikely((pvti->pvti.version & 1) || |
2348 | + pvti->pvti.version != version || |
2349 | + pvti->migrate_count != migrate_count)); |
2350 | |
2351 | if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT))) |
2352 | *mode = VCLOCK_NONE; |
2353 | diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig |
2354 | index e31d4949124a..87be10e8b57a 100644 |
2355 | --- a/arch/xtensa/Kconfig |
2356 | +++ b/arch/xtensa/Kconfig |
2357 | @@ -428,6 +428,36 @@ config DEFAULT_MEM_SIZE |
2358 | |
2359 | If unsure, leave the default value here. |
2360 | |
2361 | +config XTFPGA_LCD |
2362 | + bool "Enable XTFPGA LCD driver" |
2363 | + depends on XTENSA_PLATFORM_XTFPGA |
2364 | + default n |
2365 | + help |
2366 | + There's a 2x16 LCD on most of XTFPGA boards, kernel may output |
2367 | + progress messages there during bootup/shutdown. It may be useful |
2368 | + during board bringup. |
2369 | + |
2370 | + If unsure, say N. |
2371 | + |
2372 | +config XTFPGA_LCD_BASE_ADDR |
2373 | + hex "XTFPGA LCD base address" |
2374 | + depends on XTFPGA_LCD |
2375 | + default "0x0d0c0000" |
2376 | + help |
2377 | + Base address of the LCD controller inside KIO region. |
2378 | + Different boards from XTFPGA family have LCD controller at different |
2379 | + addresses. Please consult prototyping user guide for your board for |
2380 | + the correct address. Wrong address here may lead to hardware lockup. |
2381 | + |
2382 | +config XTFPGA_LCD_8BIT_ACCESS |
2383 | + bool "Use 8-bit access to XTFPGA LCD" |
2384 | + depends on XTFPGA_LCD |
2385 | + default n |
2386 | + help |
2387 | + LCD may be connected with 4- or 8-bit interface, 8-bit access may |
2388 | + only be used with 8-bit interface. Please consult prototyping user |
2389 | + guide for your board for the correct interface width. |
2390 | + |
2391 | endmenu |
2392 | |
2393 | menu "Executable file formats" |
2394 | diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h |
2395 | index db5bb72e2f4e..62d84657c60b 100644 |
2396 | --- a/arch/xtensa/include/uapi/asm/unistd.h |
2397 | +++ b/arch/xtensa/include/uapi/asm/unistd.h |
2398 | @@ -715,7 +715,7 @@ __SYSCALL(323, sys_process_vm_writev, 6) |
2399 | __SYSCALL(324, sys_name_to_handle_at, 5) |
2400 | #define __NR_open_by_handle_at 325 |
2401 | __SYSCALL(325, sys_open_by_handle_at, 3) |
2402 | -#define __NR_sync_file_range 326 |
2403 | +#define __NR_sync_file_range2 326 |
2404 | __SYSCALL(326, sys_sync_file_range2, 6) |
2405 | #define __NR_perf_event_open 327 |
2406 | __SYSCALL(327, sys_perf_event_open, 5) |
2407 | diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c |
2408 | index d05f8feeb8d7..17b1ef3232e4 100644 |
2409 | --- a/arch/xtensa/platforms/iss/network.c |
2410 | +++ b/arch/xtensa/platforms/iss/network.c |
2411 | @@ -349,8 +349,8 @@ static void iss_net_timer(unsigned long priv) |
2412 | { |
2413 | struct iss_net_private *lp = (struct iss_net_private *)priv; |
2414 | |
2415 | - spin_lock(&lp->lock); |
2416 | iss_net_poll(); |
2417 | + spin_lock(&lp->lock); |
2418 | mod_timer(&lp->timer, jiffies + lp->timer_val); |
2419 | spin_unlock(&lp->lock); |
2420 | } |
2421 | @@ -361,7 +361,7 @@ static int iss_net_open(struct net_device *dev) |
2422 | struct iss_net_private *lp = netdev_priv(dev); |
2423 | int err; |
2424 | |
2425 | - spin_lock(&lp->lock); |
2426 | + spin_lock_bh(&lp->lock); |
2427 | |
2428 | err = lp->tp.open(lp); |
2429 | if (err < 0) |
2430 | @@ -376,9 +376,11 @@ static int iss_net_open(struct net_device *dev) |
2431 | while ((err = iss_net_rx(dev)) > 0) |
2432 | ; |
2433 | |
2434 | - spin_lock(&opened_lock); |
2435 | + spin_unlock_bh(&lp->lock); |
2436 | + spin_lock_bh(&opened_lock); |
2437 | list_add(&lp->opened_list, &opened); |
2438 | - spin_unlock(&opened_lock); |
2439 | + spin_unlock_bh(&opened_lock); |
2440 | + spin_lock_bh(&lp->lock); |
2441 | |
2442 | init_timer(&lp->timer); |
2443 | lp->timer_val = ISS_NET_TIMER_VALUE; |
2444 | @@ -387,7 +389,7 @@ static int iss_net_open(struct net_device *dev) |
2445 | mod_timer(&lp->timer, jiffies + lp->timer_val); |
2446 | |
2447 | out: |
2448 | - spin_unlock(&lp->lock); |
2449 | + spin_unlock_bh(&lp->lock); |
2450 | return err; |
2451 | } |
2452 | |
2453 | @@ -395,7 +397,7 @@ static int iss_net_close(struct net_device *dev) |
2454 | { |
2455 | struct iss_net_private *lp = netdev_priv(dev); |
2456 | netif_stop_queue(dev); |
2457 | - spin_lock(&lp->lock); |
2458 | + spin_lock_bh(&lp->lock); |
2459 | |
2460 | spin_lock(&opened_lock); |
2461 | list_del(&opened); |
2462 | @@ -405,18 +407,17 @@ static int iss_net_close(struct net_device *dev) |
2463 | |
2464 | lp->tp.close(lp); |
2465 | |
2466 | - spin_unlock(&lp->lock); |
2467 | + spin_unlock_bh(&lp->lock); |
2468 | return 0; |
2469 | } |
2470 | |
2471 | static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev) |
2472 | { |
2473 | struct iss_net_private *lp = netdev_priv(dev); |
2474 | - unsigned long flags; |
2475 | int len; |
2476 | |
2477 | netif_stop_queue(dev); |
2478 | - spin_lock_irqsave(&lp->lock, flags); |
2479 | + spin_lock_bh(&lp->lock); |
2480 | |
2481 | len = lp->tp.write(lp, &skb); |
2482 | |
2483 | @@ -438,7 +439,7 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev) |
2484 | pr_err("%s: %s failed(%d)\n", dev->name, __func__, len); |
2485 | } |
2486 | |
2487 | - spin_unlock_irqrestore(&lp->lock, flags); |
2488 | + spin_unlock_bh(&lp->lock); |
2489 | |
2490 | dev_kfree_skb(skb); |
2491 | return NETDEV_TX_OK; |
2492 | @@ -466,9 +467,9 @@ static int iss_net_set_mac(struct net_device *dev, void *addr) |
2493 | |
2494 | if (!is_valid_ether_addr(hwaddr->sa_data)) |
2495 | return -EADDRNOTAVAIL; |
2496 | - spin_lock(&lp->lock); |
2497 | + spin_lock_bh(&lp->lock); |
2498 | memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN); |
2499 | - spin_unlock(&lp->lock); |
2500 | + spin_unlock_bh(&lp->lock); |
2501 | return 0; |
2502 | } |
2503 | |
2504 | @@ -520,11 +521,11 @@ static int iss_net_configure(int index, char *init) |
2505 | *lp = (struct iss_net_private) { |
2506 | .device_list = LIST_HEAD_INIT(lp->device_list), |
2507 | .opened_list = LIST_HEAD_INIT(lp->opened_list), |
2508 | - .lock = __SPIN_LOCK_UNLOCKED(lp.lock), |
2509 | .dev = dev, |
2510 | .index = index, |
2511 | - }; |
2512 | + }; |
2513 | |
2514 | + spin_lock_init(&lp->lock); |
2515 | /* |
2516 | * If this name ends up conflicting with an existing registered |
2517 | * netdevice, that is OK, register_netdev{,ice}() will notice this |
2518 | diff --git a/arch/xtensa/platforms/xtfpga/Makefile b/arch/xtensa/platforms/xtfpga/Makefile |
2519 | index b9ae206340cd..7839d38b2337 100644 |
2520 | --- a/arch/xtensa/platforms/xtfpga/Makefile |
2521 | +++ b/arch/xtensa/platforms/xtfpga/Makefile |
2522 | @@ -6,4 +6,5 @@ |
2523 | # |
2524 | # Note 2! The CFLAGS definitions are in the main makefile... |
2525 | |
2526 | -obj-y = setup.o lcd.o |
2527 | +obj-y += setup.o |
2528 | +obj-$(CONFIG_XTFPGA_LCD) += lcd.o |
2529 | diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h |
2530 | index 6edd20bb4565..4e0af2662a21 100644 |
2531 | --- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h |
2532 | +++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h |
2533 | @@ -40,9 +40,6 @@ |
2534 | |
2535 | /* UART */ |
2536 | #define DUART16552_PADDR (XCHAL_KIO_PADDR + 0x0D050020) |
2537 | -/* LCD instruction and data addresses. */ |
2538 | -#define LCD_INSTR_ADDR ((char *)IOADDR(0x0D040000)) |
2539 | -#define LCD_DATA_ADDR ((char *)IOADDR(0x0D040004)) |
2540 | |
2541 | /* Misc. */ |
2542 | #define XTFPGA_FPGAREGS_VADDR IOADDR(0x0D020000) |
2543 | diff --git a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h |
2544 | index 0e435645af5a..4c8541ed1139 100644 |
2545 | --- a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h |
2546 | +++ b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h |
2547 | @@ -11,10 +11,25 @@ |
2548 | #ifndef __XTENSA_XTAVNET_LCD_H |
2549 | #define __XTENSA_XTAVNET_LCD_H |
2550 | |
2551 | +#ifdef CONFIG_XTFPGA_LCD |
2552 | /* Display string STR at position POS on the LCD. */ |
2553 | void lcd_disp_at_pos(char *str, unsigned char pos); |
2554 | |
2555 | /* Shift the contents of the LCD display left or right. */ |
2556 | void lcd_shiftleft(void); |
2557 | void lcd_shiftright(void); |
2558 | +#else |
2559 | +static inline void lcd_disp_at_pos(char *str, unsigned char pos) |
2560 | +{ |
2561 | +} |
2562 | + |
2563 | +static inline void lcd_shiftleft(void) |
2564 | +{ |
2565 | +} |
2566 | + |
2567 | +static inline void lcd_shiftright(void) |
2568 | +{ |
2569 | +} |
2570 | +#endif |
2571 | + |
2572 | #endif |
2573 | diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c |
2574 | index 2872301598df..4dc0c1b43f4b 100644 |
2575 | --- a/arch/xtensa/platforms/xtfpga/lcd.c |
2576 | +++ b/arch/xtensa/platforms/xtfpga/lcd.c |
2577 | @@ -1,50 +1,63 @@ |
2578 | /* |
2579 | - * Driver for the LCD display on the Tensilica LX60 Board. |
2580 | + * Driver for the LCD display on the Tensilica XTFPGA board family. |
2581 | + * http://www.mytechcorp.com/cfdata/productFile/File1/MOC-16216B-B-A0A04.pdf |
2582 | * |
2583 | * This file is subject to the terms and conditions of the GNU General Public |
2584 | * License. See the file "COPYING" in the main directory of this archive |
2585 | * for more details. |
2586 | * |
2587 | * Copyright (C) 2001, 2006 Tensilica Inc. |
2588 | + * Copyright (C) 2015 Cadence Design Systems Inc. |
2589 | */ |
2590 | |
2591 | -/* |
2592 | - * |
2593 | - * FIXME: this code is from the examples from the LX60 user guide. |
2594 | - * |
2595 | - * The lcd_pause function does busy waiting, which is probably not |
2596 | - * great. Maybe the code could be changed to use kernel timers, or |
2597 | - * change the hardware to not need to wait. |
2598 | - */ |
2599 | - |
2600 | +#include <linux/delay.h> |
2601 | #include <linux/init.h> |
2602 | #include <linux/io.h> |
2603 | |
2604 | #include <platform/hardware.h> |
2605 | #include <platform/lcd.h> |
2606 | -#include <linux/delay.h> |
2607 | |
2608 | -#define LCD_PAUSE_ITERATIONS 4000 |
2609 | +/* LCD instruction and data addresses. */ |
2610 | +#define LCD_INSTR_ADDR ((char *)IOADDR(CONFIG_XTFPGA_LCD_BASE_ADDR)) |
2611 | +#define LCD_DATA_ADDR (LCD_INSTR_ADDR + 4) |
2612 | + |
2613 | #define LCD_CLEAR 0x1 |
2614 | #define LCD_DISPLAY_ON 0xc |
2615 | |
2616 | /* 8bit and 2 lines display */ |
2617 | #define LCD_DISPLAY_MODE8BIT 0x38 |
2618 | +#define LCD_DISPLAY_MODE4BIT 0x28 |
2619 | #define LCD_DISPLAY_POS 0x80 |
2620 | #define LCD_SHIFT_LEFT 0x18 |
2621 | #define LCD_SHIFT_RIGHT 0x1c |
2622 | |
2623 | +static void lcd_put_byte(u8 *addr, u8 data) |
2624 | +{ |
2625 | +#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS |
2626 | + ACCESS_ONCE(*addr) = data; |
2627 | +#else |
2628 | + ACCESS_ONCE(*addr) = data & 0xf0; |
2629 | + ACCESS_ONCE(*addr) = (data << 4) & 0xf0; |
2630 | +#endif |
2631 | +} |
2632 | + |
2633 | static int __init lcd_init(void) |
2634 | { |
2635 | - *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT; |
2636 | + ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT; |
2637 | mdelay(5); |
2638 | - *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT; |
2639 | + ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT; |
2640 | udelay(200); |
2641 | - *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT; |
2642 | + ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT; |
2643 | + udelay(50); |
2644 | +#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS |
2645 | + ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT; |
2646 | + udelay(50); |
2647 | + lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT); |
2648 | udelay(50); |
2649 | - *LCD_INSTR_ADDR = LCD_DISPLAY_ON; |
2650 | +#endif |
2651 | + lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_ON); |
2652 | udelay(50); |
2653 | - *LCD_INSTR_ADDR = LCD_CLEAR; |
2654 | + lcd_put_byte(LCD_INSTR_ADDR, LCD_CLEAR); |
2655 | mdelay(10); |
2656 | lcd_disp_at_pos("XTENSA LINUX", 0); |
2657 | return 0; |
2658 | @@ -52,10 +65,10 @@ static int __init lcd_init(void) |
2659 | |
2660 | void lcd_disp_at_pos(char *str, unsigned char pos) |
2661 | { |
2662 | - *LCD_INSTR_ADDR = LCD_DISPLAY_POS | pos; |
2663 | + lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_POS | pos); |
2664 | udelay(100); |
2665 | while (*str != 0) { |
2666 | - *LCD_DATA_ADDR = *str; |
2667 | + lcd_put_byte(LCD_DATA_ADDR, *str); |
2668 | udelay(200); |
2669 | str++; |
2670 | } |
2671 | @@ -63,13 +76,13 @@ void lcd_disp_at_pos(char *str, unsigned char pos) |
2672 | |
2673 | void lcd_shiftleft(void) |
2674 | { |
2675 | - *LCD_INSTR_ADDR = LCD_SHIFT_LEFT; |
2676 | + lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_LEFT); |
2677 | udelay(50); |
2678 | } |
2679 | |
2680 | void lcd_shiftright(void) |
2681 | { |
2682 | - *LCD_INSTR_ADDR = LCD_SHIFT_RIGHT; |
2683 | + lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_RIGHT); |
2684 | udelay(50); |
2685 | } |
2686 | |
2687 | diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c |
2688 | index 5ed064e8673c..ccf793247447 100644 |
2689 | --- a/drivers/acpi/acpica/evgpe.c |
2690 | +++ b/drivers/acpi/acpica/evgpe.c |
2691 | @@ -92,6 +92,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info) |
2692 | ACPI_SET_BIT(gpe_register_info->enable_for_run, |
2693 | (u8)register_bit); |
2694 | } |
2695 | + gpe_register_info->enable_mask = gpe_register_info->enable_for_run; |
2696 | |
2697 | return_ACPI_STATUS(AE_OK); |
2698 | } |
2699 | @@ -123,7 +124,7 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) |
2700 | |
2701 | /* Enable the requested GPE */ |
2702 | |
2703 | - status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE_SAVE); |
2704 | + status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); |
2705 | return_ACPI_STATUS(status); |
2706 | } |
2707 | |
2708 | @@ -202,7 +203,7 @@ acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) |
2709 | if (ACPI_SUCCESS(status)) { |
2710 | status = |
2711 | acpi_hw_low_set_gpe(gpe_event_info, |
2712 | - ACPI_GPE_DISABLE_SAVE); |
2713 | + ACPI_GPE_DISABLE); |
2714 | } |
2715 | |
2716 | if (ACPI_FAILURE(status)) { |
2717 | diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c |
2718 | index 84bc550f4f1d..af6514ed64c5 100644 |
2719 | --- a/drivers/acpi/acpica/hwgpe.c |
2720 | +++ b/drivers/acpi/acpica/hwgpe.c |
2721 | @@ -89,6 +89,8 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info) |
2722 | * RETURN: Status |
2723 | * |
2724 | * DESCRIPTION: Enable or disable a single GPE in the parent enable register. |
2725 | + * The enable_mask field of the involved GPE register must be |
2726 | + * updated by the caller if necessary. |
2727 | * |
2728 | ******************************************************************************/ |
2729 | |
2730 | @@ -119,7 +121,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action) |
2731 | /* Set or clear just the bit that corresponds to this GPE */ |
2732 | |
2733 | register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info); |
2734 | - switch (action & ~ACPI_GPE_SAVE_MASK) { |
2735 | + switch (action) { |
2736 | case ACPI_GPE_CONDITIONAL_ENABLE: |
2737 | |
2738 | /* Only enable if the corresponding enable_mask bit is set */ |
2739 | @@ -149,9 +151,6 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action) |
2740 | /* Write the updated enable mask */ |
2741 | |
2742 | status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address); |
2743 | - if (ACPI_SUCCESS(status) && (action & ACPI_GPE_SAVE_MASK)) { |
2744 | - gpe_register_info->enable_mask = (u8)enable_mask; |
2745 | - } |
2746 | return (status); |
2747 | } |
2748 | |
2749 | @@ -286,10 +285,8 @@ acpi_hw_gpe_enable_write(u8 enable_mask, |
2750 | { |
2751 | acpi_status status; |
2752 | |
2753 | + gpe_register_info->enable_mask = enable_mask; |
2754 | status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address); |
2755 | - if (ACPI_SUCCESS(status)) { |
2756 | - gpe_register_info->enable_mask = enable_mask; |
2757 | - } |
2758 | return (status); |
2759 | } |
2760 | |
2761 | diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c |
2762 | index 9bad45e63a45..7fbc2b9dcbbb 100644 |
2763 | --- a/drivers/acpi/acpica/tbinstal.c |
2764 | +++ b/drivers/acpi/acpica/tbinstal.c |
2765 | @@ -346,7 +346,6 @@ acpi_tb_install_standard_table(acpi_physical_address address, |
2766 | */ |
2767 | acpi_tb_uninstall_table(&new_table_desc); |
2768 | *table_index = i; |
2769 | - (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); |
2770 | return_ACPI_STATUS(AE_OK); |
2771 | } |
2772 | } |
2773 | diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c |
2774 | index bbca7830e18a..349f4fdd0b25 100644 |
2775 | --- a/drivers/acpi/scan.c |
2776 | +++ b/drivers/acpi/scan.c |
2777 | @@ -298,7 +298,11 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent) |
2778 | struct acpi_device_physical_node *pn; |
2779 | bool offline = true; |
2780 | |
2781 | - mutex_lock(&adev->physical_node_lock); |
2782 | + /* |
2783 | + * acpi_container_offline() calls this for all of the container's |
2784 | + * children under the container's physical_node_lock lock. |
2785 | + */ |
2786 | + mutex_lock_nested(&adev->physical_node_lock, SINGLE_DEPTH_NESTING); |
2787 | |
2788 | list_for_each_entry(pn, &adev->physical_node_list, node) |
2789 | if (device_supports_offline(pn->dev) && !pn->dev->offline) { |
2790 | diff --git a/drivers/base/bus.c b/drivers/base/bus.c |
2791 | index 876bae5ade33..79bc203f51ef 100644 |
2792 | --- a/drivers/base/bus.c |
2793 | +++ b/drivers/base/bus.c |
2794 | @@ -515,11 +515,11 @@ int bus_add_device(struct device *dev) |
2795 | goto out_put; |
2796 | error = device_add_groups(dev, bus->dev_groups); |
2797 | if (error) |
2798 | - goto out_groups; |
2799 | + goto out_id; |
2800 | error = sysfs_create_link(&bus->p->devices_kset->kobj, |
2801 | &dev->kobj, dev_name(dev)); |
2802 | if (error) |
2803 | - goto out_id; |
2804 | + goto out_groups; |
2805 | error = sysfs_create_link(&dev->kobj, |
2806 | &dev->bus->p->subsys.kobj, "subsystem"); |
2807 | if (error) |
2808 | diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c |
2809 | index 6e64563361f0..9c2ba1c97c42 100644 |
2810 | --- a/drivers/base/cacheinfo.c |
2811 | +++ b/drivers/base/cacheinfo.c |
2812 | @@ -62,15 +62,21 @@ static int cache_setup_of_node(unsigned int cpu) |
2813 | return -ENOENT; |
2814 | } |
2815 | |
2816 | - while (np && index < cache_leaves(cpu)) { |
2817 | + while (index < cache_leaves(cpu)) { |
2818 | this_leaf = this_cpu_ci->info_list + index; |
2819 | if (this_leaf->level != 1) |
2820 | np = of_find_next_cache_node(np); |
2821 | else |
2822 | np = of_node_get(np);/* cpu node itself */ |
2823 | + if (!np) |
2824 | + break; |
2825 | this_leaf->of_node = np; |
2826 | index++; |
2827 | } |
2828 | + |
2829 | + if (index != cache_leaves(cpu)) /* not all OF nodes populated */ |
2830 | + return -ENOENT; |
2831 | + |
2832 | return 0; |
2833 | } |
2834 | |
2835 | @@ -189,8 +195,11 @@ static int detect_cache_attributes(unsigned int cpu) |
2836 | * will be set up here only if they are not populated already |
2837 | */ |
2838 | ret = cache_shared_cpu_map_setup(cpu); |
2839 | - if (ret) |
2840 | + if (ret) { |
2841 | + pr_warn("Unable to detect cache hierarcy from DT for CPU %d\n", |
2842 | + cpu); |
2843 | goto free_ci; |
2844 | + } |
2845 | return 0; |
2846 | |
2847 | free_ci: |
2848 | diff --git a/drivers/base/platform.c b/drivers/base/platform.c |
2849 | index 9421fed40905..e68ab79df28b 100644 |
2850 | --- a/drivers/base/platform.c |
2851 | +++ b/drivers/base/platform.c |
2852 | @@ -101,6 +101,15 @@ int platform_get_irq(struct platform_device *dev, unsigned int num) |
2853 | } |
2854 | |
2855 | r = platform_get_resource(dev, IORESOURCE_IRQ, num); |
2856 | + /* |
2857 | + * The resources may pass trigger flags to the irqs that need |
2858 | + * to be set up. It so happens that the trigger flags for |
2859 | + * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER* |
2860 | + * settings. |
2861 | + */ |
2862 | + if (r && r->flags & IORESOURCE_BITS) |
2863 | + irqd_set_trigger_type(irq_get_irq_data(r->start), |
2864 | + r->flags & IORESOURCE_BITS); |
2865 | |
2866 | return r ? r->start : -ENXIO; |
2867 | #endif |
2868 | diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c |
2869 | index de4c8499cbac..288547a3c566 100644 |
2870 | --- a/drivers/bluetooth/ath3k.c |
2871 | +++ b/drivers/bluetooth/ath3k.c |
2872 | @@ -65,6 +65,7 @@ static const struct usb_device_id ath3k_table[] = { |
2873 | /* Atheros AR3011 with sflash firmware*/ |
2874 | { USB_DEVICE(0x0489, 0xE027) }, |
2875 | { USB_DEVICE(0x0489, 0xE03D) }, |
2876 | + { USB_DEVICE(0x04F2, 0xAFF1) }, |
2877 | { USB_DEVICE(0x0930, 0x0215) }, |
2878 | { USB_DEVICE(0x0CF3, 0x3002) }, |
2879 | { USB_DEVICE(0x0CF3, 0xE019) }, |
2880 | diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c |
2881 | index 8bfc4c2bba87..2c527da668ae 100644 |
2882 | --- a/drivers/bluetooth/btusb.c |
2883 | +++ b/drivers/bluetooth/btusb.c |
2884 | @@ -159,6 +159,7 @@ static const struct usb_device_id blacklist_table[] = { |
2885 | /* Atheros 3011 with sflash firmware */ |
2886 | { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE }, |
2887 | { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, |
2888 | + { USB_DEVICE(0x04f2, 0xaff1), .driver_info = BTUSB_IGNORE }, |
2889 | { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, |
2890 | { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, |
2891 | { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE }, |
2892 | diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c |
2893 | index e096e9cddb40..283f00a7f036 100644 |
2894 | --- a/drivers/char/tpm/tpm-chip.c |
2895 | +++ b/drivers/char/tpm/tpm-chip.c |
2896 | @@ -170,6 +170,41 @@ static void tpm_dev_del_device(struct tpm_chip *chip) |
2897 | device_unregister(&chip->dev); |
2898 | } |
2899 | |
2900 | +static int tpm1_chip_register(struct tpm_chip *chip) |
2901 | +{ |
2902 | + int rc; |
2903 | + |
2904 | + if (chip->flags & TPM_CHIP_FLAG_TPM2) |
2905 | + return 0; |
2906 | + |
2907 | + rc = tpm_sysfs_add_device(chip); |
2908 | + if (rc) |
2909 | + return rc; |
2910 | + |
2911 | + rc = tpm_add_ppi(chip); |
2912 | + if (rc) { |
2913 | + tpm_sysfs_del_device(chip); |
2914 | + return rc; |
2915 | + } |
2916 | + |
2917 | + chip->bios_dir = tpm_bios_log_setup(chip->devname); |
2918 | + |
2919 | + return 0; |
2920 | +} |
2921 | + |
2922 | +static void tpm1_chip_unregister(struct tpm_chip *chip) |
2923 | +{ |
2924 | + if (chip->flags & TPM_CHIP_FLAG_TPM2) |
2925 | + return; |
2926 | + |
2927 | + if (chip->bios_dir) |
2928 | + tpm_bios_log_teardown(chip->bios_dir); |
2929 | + |
2930 | + tpm_remove_ppi(chip); |
2931 | + |
2932 | + tpm_sysfs_del_device(chip); |
2933 | +} |
2934 | + |
2935 | /* |
2936 | * tpm_chip_register() - create a character device for the TPM chip |
2937 | * @chip: TPM chip to use. |
2938 | @@ -185,22 +220,13 @@ int tpm_chip_register(struct tpm_chip *chip) |
2939 | { |
2940 | int rc; |
2941 | |
2942 | - /* Populate sysfs for TPM1 devices. */ |
2943 | - if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { |
2944 | - rc = tpm_sysfs_add_device(chip); |
2945 | - if (rc) |
2946 | - goto del_misc; |
2947 | - |
2948 | - rc = tpm_add_ppi(chip); |
2949 | - if (rc) |
2950 | - goto del_sysfs; |
2951 | - |
2952 | - chip->bios_dir = tpm_bios_log_setup(chip->devname); |
2953 | - } |
2954 | + rc = tpm1_chip_register(chip); |
2955 | + if (rc) |
2956 | + return rc; |
2957 | |
2958 | rc = tpm_dev_add_device(chip); |
2959 | if (rc) |
2960 | - return rc; |
2961 | + goto out_err; |
2962 | |
2963 | /* Make the chip available. */ |
2964 | spin_lock(&driver_lock); |
2965 | @@ -210,10 +236,8 @@ int tpm_chip_register(struct tpm_chip *chip) |
2966 | chip->flags |= TPM_CHIP_FLAG_REGISTERED; |
2967 | |
2968 | return 0; |
2969 | -del_sysfs: |
2970 | - tpm_sysfs_del_device(chip); |
2971 | -del_misc: |
2972 | - tpm_dev_del_device(chip); |
2973 | +out_err: |
2974 | + tpm1_chip_unregister(chip); |
2975 | return rc; |
2976 | } |
2977 | EXPORT_SYMBOL_GPL(tpm_chip_register); |
2978 | @@ -238,13 +262,7 @@ void tpm_chip_unregister(struct tpm_chip *chip) |
2979 | spin_unlock(&driver_lock); |
2980 | synchronize_rcu(); |
2981 | |
2982 | - if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { |
2983 | - if (chip->bios_dir) |
2984 | - tpm_bios_log_teardown(chip->bios_dir); |
2985 | - tpm_remove_ppi(chip); |
2986 | - tpm_sysfs_del_device(chip); |
2987 | - } |
2988 | - |
2989 | + tpm1_chip_unregister(chip); |
2990 | tpm_dev_del_device(chip); |
2991 | } |
2992 | EXPORT_SYMBOL_GPL(tpm_chip_unregister); |
2993 | diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c |
2994 | index a23ac0c724f0..0b7c3e8840ba 100644 |
2995 | --- a/drivers/clk/at91/clk-usb.c |
2996 | +++ b/drivers/clk/at91/clk-usb.c |
2997 | @@ -56,22 +56,55 @@ static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw, |
2998 | return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1)); |
2999 | } |
3000 | |
3001 | -static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate, |
3002 | - unsigned long *parent_rate) |
3003 | +static long at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw, |
3004 | + unsigned long rate, |
3005 | + unsigned long min_rate, |
3006 | + unsigned long max_rate, |
3007 | + unsigned long *best_parent_rate, |
3008 | + struct clk_hw **best_parent_hw) |
3009 | { |
3010 | - unsigned long div; |
3011 | + struct clk *parent = NULL; |
3012 | + long best_rate = -EINVAL; |
3013 | + unsigned long tmp_rate; |
3014 | + int best_diff = -1; |
3015 | + int tmp_diff; |
3016 | + int i; |
3017 | |
3018 | - if (!rate) |
3019 | - return -EINVAL; |
3020 | + for (i = 0; i < __clk_get_num_parents(hw->clk); i++) { |
3021 | + int div; |
3022 | |
3023 | - if (rate >= *parent_rate) |
3024 | - return *parent_rate; |
3025 | + parent = clk_get_parent_by_index(hw->clk, i); |
3026 | + if (!parent) |
3027 | + continue; |
3028 | + |
3029 | + for (div = 1; div < SAM9X5_USB_MAX_DIV + 2; div++) { |
3030 | + unsigned long tmp_parent_rate; |
3031 | + |
3032 | + tmp_parent_rate = rate * div; |
3033 | + tmp_parent_rate = __clk_round_rate(parent, |
3034 | + tmp_parent_rate); |
3035 | + tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div); |
3036 | + if (tmp_rate < rate) |
3037 | + tmp_diff = rate - tmp_rate; |
3038 | + else |
3039 | + tmp_diff = tmp_rate - rate; |
3040 | + |
3041 | + if (best_diff < 0 || best_diff > tmp_diff) { |
3042 | + best_rate = tmp_rate; |
3043 | + best_diff = tmp_diff; |
3044 | + *best_parent_rate = tmp_parent_rate; |
3045 | + *best_parent_hw = __clk_get_hw(parent); |
3046 | + } |
3047 | + |
3048 | + if (!best_diff || tmp_rate < rate) |
3049 | + break; |
3050 | + } |
3051 | |
3052 | - div = DIV_ROUND_CLOSEST(*parent_rate, rate); |
3053 | - if (div > SAM9X5_USB_MAX_DIV + 1) |
3054 | - div = SAM9X5_USB_MAX_DIV + 1; |
3055 | + if (!best_diff) |
3056 | + break; |
3057 | + } |
3058 | |
3059 | - return DIV_ROUND_CLOSEST(*parent_rate, div); |
3060 | + return best_rate; |
3061 | } |
3062 | |
3063 | static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index) |
3064 | @@ -121,7 +154,7 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate, |
3065 | |
3066 | static const struct clk_ops at91sam9x5_usb_ops = { |
3067 | .recalc_rate = at91sam9x5_clk_usb_recalc_rate, |
3068 | - .round_rate = at91sam9x5_clk_usb_round_rate, |
3069 | + .determine_rate = at91sam9x5_clk_usb_determine_rate, |
3070 | .get_parent = at91sam9x5_clk_usb_get_parent, |
3071 | .set_parent = at91sam9x5_clk_usb_set_parent, |
3072 | .set_rate = at91sam9x5_clk_usb_set_rate, |
3073 | @@ -159,7 +192,7 @@ static const struct clk_ops at91sam9n12_usb_ops = { |
3074 | .disable = at91sam9n12_clk_usb_disable, |
3075 | .is_enabled = at91sam9n12_clk_usb_is_enabled, |
3076 | .recalc_rate = at91sam9x5_clk_usb_recalc_rate, |
3077 | - .round_rate = at91sam9x5_clk_usb_round_rate, |
3078 | + .determine_rate = at91sam9x5_clk_usb_determine_rate, |
3079 | .set_rate = at91sam9x5_clk_usb_set_rate, |
3080 | }; |
3081 | |
3082 | @@ -179,7 +212,8 @@ at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name, |
3083 | init.ops = &at91sam9x5_usb_ops; |
3084 | init.parent_names = parent_names; |
3085 | init.num_parents = num_parents; |
3086 | - init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE; |
3087 | + init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE | |
3088 | + CLK_SET_RATE_PARENT; |
3089 | |
3090 | usb->hw.init = &init; |
3091 | usb->pmc = pmc; |
3092 | @@ -207,7 +241,7 @@ at91sam9n12_clk_register_usb(struct at91_pmc *pmc, const char *name, |
3093 | init.ops = &at91sam9n12_usb_ops; |
3094 | init.parent_names = &parent_name; |
3095 | init.num_parents = 1; |
3096 | - init.flags = CLK_SET_RATE_GATE; |
3097 | + init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT; |
3098 | |
3099 | usb->hw.init = &init; |
3100 | usb->pmc = pmc; |
3101 | diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c |
3102 | index 0039bd7d3965..466f30ca65c2 100644 |
3103 | --- a/drivers/clk/qcom/clk-rcg.c |
3104 | +++ b/drivers/clk/qcom/clk-rcg.c |
3105 | @@ -495,6 +495,57 @@ static int clk_rcg_bypass_set_rate(struct clk_hw *hw, unsigned long rate, |
3106 | return __clk_rcg_set_rate(rcg, rcg->freq_tbl); |
3107 | } |
3108 | |
3109 | +/* |
3110 | + * This type of clock has a glitch-free mux that switches between the output of |
3111 | + * the M/N counter and an always on clock source (XO). When clk_set_rate() is |
3112 | + * called we need to make sure that we don't switch to the M/N counter if it |
3113 | + * isn't clocking because the mux will get stuck and the clock will stop |
3114 | + * outputting a clock. This can happen if the framework isn't aware that this |
3115 | + * clock is on and so clk_set_rate() doesn't turn on the new parent. To fix |
3116 | + * this we switch the mux in the enable/disable ops and reprogram the M/N |
3117 | + * counter in the set_rate op. We also make sure to switch away from the M/N |
3118 | + * counter in set_rate if software thinks the clock is off. |
3119 | + */ |
3120 | +static int clk_rcg_lcc_set_rate(struct clk_hw *hw, unsigned long rate, |
3121 | + unsigned long parent_rate) |
3122 | +{ |
3123 | + struct clk_rcg *rcg = to_clk_rcg(hw); |
3124 | + const struct freq_tbl *f; |
3125 | + int ret; |
3126 | + u32 gfm = BIT(10); |
3127 | + |
3128 | + f = qcom_find_freq(rcg->freq_tbl, rate); |
3129 | + if (!f) |
3130 | + return -EINVAL; |
3131 | + |
3132 | + /* Switch to XO to avoid glitches */ |
3133 | + regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, 0); |
3134 | + ret = __clk_rcg_set_rate(rcg, f); |
3135 | + /* Switch back to M/N if it's clocking */ |
3136 | + if (__clk_is_enabled(hw->clk)) |
3137 | + regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, gfm); |
3138 | + |
3139 | + return ret; |
3140 | +} |
3141 | + |
3142 | +static int clk_rcg_lcc_enable(struct clk_hw *hw) |
3143 | +{ |
3144 | + struct clk_rcg *rcg = to_clk_rcg(hw); |
3145 | + u32 gfm = BIT(10); |
3146 | + |
3147 | + /* Use M/N */ |
3148 | + return regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, gfm); |
3149 | +} |
3150 | + |
3151 | +static void clk_rcg_lcc_disable(struct clk_hw *hw) |
3152 | +{ |
3153 | + struct clk_rcg *rcg = to_clk_rcg(hw); |
3154 | + u32 gfm = BIT(10); |
3155 | + |
3156 | + /* Use XO */ |
3157 | + regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, 0); |
3158 | +} |
3159 | + |
3160 | static int __clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate) |
3161 | { |
3162 | struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw); |
3163 | @@ -543,6 +594,17 @@ const struct clk_ops clk_rcg_bypass_ops = { |
3164 | }; |
3165 | EXPORT_SYMBOL_GPL(clk_rcg_bypass_ops); |
3166 | |
3167 | +const struct clk_ops clk_rcg_lcc_ops = { |
3168 | + .enable = clk_rcg_lcc_enable, |
3169 | + .disable = clk_rcg_lcc_disable, |
3170 | + .get_parent = clk_rcg_get_parent, |
3171 | + .set_parent = clk_rcg_set_parent, |
3172 | + .recalc_rate = clk_rcg_recalc_rate, |
3173 | + .determine_rate = clk_rcg_determine_rate, |
3174 | + .set_rate = clk_rcg_lcc_set_rate, |
3175 | +}; |
3176 | +EXPORT_SYMBOL_GPL(clk_rcg_lcc_ops); |
3177 | + |
3178 | const struct clk_ops clk_dyn_rcg_ops = { |
3179 | .enable = clk_enable_regmap, |
3180 | .is_enabled = clk_is_enabled_regmap, |
3181 | diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h |
3182 | index 687e41f91d7c..d09d06ba278e 100644 |
3183 | --- a/drivers/clk/qcom/clk-rcg.h |
3184 | +++ b/drivers/clk/qcom/clk-rcg.h |
3185 | @@ -96,6 +96,7 @@ struct clk_rcg { |
3186 | |
3187 | extern const struct clk_ops clk_rcg_ops; |
3188 | extern const struct clk_ops clk_rcg_bypass_ops; |
3189 | +extern const struct clk_ops clk_rcg_lcc_ops; |
3190 | |
3191 | #define to_clk_rcg(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg, clkr) |
3192 | |
3193 | diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c |
3194 | index 742acfa18d63..381f27469a9c 100644 |
3195 | --- a/drivers/clk/qcom/clk-rcg2.c |
3196 | +++ b/drivers/clk/qcom/clk-rcg2.c |
3197 | @@ -243,7 +243,7 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) |
3198 | mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK; |
3199 | cfg = f->pre_div << CFG_SRC_DIV_SHIFT; |
3200 | cfg |= rcg->parent_map[f->src] << CFG_SRC_SEL_SHIFT; |
3201 | - if (rcg->mnd_width && f->n) |
3202 | + if (rcg->mnd_width && f->n && (f->m != f->n)) |
3203 | cfg |= CFG_MODE_DUAL_EDGE; |
3204 | ret = regmap_update_bits(rcg->clkr.regmap, |
3205 | rcg->cmd_rcgr + CFG_REG, mask, cfg); |
3206 | diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c |
3207 | index cbdc31dea7f4..a015bb06c09b 100644 |
3208 | --- a/drivers/clk/qcom/gcc-ipq806x.c |
3209 | +++ b/drivers/clk/qcom/gcc-ipq806x.c |
3210 | @@ -525,8 +525,8 @@ static struct freq_tbl clk_tbl_gsbi_qup[] = { |
3211 | { 10800000, P_PXO, 1, 2, 5 }, |
3212 | { 15060000, P_PLL8, 1, 2, 51 }, |
3213 | { 24000000, P_PLL8, 4, 1, 4 }, |
3214 | + { 25000000, P_PXO, 1, 0, 0 }, |
3215 | { 25600000, P_PLL8, 1, 1, 15 }, |
3216 | - { 27000000, P_PXO, 1, 0, 0 }, |
3217 | { 48000000, P_PLL8, 4, 1, 2 }, |
3218 | { 51200000, P_PLL8, 1, 2, 15 }, |
3219 | { } |
3220 | diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c |
3221 | index c9ff27b4648b..a6d3a6745c4d 100644 |
3222 | --- a/drivers/clk/qcom/lcc-ipq806x.c |
3223 | +++ b/drivers/clk/qcom/lcc-ipq806x.c |
3224 | @@ -294,14 +294,14 @@ static struct clk_regmap_mux pcm_clk = { |
3225 | }; |
3226 | |
3227 | static struct freq_tbl clk_tbl_aif_osr[] = { |
3228 | - { 22050, P_PLL4, 1, 147, 20480 }, |
3229 | - { 32000, P_PLL4, 1, 1, 96 }, |
3230 | - { 44100, P_PLL4, 1, 147, 10240 }, |
3231 | - { 48000, P_PLL4, 1, 1, 64 }, |
3232 | - { 88200, P_PLL4, 1, 147, 5120 }, |
3233 | - { 96000, P_PLL4, 1, 1, 32 }, |
3234 | - { 176400, P_PLL4, 1, 147, 2560 }, |
3235 | - { 192000, P_PLL4, 1, 1, 16 }, |
3236 | + { 2822400, P_PLL4, 1, 147, 20480 }, |
3237 | + { 4096000, P_PLL4, 1, 1, 96 }, |
3238 | + { 5644800, P_PLL4, 1, 147, 10240 }, |
3239 | + { 6144000, P_PLL4, 1, 1, 64 }, |
3240 | + { 11289600, P_PLL4, 1, 147, 5120 }, |
3241 | + { 12288000, P_PLL4, 1, 1, 32 }, |
3242 | + { 22579200, P_PLL4, 1, 147, 2560 }, |
3243 | + { 24576000, P_PLL4, 1, 1, 16 }, |
3244 | { }, |
3245 | }; |
3246 | |
3247 | @@ -360,7 +360,7 @@ static struct clk_branch spdif_clk = { |
3248 | }; |
3249 | |
3250 | static struct freq_tbl clk_tbl_ahbix[] = { |
3251 | - { 131072, P_PLL4, 1, 1, 3 }, |
3252 | + { 131072000, P_PLL4, 1, 1, 3 }, |
3253 | { }, |
3254 | }; |
3255 | |
3256 | @@ -386,13 +386,12 @@ static struct clk_rcg ahbix_clk = { |
3257 | .freq_tbl = clk_tbl_ahbix, |
3258 | .clkr = { |
3259 | .enable_reg = 0x38, |
3260 | - .enable_mask = BIT(10), /* toggle the gfmux to select mn/pxo */ |
3261 | + .enable_mask = BIT(11), |
3262 | .hw.init = &(struct clk_init_data){ |
3263 | .name = "ahbix", |
3264 | .parent_names = lcc_pxo_pll4, |
3265 | .num_parents = 2, |
3266 | - .ops = &clk_rcg_ops, |
3267 | - .flags = CLK_SET_RATE_GATE, |
3268 | + .ops = &clk_rcg_lcc_ops, |
3269 | }, |
3270 | }, |
3271 | }; |
3272 | diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c |
3273 | index 51462e85675f..714d6ba782c8 100644 |
3274 | --- a/drivers/clk/samsung/clk-exynos4.c |
3275 | +++ b/drivers/clk/samsung/clk-exynos4.c |
3276 | @@ -1354,7 +1354,7 @@ static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = { |
3277 | VPLL_LOCK, VPLL_CON0, NULL), |
3278 | }; |
3279 | |
3280 | -static void __init exynos4_core_down_clock(enum exynos4_soc soc) |
3281 | +static void __init exynos4x12_core_down_clock(void) |
3282 | { |
3283 | unsigned int tmp; |
3284 | |
3285 | @@ -1373,11 +1373,9 @@ static void __init exynos4_core_down_clock(enum exynos4_soc soc) |
3286 | __raw_writel(tmp, reg_base + PWR_CTRL1); |
3287 | |
3288 | /* |
3289 | - * Disable the clock up feature on Exynos4x12, in case it was |
3290 | - * enabled by bootloader. |
3291 | + * Disable the clock up feature in case it was enabled by bootloader. |
3292 | */ |
3293 | - if (exynos4_soc == EXYNOS4X12) |
3294 | - __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2); |
3295 | + __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2); |
3296 | } |
3297 | |
3298 | /* register exynos4 clocks */ |
3299 | @@ -1474,7 +1472,8 @@ static void __init exynos4_clk_init(struct device_node *np, |
3300 | samsung_clk_register_alias(ctx, exynos4_aliases, |
3301 | ARRAY_SIZE(exynos4_aliases)); |
3302 | |
3303 | - exynos4_core_down_clock(soc); |
3304 | + if (soc == EXYNOS4X12) |
3305 | + exynos4x12_core_down_clock(); |
3306 | exynos4_clk_sleep_init(); |
3307 | |
3308 | samsung_clk_of_add_provider(np, ctx); |
3309 | diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c |
3310 | index 9a893f2fe8e9..23ce0afefca5 100644 |
3311 | --- a/drivers/clk/tegra/clk-tegra124.c |
3312 | +++ b/drivers/clk/tegra/clk-tegra124.c |
3313 | @@ -1110,16 +1110,18 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base, |
3314 | 1, 2); |
3315 | clks[TEGRA124_CLK_XUSB_SS_DIV2] = clk; |
3316 | |
3317 | - clk = clk_register_gate(NULL, "plld_dsi", "plld_out0", 0, |
3318 | + clk = clk_register_gate(NULL, "pll_d_dsi_out", "pll_d_out0", 0, |
3319 | clk_base + PLLD_MISC, 30, 0, &pll_d_lock); |
3320 | - clks[TEGRA124_CLK_PLLD_DSI] = clk; |
3321 | + clks[TEGRA124_CLK_PLL_D_DSI_OUT] = clk; |
3322 | |
3323 | - clk = tegra_clk_register_periph_gate("dsia", "plld_dsi", 0, clk_base, |
3324 | - 0, 48, periph_clk_enb_refcnt); |
3325 | + clk = tegra_clk_register_periph_gate("dsia", "pll_d_dsi_out", 0, |
3326 | + clk_base, 0, 48, |
3327 | + periph_clk_enb_refcnt); |
3328 | clks[TEGRA124_CLK_DSIA] = clk; |
3329 | |
3330 | - clk = tegra_clk_register_periph_gate("dsib", "plld_dsi", 0, clk_base, |
3331 | - 0, 82, periph_clk_enb_refcnt); |
3332 | + clk = tegra_clk_register_periph_gate("dsib", "pll_d_dsi_out", 0, |
3333 | + clk_base, 0, 82, |
3334 | + periph_clk_enb_refcnt); |
3335 | clks[TEGRA124_CLK_DSIB] = clk; |
3336 | |
3337 | /* emc mux */ |
3338 | diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c |
3339 | index 9ddb7547cb43..7a1df61847fc 100644 |
3340 | --- a/drivers/clk/tegra/clk.c |
3341 | +++ b/drivers/clk/tegra/clk.c |
3342 | @@ -272,7 +272,7 @@ void __init tegra_add_of_provider(struct device_node *np) |
3343 | of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); |
3344 | |
3345 | rst_ctlr.of_node = np; |
3346 | - rst_ctlr.nr_resets = clk_num * 32; |
3347 | + rst_ctlr.nr_resets = periph_banks * 32; |
3348 | reset_controller_register(&rst_ctlr); |
3349 | } |
3350 | |
3351 | diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c |
3352 | index 42f95a4326b0..9a28b7e07c71 100644 |
3353 | --- a/drivers/crypto/omap-aes.c |
3354 | +++ b/drivers/crypto/omap-aes.c |
3355 | @@ -554,15 +554,23 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) |
3356 | return err; |
3357 | } |
3358 | |
3359 | -static int omap_aes_check_aligned(struct scatterlist *sg) |
3360 | +static int omap_aes_check_aligned(struct scatterlist *sg, int total) |
3361 | { |
3362 | + int len = 0; |
3363 | + |
3364 | while (sg) { |
3365 | if (!IS_ALIGNED(sg->offset, 4)) |
3366 | return -1; |
3367 | if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE)) |
3368 | return -1; |
3369 | + |
3370 | + len += sg->length; |
3371 | sg = sg_next(sg); |
3372 | } |
3373 | + |
3374 | + if (len != total) |
3375 | + return -1; |
3376 | + |
3377 | return 0; |
3378 | } |
3379 | |
3380 | @@ -633,8 +641,8 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd, |
3381 | dd->in_sg = req->src; |
3382 | dd->out_sg = req->dst; |
3383 | |
3384 | - if (omap_aes_check_aligned(dd->in_sg) || |
3385 | - omap_aes_check_aligned(dd->out_sg)) { |
3386 | + if (omap_aes_check_aligned(dd->in_sg, dd->total) || |
3387 | + omap_aes_check_aligned(dd->out_sg, dd->total)) { |
3388 | if (omap_aes_copy_sgs(dd)) |
3389 | pr_err("Failed to copy SGs for unaligned cases\n"); |
3390 | dd->sgs_copied = 1; |
3391 | diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c |
3392 | index d0bc123c7975..1a54205860f5 100644 |
3393 | --- a/drivers/gpio/gpio-mvebu.c |
3394 | +++ b/drivers/gpio/gpio-mvebu.c |
3395 | @@ -320,11 +320,13 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d) |
3396 | { |
3397 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
3398 | struct mvebu_gpio_chip *mvchip = gc->private; |
3399 | + struct irq_chip_type *ct = irq_data_get_chip_type(d); |
3400 | u32 mask = 1 << (d->irq - gc->irq_base); |
3401 | |
3402 | irq_gc_lock(gc); |
3403 | - gc->mask_cache &= ~mask; |
3404 | - writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip)); |
3405 | + ct->mask_cache_priv &= ~mask; |
3406 | + |
3407 | + writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip)); |
3408 | irq_gc_unlock(gc); |
3409 | } |
3410 | |
3411 | @@ -332,11 +334,13 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d) |
3412 | { |
3413 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
3414 | struct mvebu_gpio_chip *mvchip = gc->private; |
3415 | + struct irq_chip_type *ct = irq_data_get_chip_type(d); |
3416 | + |
3417 | u32 mask = 1 << (d->irq - gc->irq_base); |
3418 | |
3419 | irq_gc_lock(gc); |
3420 | - gc->mask_cache |= mask; |
3421 | - writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip)); |
3422 | + ct->mask_cache_priv |= mask; |
3423 | + writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip)); |
3424 | irq_gc_unlock(gc); |
3425 | } |
3426 | |
3427 | @@ -344,11 +348,13 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d) |
3428 | { |
3429 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
3430 | struct mvebu_gpio_chip *mvchip = gc->private; |
3431 | + struct irq_chip_type *ct = irq_data_get_chip_type(d); |
3432 | + |
3433 | u32 mask = 1 << (d->irq - gc->irq_base); |
3434 | |
3435 | irq_gc_lock(gc); |
3436 | - gc->mask_cache &= ~mask; |
3437 | - writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip)); |
3438 | + ct->mask_cache_priv &= ~mask; |
3439 | + writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip)); |
3440 | irq_gc_unlock(gc); |
3441 | } |
3442 | |
3443 | @@ -356,11 +362,13 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d) |
3444 | { |
3445 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
3446 | struct mvebu_gpio_chip *mvchip = gc->private; |
3447 | + struct irq_chip_type *ct = irq_data_get_chip_type(d); |
3448 | + |
3449 | u32 mask = 1 << (d->irq - gc->irq_base); |
3450 | |
3451 | irq_gc_lock(gc); |
3452 | - gc->mask_cache |= mask; |
3453 | - writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip)); |
3454 | + ct->mask_cache_priv |= mask; |
3455 | + writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip)); |
3456 | irq_gc_unlock(gc); |
3457 | } |
3458 | |
3459 | diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c |
3460 | index bf17a60b40ed..1dbfba58f909 100644 |
3461 | --- a/drivers/gpu/drm/exynos/exynos_dp_core.c |
3462 | +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c |
3463 | @@ -32,10 +32,16 @@ |
3464 | #include <drm/bridge/ptn3460.h> |
3465 | |
3466 | #include "exynos_dp_core.h" |
3467 | +#include "exynos_drm_fimd.h" |
3468 | |
3469 | #define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \ |
3470 | connector) |
3471 | |
3472 | +static inline struct exynos_drm_crtc *dp_to_crtc(struct exynos_dp_device *dp) |
3473 | +{ |
3474 | + return to_exynos_crtc(dp->encoder->crtc); |
3475 | +} |
3476 | + |
3477 | static inline struct exynos_dp_device * |
3478 | display_to_dp(struct exynos_drm_display *d) |
3479 | { |
3480 | @@ -1070,6 +1076,8 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp) |
3481 | } |
3482 | } |
3483 | |
3484 | + fimd_dp_clock_enable(dp_to_crtc(dp), true); |
3485 | + |
3486 | clk_prepare_enable(dp->clock); |
3487 | exynos_dp_phy_init(dp); |
3488 | exynos_dp_init_dp(dp); |
3489 | @@ -1094,6 +1102,8 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp) |
3490 | exynos_dp_phy_exit(dp); |
3491 | clk_disable_unprepare(dp->clock); |
3492 | |
3493 | + fimd_dp_clock_enable(dp_to_crtc(dp), false); |
3494 | + |
3495 | if (dp->panel) { |
3496 | if (drm_panel_unprepare(dp->panel)) |
3497 | DRM_ERROR("failed to turnoff the panel\n"); |
3498 | diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c |
3499 | index 33a10ce967ea..5d58f6cc0397 100644 |
3500 | --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c |
3501 | +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c |
3502 | @@ -32,6 +32,7 @@ |
3503 | #include "exynos_drm_fbdev.h" |
3504 | #include "exynos_drm_crtc.h" |
3505 | #include "exynos_drm_iommu.h" |
3506 | +#include "exynos_drm_fimd.h" |
3507 | |
3508 | /* |
3509 | * FIMD stands for Fully Interactive Mobile Display and |
3510 | @@ -1233,6 +1234,24 @@ static int fimd_remove(struct platform_device *pdev) |
3511 | return 0; |
3512 | } |
3513 | |
3514 | +void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable) |
3515 | +{ |
3516 | + struct fimd_context *ctx = crtc->ctx; |
3517 | + u32 val; |
3518 | + |
3519 | + /* |
3520 | + * Only Exynos 5250, 5260, 5410 and 542x requires enabling DP/MIE |
3521 | + * clock. On these SoCs the bootloader may enable it but any |
3522 | + * power domain off/on will reset it to disable state. |
3523 | + */ |
3524 | + if (ctx->driver_data != &exynos5_fimd_driver_data) |
3525 | + return; |
3526 | + |
3527 | + val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE; |
3528 | + writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON); |
3529 | +} |
3530 | +EXPORT_SYMBOL_GPL(fimd_dp_clock_enable); |
3531 | + |
3532 | struct platform_driver fimd_driver = { |
3533 | .probe = fimd_probe, |
3534 | .remove = fimd_remove, |
3535 | diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.h b/drivers/gpu/drm/exynos/exynos_drm_fimd.h |
3536 | new file mode 100644 |
3537 | index 000000000000..b4fcaa568456 |
3538 | --- /dev/null |
3539 | +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.h |
3540 | @@ -0,0 +1,15 @@ |
3541 | +/* |
3542 | + * Copyright (c) 2015 Samsung Electronics Co., Ltd. |
3543 | + * |
3544 | + * This program is free software; you can redistribute it and/or modify it |
3545 | + * under the terms of the GNU General Public License as published by the |
3546 | + * Free Software Foundation; either version 2 of the License, or (at your |
3547 | + * option) any later version. |
3548 | + */ |
3549 | + |
3550 | +#ifndef _EXYNOS_DRM_FIMD_H_ |
3551 | +#define _EXYNOS_DRM_FIMD_H_ |
3552 | + |
3553 | +extern void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable); |
3554 | + |
3555 | +#endif /* _EXYNOS_DRM_FIMD_H_ */ |
3556 | diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c |
3557 | index fa140e04d5fa..60ab1f75d58e 100644 |
3558 | --- a/drivers/gpu/drm/i2c/adv7511.c |
3559 | +++ b/drivers/gpu/drm/i2c/adv7511.c |
3560 | @@ -33,6 +33,7 @@ struct adv7511 { |
3561 | |
3562 | unsigned int current_edid_segment; |
3563 | uint8_t edid_buf[256]; |
3564 | + bool edid_read; |
3565 | |
3566 | wait_queue_head_t wq; |
3567 | struct drm_encoder *encoder; |
3568 | @@ -379,69 +380,71 @@ static bool adv7511_hpd(struct adv7511 *adv7511) |
3569 | return false; |
3570 | } |
3571 | |
3572 | -static irqreturn_t adv7511_irq_handler(int irq, void *devid) |
3573 | -{ |
3574 | - struct adv7511 *adv7511 = devid; |
3575 | - |
3576 | - if (adv7511_hpd(adv7511)) |
3577 | - drm_helper_hpd_irq_event(adv7511->encoder->dev); |
3578 | - |
3579 | - wake_up_all(&adv7511->wq); |
3580 | - |
3581 | - return IRQ_HANDLED; |
3582 | -} |
3583 | - |
3584 | -static unsigned int adv7511_is_interrupt_pending(struct adv7511 *adv7511, |
3585 | - unsigned int irq) |
3586 | +static int adv7511_irq_process(struct adv7511 *adv7511) |
3587 | { |
3588 | unsigned int irq0, irq1; |
3589 | - unsigned int pending; |
3590 | int ret; |
3591 | |
3592 | ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0); |
3593 | if (ret < 0) |
3594 | - return 0; |
3595 | + return ret; |
3596 | + |
3597 | ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1); |
3598 | if (ret < 0) |
3599 | - return 0; |
3600 | + return ret; |
3601 | + |
3602 | + regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0); |
3603 | + regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1); |
3604 | + |
3605 | + if (irq0 & ADV7511_INT0_HDP) |
3606 | + drm_helper_hpd_irq_event(adv7511->encoder->dev); |
3607 | + |
3608 | + if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) { |
3609 | + adv7511->edid_read = true; |
3610 | + |
3611 | + if (adv7511->i2c_main->irq) |
3612 | + wake_up_all(&adv7511->wq); |
3613 | + } |
3614 | + |
3615 | + return 0; |
3616 | +} |
3617 | |
3618 | - pending = (irq1 << 8) | irq0; |
3619 | +static irqreturn_t adv7511_irq_handler(int irq, void *devid) |
3620 | +{ |
3621 | + struct adv7511 *adv7511 = devid; |
3622 | + int ret; |
3623 | |
3624 | - return pending & irq; |
3625 | + ret = adv7511_irq_process(adv7511); |
3626 | + return ret < 0 ? IRQ_NONE : IRQ_HANDLED; |
3627 | } |
3628 | |
3629 | -static int adv7511_wait_for_interrupt(struct adv7511 *adv7511, int irq, |
3630 | - int timeout) |
3631 | +/* ----------------------------------------------------------------------------- |
3632 | + * EDID retrieval |
3633 | + */ |
3634 | + |
3635 | +static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout) |
3636 | { |
3637 | - unsigned int pending; |
3638 | int ret; |
3639 | |
3640 | if (adv7511->i2c_main->irq) { |
3641 | ret = wait_event_interruptible_timeout(adv7511->wq, |
3642 | - adv7511_is_interrupt_pending(adv7511, irq), |
3643 | - msecs_to_jiffies(timeout)); |
3644 | - if (ret <= 0) |
3645 | - return 0; |
3646 | - pending = adv7511_is_interrupt_pending(adv7511, irq); |
3647 | + adv7511->edid_read, msecs_to_jiffies(timeout)); |
3648 | } else { |
3649 | - if (timeout < 25) |
3650 | - timeout = 25; |
3651 | - do { |
3652 | - pending = adv7511_is_interrupt_pending(adv7511, irq); |
3653 | - if (pending) |
3654 | + for (; timeout > 0; timeout -= 25) { |
3655 | + ret = adv7511_irq_process(adv7511); |
3656 | + if (ret < 0) |
3657 | break; |
3658 | + |
3659 | + if (adv7511->edid_read) |
3660 | + break; |
3661 | + |
3662 | msleep(25); |
3663 | - timeout -= 25; |
3664 | - } while (timeout >= 25); |
3665 | + } |
3666 | } |
3667 | |
3668 | - return pending; |
3669 | + return adv7511->edid_read ? 0 : -EIO; |
3670 | } |
3671 | |
3672 | -/* ----------------------------------------------------------------------------- |
3673 | - * EDID retrieval |
3674 | - */ |
3675 | - |
3676 | static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block, |
3677 | size_t len) |
3678 | { |
3679 | @@ -463,19 +466,14 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block, |
3680 | return ret; |
3681 | |
3682 | if (status != 2) { |
3683 | + adv7511->edid_read = false; |
3684 | regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT, |
3685 | block); |
3686 | - ret = adv7511_wait_for_interrupt(adv7511, |
3687 | - ADV7511_INT0_EDID_READY | |
3688 | - ADV7511_INT1_DDC_ERROR, 200); |
3689 | - |
3690 | - if (!(ret & ADV7511_INT0_EDID_READY)) |
3691 | - return -EIO; |
3692 | + ret = adv7511_wait_for_edid(adv7511, 200); |
3693 | + if (ret < 0) |
3694 | + return ret; |
3695 | } |
3696 | |
3697 | - regmap_write(adv7511->regmap, ADV7511_REG_INT(0), |
3698 | - ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR); |
3699 | - |
3700 | /* Break this apart, hopefully more I2C controllers will |
3701 | * support 64 byte transfers than 256 byte transfers |
3702 | */ |
3703 | @@ -528,7 +526,9 @@ static int adv7511_get_modes(struct drm_encoder *encoder, |
3704 | /* Reading the EDID only works if the device is powered */ |
3705 | if (adv7511->dpms_mode != DRM_MODE_DPMS_ON) { |
3706 | regmap_write(adv7511->regmap, ADV7511_REG_INT(0), |
3707 | - ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR); |
3708 | + ADV7511_INT0_EDID_READY); |
3709 | + regmap_write(adv7511->regmap, ADV7511_REG_INT(1), |
3710 | + ADV7511_INT1_DDC_ERROR); |
3711 | regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, |
3712 | ADV7511_POWER_POWER_DOWN, 0); |
3713 | adv7511->current_edid_segment = -1; |
3714 | @@ -563,7 +563,9 @@ static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode) |
3715 | adv7511->current_edid_segment = -1; |
3716 | |
3717 | regmap_write(adv7511->regmap, ADV7511_REG_INT(0), |
3718 | - ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR); |
3719 | + ADV7511_INT0_EDID_READY); |
3720 | + regmap_write(adv7511->regmap, ADV7511_REG_INT(1), |
3721 | + ADV7511_INT1_DDC_ERROR); |
3722 | regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, |
3723 | ADV7511_POWER_POWER_DOWN, 0); |
3724 | /* |
3725 | diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c |
3726 | index 5c66b568bb81..ec4d932f8be4 100644 |
3727 | --- a/drivers/gpu/drm/i915/i915_drv.c |
3728 | +++ b/drivers/gpu/drm/i915/i915_drv.c |
3729 | @@ -1042,7 +1042,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) |
3730 | s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4); |
3731 | |
3732 | s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); |
3733 | - s->gfx_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); |
3734 | + s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT); |
3735 | |
3736 | s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7); |
3737 | s->ecochk = I915_READ(GAM_ECOCHK); |
3738 | @@ -1124,7 +1124,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) |
3739 | I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]); |
3740 | |
3741 | I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count); |
3742 | - I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count); |
3743 | + I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count); |
3744 | |
3745 | I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp); |
3746 | I915_WRITE(GAM_ECOCHK, s->ecochk); |
3747 | diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c |
3748 | index ede5bbbd8a08..07320cb32611 100644 |
3749 | --- a/drivers/gpu/drm/i915/i915_irq.c |
3750 | +++ b/drivers/gpu/drm/i915/i915_irq.c |
3751 | @@ -3718,14 +3718,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev) |
3752 | ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
3753 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
3754 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
3755 | - I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | |
3756 | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); |
3757 | + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); |
3758 | I915_WRITE16(IMR, dev_priv->irq_mask); |
3759 | |
3760 | I915_WRITE16(IER, |
3761 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
3762 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
3763 | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | |
3764 | I915_USER_INTERRUPT); |
3765 | POSTING_READ16(IER); |
3766 | |
3767 | @@ -3887,14 +3885,12 @@ static int i915_irq_postinstall(struct drm_device *dev) |
3768 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
3769 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
3770 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
3771 | - I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | |
3772 | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); |
3773 | + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); |
3774 | |
3775 | enable_mask = |
3776 | I915_ASLE_INTERRUPT | |
3777 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
3778 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
3779 | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | |
3780 | I915_USER_INTERRUPT; |
3781 | |
3782 | if (I915_HAS_HOTPLUG(dev)) { |
3783 | diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h |
3784 | index 33b3d0a24071..f536ff2628fd 100644 |
3785 | --- a/drivers/gpu/drm/i915/i915_reg.h |
3786 | +++ b/drivers/gpu/drm/i915/i915_reg.h |
3787 | @@ -1740,6 +1740,7 @@ enum punit_power_well { |
3788 | #define GMBUS_CYCLE_INDEX (2<<25) |
3789 | #define GMBUS_CYCLE_STOP (4<<25) |
3790 | #define GMBUS_BYTE_COUNT_SHIFT 16 |
3791 | +#define GMBUS_BYTE_COUNT_MAX 256U |
3792 | #define GMBUS_SLAVE_INDEX_SHIFT 8 |
3793 | #define GMBUS_SLAVE_ADDR_SHIFT 1 |
3794 | #define GMBUS_SLAVE_READ (1<<0) |
3795 | diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c |
3796 | index b31088a551f2..56e437e31580 100644 |
3797 | --- a/drivers/gpu/drm/i915/intel_i2c.c |
3798 | +++ b/drivers/gpu/drm/i915/intel_i2c.c |
3799 | @@ -270,18 +270,17 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) |
3800 | } |
3801 | |
3802 | static int |
3803 | -gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, |
3804 | - u32 gmbus1_index) |
3805 | +gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv, |
3806 | + unsigned short addr, u8 *buf, unsigned int len, |
3807 | + u32 gmbus1_index) |
3808 | { |
3809 | int reg_offset = dev_priv->gpio_mmio_base; |
3810 | - u16 len = msg->len; |
3811 | - u8 *buf = msg->buf; |
3812 | |
3813 | I915_WRITE(GMBUS1 + reg_offset, |
3814 | gmbus1_index | |
3815 | GMBUS_CYCLE_WAIT | |
3816 | (len << GMBUS_BYTE_COUNT_SHIFT) | |
3817 | - (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) | |
3818 | + (addr << GMBUS_SLAVE_ADDR_SHIFT) | |
3819 | GMBUS_SLAVE_READ | GMBUS_SW_RDY); |
3820 | while (len) { |
3821 | int ret; |
3822 | @@ -303,11 +302,35 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, |
3823 | } |
3824 | |
3825 | static int |
3826 | -gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) |
3827 | +gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, |
3828 | + u32 gmbus1_index) |
3829 | { |
3830 | - int reg_offset = dev_priv->gpio_mmio_base; |
3831 | - u16 len = msg->len; |
3832 | u8 *buf = msg->buf; |
3833 | + unsigned int rx_size = msg->len; |
3834 | + unsigned int len; |
3835 | + int ret; |
3836 | + |
3837 | + do { |
3838 | + len = min(rx_size, GMBUS_BYTE_COUNT_MAX); |
3839 | + |
3840 | + ret = gmbus_xfer_read_chunk(dev_priv, msg->addr, |
3841 | + buf, len, gmbus1_index); |
3842 | + if (ret) |
3843 | + return ret; |
3844 | + |
3845 | + rx_size -= len; |
3846 | + buf += len; |
3847 | + } while (rx_size != 0); |
3848 | + |
3849 | + return 0; |
3850 | +} |
3851 | + |
3852 | +static int |
3853 | +gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, |
3854 | + unsigned short addr, u8 *buf, unsigned int len) |
3855 | +{ |
3856 | + int reg_offset = dev_priv->gpio_mmio_base; |
3857 | + unsigned int chunk_size = len; |
3858 | u32 val, loop; |
3859 | |
3860 | val = loop = 0; |
3861 | @@ -319,8 +342,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) |
3862 | I915_WRITE(GMBUS3 + reg_offset, val); |
3863 | I915_WRITE(GMBUS1 + reg_offset, |
3864 | GMBUS_CYCLE_WAIT | |
3865 | - (msg->len << GMBUS_BYTE_COUNT_SHIFT) | |
3866 | - (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) | |
3867 | + (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | |
3868 | + (addr << GMBUS_SLAVE_ADDR_SHIFT) | |
3869 | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); |
3870 | while (len) { |
3871 | int ret; |
3872 | @@ -337,6 +360,29 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) |
3873 | if (ret) |
3874 | return ret; |
3875 | } |
3876 | + |
3877 | + return 0; |
3878 | +} |
3879 | + |
3880 | +static int |
3881 | +gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) |
3882 | +{ |
3883 | + u8 *buf = msg->buf; |
3884 | + unsigned int tx_size = msg->len; |
3885 | + unsigned int len; |
3886 | + int ret; |
3887 | + |
3888 | + do { |
3889 | + len = min(tx_size, GMBUS_BYTE_COUNT_MAX); |
3890 | + |
3891 | + ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len); |
3892 | + if (ret) |
3893 | + return ret; |
3894 | + |
3895 | + buf += len; |
3896 | + tx_size -= len; |
3897 | + } while (tx_size != 0); |
3898 | + |
3899 | return 0; |
3900 | } |
3901 | |
3902 | diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c |
3903 | index 86807ee91bd1..9bd56116fd5a 100644 |
3904 | --- a/drivers/gpu/drm/radeon/atombios_crtc.c |
3905 | +++ b/drivers/gpu/drm/radeon/atombios_crtc.c |
3906 | @@ -330,8 +330,10 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc, |
3907 | misc |= ATOM_COMPOSITESYNC; |
3908 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) |
3909 | misc |= ATOM_INTERLACE; |
3910 | - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
3911 | + if (mode->flags & DRM_MODE_FLAG_DBLCLK) |
3912 | misc |= ATOM_DOUBLE_CLOCK_MODE; |
3913 | + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
3914 | + misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2; |
3915 | |
3916 | args.susModeMiscInfo.usAccess = cpu_to_le16(misc); |
3917 | args.ucCRTC = radeon_crtc->crtc_id; |
3918 | @@ -374,8 +376,10 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc, |
3919 | misc |= ATOM_COMPOSITESYNC; |
3920 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) |
3921 | misc |= ATOM_INTERLACE; |
3922 | - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
3923 | + if (mode->flags & DRM_MODE_FLAG_DBLCLK) |
3924 | misc |= ATOM_DOUBLE_CLOCK_MODE; |
3925 | + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
3926 | + misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2; |
3927 | |
3928 | args.susModeMiscInfo.usAccess = cpu_to_le16(misc); |
3929 | args.ucCRTC = radeon_crtc->crtc_id; |
3930 | diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h |
3931 | index 9c4786759f16..7fe5590b328b 100644 |
3932 | --- a/drivers/hid/hid-ids.h |
3933 | +++ b/drivers/hid/hid-ids.h |
3934 | @@ -459,6 +459,10 @@ |
3935 | #define USB_DEVICE_ID_UGCI_FLYING 0x0020 |
3936 | #define USB_DEVICE_ID_UGCI_FIGHTING 0x0030 |
3937 | |
3938 | +#define USB_VENDOR_ID_HP 0x03f0 |
3939 | +#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE 0x0a4a |
3940 | +#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a |
3941 | + |
3942 | #define USB_VENDOR_ID_HUION 0x256c |
3943 | #define USB_DEVICE_ID_HUION_TABLET 0x006e |
3944 | |
3945 | diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c |
3946 | index a82127753461..4e3ae9fbb9b5 100644 |
3947 | --- a/drivers/hid/usbhid/hid-quirks.c |
3948 | +++ b/drivers/hid/usbhid/hid-quirks.c |
3949 | @@ -78,6 +78,8 @@ static const struct hid_blacklist { |
3950 | { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, |
3951 | { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, |
3952 | { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, |
3953 | + { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, |
3954 | + { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, |
3955 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL }, |
3956 | { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, |
3957 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, |
3958 | diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c |
3959 | index 2978f5ee8d2a..00bc30e0db7f 100644 |
3960 | --- a/drivers/hv/channel.c |
3961 | +++ b/drivers/hv/channel.c |
3962 | @@ -135,7 +135,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, |
3963 | GFP_KERNEL); |
3964 | if (!open_info) { |
3965 | err = -ENOMEM; |
3966 | - goto error0; |
3967 | + goto error_gpadl; |
3968 | } |
3969 | |
3970 | init_completion(&open_info->waitevent); |
3971 | @@ -151,7 +151,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, |
3972 | |
3973 | if (userdatalen > MAX_USER_DEFINED_BYTES) { |
3974 | err = -EINVAL; |
3975 | - goto error0; |
3976 | + goto error_gpadl; |
3977 | } |
3978 | |
3979 | if (userdatalen) |
3980 | @@ -195,6 +195,9 @@ error1: |
3981 | list_del(&open_info->msglistentry); |
3982 | spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); |
3983 | |
3984 | +error_gpadl: |
3985 | + vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle); |
3986 | + |
3987 | error0: |
3988 | free_pages((unsigned long)out, |
3989 | get_order(send_ringbuffer_size + recv_ringbuffer_size)); |
3990 | diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c |
3991 | index 5f96b1b3e3a5..019d5426fe52 100644 |
3992 | --- a/drivers/i2c/busses/i2c-rk3x.c |
3993 | +++ b/drivers/i2c/busses/i2c-rk3x.c |
3994 | @@ -833,7 +833,7 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap, |
3995 | clk_disable(i2c->clk); |
3996 | spin_unlock_irqrestore(&i2c->lock, flags); |
3997 | |
3998 | - return ret; |
3999 | + return ret < 0 ? ret : num; |
4000 | } |
4001 | |
4002 | static u32 rk3x_i2c_func(struct i2c_adapter *adap) |
4003 | diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c |
4004 | index edf274cabe81..8143162b374d 100644 |
4005 | --- a/drivers/i2c/i2c-core.c |
4006 | +++ b/drivers/i2c/i2c-core.c |
4007 | @@ -596,6 +596,7 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap) |
4008 | adap->bus_recovery_info->set_scl(adap, 1); |
4009 | return i2c_generic_recovery(adap); |
4010 | } |
4011 | +EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery); |
4012 | |
4013 | int i2c_generic_gpio_recovery(struct i2c_adapter *adap) |
4014 | { |
4015 | @@ -610,6 +611,7 @@ int i2c_generic_gpio_recovery(struct i2c_adapter *adap) |
4016 | |
4017 | return ret; |
4018 | } |
4019 | +EXPORT_SYMBOL_GPL(i2c_generic_gpio_recovery); |
4020 | |
4021 | int i2c_recover_bus(struct i2c_adapter *adap) |
4022 | { |
4023 | @@ -619,6 +621,7 @@ int i2c_recover_bus(struct i2c_adapter *adap) |
4024 | dev_dbg(&adap->dev, "Trying i2c bus recovery\n"); |
4025 | return adap->bus_recovery_info->recover_bus(adap); |
4026 | } |
4027 | +EXPORT_SYMBOL_GPL(i2c_recover_bus); |
4028 | |
4029 | static int i2c_device_probe(struct device *dev) |
4030 | { |
4031 | @@ -1410,6 +1413,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap) |
4032 | |
4033 | dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name); |
4034 | |
4035 | + pm_runtime_no_callbacks(&adap->dev); |
4036 | + |
4037 | #ifdef CONFIG_I2C_COMPAT |
4038 | res = class_compat_create_link(i2c_adapter_compat_class, &adap->dev, |
4039 | adap->dev.parent); |
4040 | diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c |
4041 | index 593f7ca9adc7..06cc1ff088f1 100644 |
4042 | --- a/drivers/i2c/i2c-mux.c |
4043 | +++ b/drivers/i2c/i2c-mux.c |
4044 | @@ -32,8 +32,9 @@ struct i2c_mux_priv { |
4045 | struct i2c_algorithm algo; |
4046 | |
4047 | struct i2c_adapter *parent; |
4048 | - void *mux_priv; /* the mux chip/device */ |
4049 | - u32 chan_id; /* the channel id */ |
4050 | + struct device *mux_dev; |
4051 | + void *mux_priv; |
4052 | + u32 chan_id; |
4053 | |
4054 | int (*select)(struct i2c_adapter *, void *mux_priv, u32 chan_id); |
4055 | int (*deselect)(struct i2c_adapter *, void *mux_priv, u32 chan_id); |
4056 | @@ -119,6 +120,7 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent, |
4057 | |
4058 | /* Set up private adapter data */ |
4059 | priv->parent = parent; |
4060 | + priv->mux_dev = mux_dev; |
4061 | priv->mux_priv = mux_priv; |
4062 | priv->chan_id = chan_id; |
4063 | priv->select = select; |
4064 | @@ -203,7 +205,7 @@ void i2c_del_mux_adapter(struct i2c_adapter *adap) |
4065 | char symlink_name[20]; |
4066 | |
4067 | snprintf(symlink_name, sizeof(symlink_name), "channel-%u", priv->chan_id); |
4068 | - sysfs_remove_link(&adap->dev.parent->kobj, symlink_name); |
4069 | + sysfs_remove_link(&priv->mux_dev->kobj, symlink_name); |
4070 | |
4071 | sysfs_remove_link(&priv->adap.dev.kobj, "mux_device"); |
4072 | i2c_del_adapter(adap); |
4073 | diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c |
4074 | index b0e58522780d..44d1d7920202 100644 |
4075 | --- a/drivers/idle/intel_idle.c |
4076 | +++ b/drivers/idle/intel_idle.c |
4077 | @@ -218,18 +218,10 @@ static struct cpuidle_state byt_cstates[] = { |
4078 | .enter = &intel_idle, |
4079 | .enter_freeze = intel_idle_freeze, }, |
4080 | { |
4081 | - .name = "C1E-BYT", |
4082 | - .desc = "MWAIT 0x01", |
4083 | - .flags = MWAIT2flg(0x01), |
4084 | - .exit_latency = 15, |
4085 | - .target_residency = 30, |
4086 | - .enter = &intel_idle, |
4087 | - .enter_freeze = intel_idle_freeze, }, |
4088 | - { |
4089 | .name = "C6N-BYT", |
4090 | .desc = "MWAIT 0x58", |
4091 | .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED, |
4092 | - .exit_latency = 40, |
4093 | + .exit_latency = 300, |
4094 | .target_residency = 275, |
4095 | .enter = &intel_idle, |
4096 | .enter_freeze = intel_idle_freeze, }, |
4097 | @@ -237,7 +229,7 @@ static struct cpuidle_state byt_cstates[] = { |
4098 | .name = "C6S-BYT", |
4099 | .desc = "MWAIT 0x52", |
4100 | .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, |
4101 | - .exit_latency = 140, |
4102 | + .exit_latency = 500, |
4103 | .target_residency = 560, |
4104 | .enter = &intel_idle, |
4105 | .enter_freeze = intel_idle_freeze, }, |
4106 | @@ -246,7 +238,7 @@ static struct cpuidle_state byt_cstates[] = { |
4107 | .desc = "MWAIT 0x60", |
4108 | .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, |
4109 | .exit_latency = 1200, |
4110 | - .target_residency = 1500, |
4111 | + .target_residency = 4000, |
4112 | .enter = &intel_idle, |
4113 | .enter_freeze = intel_idle_freeze, }, |
4114 | { |
4115 | diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c |
4116 | index 8c014b5dab4c..38acb3cfc545 100644 |
4117 | --- a/drivers/infiniband/core/umem.c |
4118 | +++ b/drivers/infiniband/core/umem.c |
4119 | @@ -99,12 +99,15 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, |
4120 | if (dmasync) |
4121 | dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); |
4122 | |
4123 | + if (!size) |
4124 | + return ERR_PTR(-EINVAL); |
4125 | + |
4126 | /* |
4127 | * If the combination of the addr and size requested for this memory |
4128 | * region causes an integer overflow, return error. |
4129 | */ |
4130 | - if ((PAGE_ALIGN(addr + size) <= size) || |
4131 | - (PAGE_ALIGN(addr + size) <= addr)) |
4132 | + if (((addr + size) < addr) || |
4133 | + PAGE_ALIGN(addr + size) < (addr + size)) |
4134 | return ERR_PTR(-EINVAL); |
4135 | |
4136 | if (!can_do_mlock()) |
4137 | diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c |
4138 | index ed2bd6701f9b..fbde33a5228c 100644 |
4139 | --- a/drivers/infiniband/hw/mlx4/qp.c |
4140 | +++ b/drivers/infiniband/hw/mlx4/qp.c |
4141 | @@ -2605,8 +2605,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, |
4142 | |
4143 | memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen); |
4144 | |
4145 | - *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 | |
4146 | - wr->wr.ud.hlen); |
4147 | + *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen); |
4148 | *lso_seg_len = halign; |
4149 | return 0; |
4150 | } |
4151 | diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c |
4152 | index 20e859a6f1a6..76eb57b31a59 100644 |
4153 | --- a/drivers/infiniband/ulp/iser/iser_initiator.c |
4154 | +++ b/drivers/infiniband/ulp/iser/iser_initiator.c |
4155 | @@ -409,8 +409,8 @@ int iser_send_command(struct iscsi_conn *conn, |
4156 | if (scsi_prot_sg_count(sc)) { |
4157 | prot_buf->buf = scsi_prot_sglist(sc); |
4158 | prot_buf->size = scsi_prot_sg_count(sc); |
4159 | - prot_buf->data_len = data_buf->data_len >> |
4160 | - ilog2(sc->device->sector_size) * 8; |
4161 | + prot_buf->data_len = (data_buf->data_len >> |
4162 | + ilog2(sc->device->sector_size)) * 8; |
4163 | } |
4164 | |
4165 | if (hdr->flags & ISCSI_FLAG_CMD_READ) { |
4166 | diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c |
4167 | index 075b19cc78e8..147029adb885 100644 |
4168 | --- a/drivers/infiniband/ulp/isert/ib_isert.c |
4169 | +++ b/drivers/infiniband/ulp/isert/ib_isert.c |
4170 | @@ -222,7 +222,7 @@ fail: |
4171 | static void |
4172 | isert_free_rx_descriptors(struct isert_conn *isert_conn) |
4173 | { |
4174 | - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; |
4175 | + struct ib_device *ib_dev = isert_conn->conn_device->ib_device; |
4176 | struct iser_rx_desc *rx_desc; |
4177 | int i; |
4178 | |
4179 | @@ -719,8 +719,8 @@ out: |
4180 | static void |
4181 | isert_connect_release(struct isert_conn *isert_conn) |
4182 | { |
4183 | - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; |
4184 | struct isert_device *device = isert_conn->conn_device; |
4185 | + struct ib_device *ib_dev = device->ib_device; |
4186 | |
4187 | isert_dbg("conn %p\n", isert_conn); |
4188 | |
4189 | @@ -728,7 +728,8 @@ isert_connect_release(struct isert_conn *isert_conn) |
4190 | isert_conn_free_fastreg_pool(isert_conn); |
4191 | |
4192 | isert_free_rx_descriptors(isert_conn); |
4193 | - rdma_destroy_id(isert_conn->conn_cm_id); |
4194 | + if (isert_conn->conn_cm_id) |
4195 | + rdma_destroy_id(isert_conn->conn_cm_id); |
4196 | |
4197 | if (isert_conn->conn_qp) { |
4198 | struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context; |
4199 | @@ -878,12 +879,15 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id, |
4200 | return 0; |
4201 | } |
4202 | |
4203 | -static void |
4204 | +static int |
4205 | isert_connect_error(struct rdma_cm_id *cma_id) |
4206 | { |
4207 | struct isert_conn *isert_conn = cma_id->qp->qp_context; |
4208 | |
4209 | + isert_conn->conn_cm_id = NULL; |
4210 | isert_put_conn(isert_conn); |
4211 | + |
4212 | + return -1; |
4213 | } |
4214 | |
4215 | static int |
4216 | @@ -912,7 +916,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) |
4217 | case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */ |
4218 | case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */ |
4219 | case RDMA_CM_EVENT_CONNECT_ERROR: |
4220 | - isert_connect_error(cma_id); |
4221 | + ret = isert_connect_error(cma_id); |
4222 | break; |
4223 | default: |
4224 | isert_err("Unhandled RDMA CMA event: %d\n", event->event); |
4225 | @@ -1861,11 +1865,13 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc, |
4226 | cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; |
4227 | spin_unlock_bh(&cmd->istate_lock); |
4228 | |
4229 | - if (ret) |
4230 | + if (ret) { |
4231 | + target_put_sess_cmd(se_cmd->se_sess, se_cmd); |
4232 | transport_send_check_condition_and_sense(se_cmd, |
4233 | se_cmd->pi_err, 0); |
4234 | - else |
4235 | + } else { |
4236 | target_execute_cmd(se_cmd); |
4237 | + } |
4238 | } |
4239 | |
4240 | static void |
4241 | diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c |
4242 | index 27bcdbc950c9..ea6cb64dfb28 100644 |
4243 | --- a/drivers/input/mouse/alps.c |
4244 | +++ b/drivers/input/mouse/alps.c |
4245 | @@ -1159,13 +1159,14 @@ static void alps_report_bare_ps2_packet(struct psmouse *psmouse, |
4246 | bool report_buttons) |
4247 | { |
4248 | struct alps_data *priv = psmouse->private; |
4249 | - struct input_dev *dev; |
4250 | + struct input_dev *dev, *dev2 = NULL; |
4251 | |
4252 | /* Figure out which device to use to report the bare packet */ |
4253 | if (priv->proto_version == ALPS_PROTO_V2 && |
4254 | (priv->flags & ALPS_DUALPOINT)) { |
4255 | /* On V2 devices the DualPoint Stick reports bare packets */ |
4256 | dev = priv->dev2; |
4257 | + dev2 = psmouse->dev; |
4258 | } else if (unlikely(IS_ERR_OR_NULL(priv->dev3))) { |
4259 | /* Register dev3 mouse if we received PS/2 packet first time */ |
4260 | if (!IS_ERR(priv->dev3)) |
4261 | @@ -1177,7 +1178,7 @@ static void alps_report_bare_ps2_packet(struct psmouse *psmouse, |
4262 | } |
4263 | |
4264 | if (report_buttons) |
4265 | - alps_report_buttons(dev, NULL, |
4266 | + alps_report_buttons(dev, dev2, |
4267 | packet[0] & 1, packet[0] & 2, packet[0] & 4); |
4268 | |
4269 | input_report_rel(dev, REL_X, |
4270 | diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c |
4271 | index 6e22682c8255..991dc6b20a58 100644 |
4272 | --- a/drivers/input/mouse/elantech.c |
4273 | +++ b/drivers/input/mouse/elantech.c |
4274 | @@ -893,6 +893,21 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse) |
4275 | } |
4276 | |
4277 | /* |
4278 | + * This writes the reg_07 value again to the hardware at the end of every |
4279 | + * set_rate call because the register loses its value. reg_07 allows setting |
4280 | + * absolute mode on v4 hardware |
4281 | + */ |
4282 | +static void elantech_set_rate_restore_reg_07(struct psmouse *psmouse, |
4283 | + unsigned int rate) |
4284 | +{ |
4285 | + struct elantech_data *etd = psmouse->private; |
4286 | + |
4287 | + etd->original_set_rate(psmouse, rate); |
4288 | + if (elantech_write_reg(psmouse, 0x07, etd->reg_07)) |
4289 | + psmouse_err(psmouse, "restoring reg_07 failed\n"); |
4290 | +} |
4291 | + |
4292 | +/* |
4293 | * Put the touchpad into absolute mode |
4294 | */ |
4295 | static int elantech_set_absolute_mode(struct psmouse *psmouse) |
4296 | @@ -1094,6 +1109,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, |
4297 | * Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons |
4298 | * Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons |
4299 | * Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons |
4300 | + * Asus TP500LN 0x381f17 10, 14, 0e clickpad |
4301 | + * Asus X750JN 0x381f17 10, 14, 0e clickpad |
4302 | * Asus UX31 0x361f00 20, 15, 0e clickpad |
4303 | * Asus UX32VD 0x361f02 00, 15, 0e clickpad |
4304 | * Avatar AVIU-145A2 0x361f00 ? clickpad |
4305 | @@ -1635,6 +1652,11 @@ int elantech_init(struct psmouse *psmouse) |
4306 | goto init_fail; |
4307 | } |
4308 | |
4309 | + if (etd->fw_version == 0x381f17) { |
4310 | + etd->original_set_rate = psmouse->set_rate; |
4311 | + psmouse->set_rate = elantech_set_rate_restore_reg_07; |
4312 | + } |
4313 | + |
4314 | if (elantech_set_input_params(psmouse)) { |
4315 | psmouse_err(psmouse, "failed to query touchpad range.\n"); |
4316 | goto init_fail; |
4317 | diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h |
4318 | index 6f3afec02f03..f965d1569cc3 100644 |
4319 | --- a/drivers/input/mouse/elantech.h |
4320 | +++ b/drivers/input/mouse/elantech.h |
4321 | @@ -142,6 +142,7 @@ struct elantech_data { |
4322 | struct finger_pos mt[ETP_MAX_FINGERS]; |
4323 | unsigned char parity[256]; |
4324 | int (*send_cmd)(struct psmouse *psmouse, unsigned char c, unsigned char *param); |
4325 | + void (*original_set_rate)(struct psmouse *psmouse, unsigned int rate); |
4326 | }; |
4327 | |
4328 | #ifdef CONFIG_MOUSE_PS2_ELANTECH |
4329 | diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c |
4330 | index 713a96237a80..414739295d04 100644 |
4331 | --- a/drivers/md/dm-crypt.c |
4332 | +++ b/drivers/md/dm-crypt.c |
4333 | @@ -925,11 +925,10 @@ static int crypt_convert(struct crypt_config *cc, |
4334 | |
4335 | switch (r) { |
4336 | /* async */ |
4337 | + case -EINPROGRESS: |
4338 | case -EBUSY: |
4339 | wait_for_completion(&ctx->restart); |
4340 | reinit_completion(&ctx->restart); |
4341 | - /* fall through*/ |
4342 | - case -EINPROGRESS: |
4343 | ctx->req = NULL; |
4344 | ctx->cc_sector++; |
4345 | continue; |
4346 | @@ -1346,10 +1345,8 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, |
4347 | struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); |
4348 | struct crypt_config *cc = io->cc; |
4349 | |
4350 | - if (error == -EINPROGRESS) { |
4351 | - complete(&ctx->restart); |
4352 | + if (error == -EINPROGRESS) |
4353 | return; |
4354 | - } |
4355 | |
4356 | if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) |
4357 | error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq); |
4358 | @@ -1360,12 +1357,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, |
4359 | crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); |
4360 | |
4361 | if (!atomic_dec_and_test(&ctx->cc_pending)) |
4362 | - return; |
4363 | + goto done; |
4364 | |
4365 | if (bio_data_dir(io->base_bio) == READ) |
4366 | kcryptd_crypt_read_done(io); |
4367 | else |
4368 | kcryptd_crypt_write_io_submit(io, 1); |
4369 | +done: |
4370 | + if (!completion_done(&ctx->restart)) |
4371 | + complete(&ctx->restart); |
4372 | } |
4373 | |
4374 | static void kcryptd_crypt(struct work_struct *work) |
4375 | diff --git a/drivers/md/md.c b/drivers/md/md.c |
4376 | index 717daad71fb1..e6178787ce3d 100644 |
4377 | --- a/drivers/md/md.c |
4378 | +++ b/drivers/md/md.c |
4379 | @@ -249,6 +249,7 @@ static void md_make_request(struct request_queue *q, struct bio *bio) |
4380 | const int rw = bio_data_dir(bio); |
4381 | struct mddev *mddev = q->queuedata; |
4382 | unsigned int sectors; |
4383 | + int cpu; |
4384 | |
4385 | if (mddev == NULL || mddev->pers == NULL |
4386 | || !mddev->ready) { |
4387 | @@ -284,7 +285,10 @@ static void md_make_request(struct request_queue *q, struct bio *bio) |
4388 | sectors = bio_sectors(bio); |
4389 | mddev->pers->make_request(mddev, bio); |
4390 | |
4391 | - generic_start_io_acct(rw, sectors, &mddev->gendisk->part0); |
4392 | + cpu = part_stat_lock(); |
4393 | + part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); |
4394 | + part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors); |
4395 | + part_stat_unlock(); |
4396 | |
4397 | if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) |
4398 | wake_up(&mddev->sb_wait); |
4399 | diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c |
4400 | index 3ed9f42ddca6..3b5d7f704aa3 100644 |
4401 | --- a/drivers/md/raid0.c |
4402 | +++ b/drivers/md/raid0.c |
4403 | @@ -313,7 +313,7 @@ static struct strip_zone *find_zone(struct r0conf *conf, |
4404 | |
4405 | /* |
4406 | * remaps the bio to the target device. we separate two flows. |
4407 | - * power 2 flow and a general flow for the sake of perfromance |
4408 | + * power 2 flow and a general flow for the sake of performance |
4409 | */ |
4410 | static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone, |
4411 | sector_t sector, sector_t *sector_offset) |
4412 | @@ -524,6 +524,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) |
4413 | split = bio; |
4414 | } |
4415 | |
4416 | + sector = bio->bi_iter.bi_sector; |
4417 | zone = find_zone(mddev->private, §or); |
4418 | tmp_dev = map_sector(mddev, zone, sector, §or); |
4419 | split->bi_bdev = tmp_dev->bdev; |
4420 | diff --git a/drivers/media/rc/img-ir/img-ir-core.c b/drivers/media/rc/img-ir/img-ir-core.c |
4421 | index 77c78de4f5bf..7020659f23c2 100644 |
4422 | --- a/drivers/media/rc/img-ir/img-ir-core.c |
4423 | +++ b/drivers/media/rc/img-ir/img-ir-core.c |
4424 | @@ -146,7 +146,7 @@ static int img_ir_remove(struct platform_device *pdev) |
4425 | { |
4426 | struct img_ir_priv *priv = platform_get_drvdata(pdev); |
4427 | |
4428 | - free_irq(priv->irq, img_ir_isr); |
4429 | + free_irq(priv->irq, priv); |
4430 | img_ir_remove_hw(priv); |
4431 | img_ir_remove_raw(priv); |
4432 | |
4433 | diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c |
4434 | index 65a326c5128f..749ad5603c9e 100644 |
4435 | --- a/drivers/media/usb/stk1160/stk1160-v4l.c |
4436 | +++ b/drivers/media/usb/stk1160/stk1160-v4l.c |
4437 | @@ -240,6 +240,11 @@ static int stk1160_stop_streaming(struct stk1160 *dev) |
4438 | if (mutex_lock_interruptible(&dev->v4l_lock)) |
4439 | return -ERESTARTSYS; |
4440 | |
4441 | + /* |
4442 | + * Once URBs are cancelled, the URB complete handler |
4443 | + * won't be running. This is required to safely release the |
4444 | + * current buffer (dev->isoc_ctl.buf). |
4445 | + */ |
4446 | stk1160_cancel_isoc(dev); |
4447 | |
4448 | /* |
4449 | @@ -620,8 +625,16 @@ void stk1160_clear_queue(struct stk1160 *dev) |
4450 | stk1160_info("buffer [%p/%d] aborted\n", |
4451 | buf, buf->vb.v4l2_buf.index); |
4452 | } |
4453 | - /* It's important to clear current buffer */ |
4454 | - dev->isoc_ctl.buf = NULL; |
4455 | + |
4456 | + /* It's important to release the current buffer */ |
4457 | + if (dev->isoc_ctl.buf) { |
4458 | + buf = dev->isoc_ctl.buf; |
4459 | + dev->isoc_ctl.buf = NULL; |
4460 | + |
4461 | + vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); |
4462 | + stk1160_info("buffer [%p/%d] aborted\n", |
4463 | + buf, buf->vb.v4l2_buf.index); |
4464 | + } |
4465 | spin_unlock_irqrestore(&dev->buf_lock, flags); |
4466 | } |
4467 | |
4468 | diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c |
4469 | index fc145d202c46..922a750640e8 100644 |
4470 | --- a/drivers/memstick/core/mspro_block.c |
4471 | +++ b/drivers/memstick/core/mspro_block.c |
4472 | @@ -758,7 +758,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error) |
4473 | |
4474 | if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) { |
4475 | if (msb->data_dir == READ) { |
4476 | - for (cnt = 0; cnt < msb->current_seg; cnt++) |
4477 | + for (cnt = 0; cnt < msb->current_seg; cnt++) { |
4478 | t_len += msb->req_sg[cnt].length |
4479 | / msb->page_size; |
4480 | |
4481 | @@ -766,6 +766,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error) |
4482 | t_len += msb->current_page - 1; |
4483 | |
4484 | t_len *= msb->page_size; |
4485 | + } |
4486 | } |
4487 | } else |
4488 | t_len = blk_rq_bytes(msb->block_req); |
4489 | diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c |
4490 | index 2a87f69be53d..1aed3b7b8d9b 100644 |
4491 | --- a/drivers/mfd/mfd-core.c |
4492 | +++ b/drivers/mfd/mfd-core.c |
4493 | @@ -128,7 +128,7 @@ static int mfd_add_device(struct device *parent, int id, |
4494 | int platform_id; |
4495 | int r; |
4496 | |
4497 | - if (id < 0) |
4498 | + if (id == PLATFORM_DEVID_AUTO) |
4499 | platform_id = id; |
4500 | else |
4501 | platform_id = id + cell->id; |
4502 | diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c |
4503 | index e8a4218b5726..459ed1b601db 100644 |
4504 | --- a/drivers/mmc/host/sunxi-mmc.c |
4505 | +++ b/drivers/mmc/host/sunxi-mmc.c |
4506 | @@ -930,7 +930,9 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, |
4507 | return PTR_ERR(host->clk_sample); |
4508 | } |
4509 | |
4510 | - host->reset = devm_reset_control_get(&pdev->dev, "ahb"); |
4511 | + host->reset = devm_reset_control_get_optional(&pdev->dev, "ahb"); |
4512 | + if (PTR_ERR(host->reset) == -EPROBE_DEFER) |
4513 | + return PTR_ERR(host->reset); |
4514 | |
4515 | ret = clk_prepare_enable(host->clk_ahb); |
4516 | if (ret) { |
4517 | diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c |
4518 | index a31c3573d386..dba7e1c19dd7 100644 |
4519 | --- a/drivers/mmc/host/tmio_mmc_pio.c |
4520 | +++ b/drivers/mmc/host/tmio_mmc_pio.c |
4521 | @@ -1073,8 +1073,6 @@ EXPORT_SYMBOL(tmio_mmc_host_alloc); |
4522 | void tmio_mmc_host_free(struct tmio_mmc_host *host) |
4523 | { |
4524 | mmc_free_host(host->mmc); |
4525 | - |
4526 | - host->mmc = NULL; |
4527 | } |
4528 | EXPORT_SYMBOL(tmio_mmc_host_free); |
4529 | |
4530 | diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c |
4531 | index 9d2e16f3150a..b5e154856994 100644 |
4532 | --- a/drivers/mtd/ubi/attach.c |
4533 | +++ b/drivers/mtd/ubi/attach.c |
4534 | @@ -410,7 +410,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb, |
4535 | second_is_newer = !second_is_newer; |
4536 | } else { |
4537 | dbg_bld("PEB %d CRC is OK", pnum); |
4538 | - bitflips = !!err; |
4539 | + bitflips |= !!err; |
4540 | } |
4541 | mutex_unlock(&ubi->buf_mutex); |
4542 | |
4543 | diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c |
4544 | index d647e504f9b1..d16fccf79179 100644 |
4545 | --- a/drivers/mtd/ubi/cdev.c |
4546 | +++ b/drivers/mtd/ubi/cdev.c |
4547 | @@ -455,7 +455,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd, |
4548 | /* Validate the request */ |
4549 | err = -EINVAL; |
4550 | if (req.lnum < 0 || req.lnum >= vol->reserved_pebs || |
4551 | - req.bytes < 0 || req.lnum >= vol->usable_leb_size) |
4552 | + req.bytes < 0 || req.bytes > vol->usable_leb_size) |
4553 | break; |
4554 | |
4555 | err = get_exclusive(desc); |
4556 | diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c |
4557 | index 16e34b37d134..8c9a710def99 100644 |
4558 | --- a/drivers/mtd/ubi/eba.c |
4559 | +++ b/drivers/mtd/ubi/eba.c |
4560 | @@ -1419,7 +1419,8 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai) |
4561 | * during re-size. |
4562 | */ |
4563 | ubi_move_aeb_to_list(av, aeb, &ai->erase); |
4564 | - vol->eba_tbl[aeb->lnum] = aeb->pnum; |
4565 | + else |
4566 | + vol->eba_tbl[aeb->lnum] = aeb->pnum; |
4567 | } |
4568 | } |
4569 | |
4570 | diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c |
4571 | index 8f7bde6a85d6..0bd92d816391 100644 |
4572 | --- a/drivers/mtd/ubi/wl.c |
4573 | +++ b/drivers/mtd/ubi/wl.c |
4574 | @@ -1002,7 +1002,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, |
4575 | int shutdown) |
4576 | { |
4577 | int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; |
4578 | - int vol_id = -1, uninitialized_var(lnum); |
4579 | + int vol_id = -1, lnum = -1; |
4580 | #ifdef CONFIG_MTD_UBI_FASTMAP |
4581 | int anchor = wrk->anchor; |
4582 | #endif |
4583 | diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c |
4584 | index 81d41539fcba..77bf1337d179 100644 |
4585 | --- a/drivers/net/ethernet/cadence/macb.c |
4586 | +++ b/drivers/net/ethernet/cadence/macb.c |
4587 | @@ -2165,7 +2165,7 @@ static void macb_configure_caps(struct macb *bp) |
4588 | } |
4589 | } |
4590 | |
4591 | - if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2) |
4592 | + if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) >= 0x2) |
4593 | bp->caps |= MACB_CAPS_MACB_IS_GEM; |
4594 | |
4595 | if (macb_is_gem(bp)) { |
4596 | diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c |
4597 | index 7f997d36948f..a71c446631d1 100644 |
4598 | --- a/drivers/net/ethernet/intel/e1000/e1000_main.c |
4599 | +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c |
4600 | @@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, |
4601 | static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, |
4602 | struct e1000_rx_ring *rx_ring, |
4603 | int *work_done, int work_to_do); |
4604 | +static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter, |
4605 | + struct e1000_rx_ring *rx_ring, |
4606 | + int cleaned_count) |
4607 | +{ |
4608 | +} |
4609 | static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, |
4610 | struct e1000_rx_ring *rx_ring, |
4611 | int cleaned_count); |
4612 | @@ -3552,8 +3557,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) |
4613 | msleep(1); |
4614 | /* e1000_down has a dependency on max_frame_size */ |
4615 | hw->max_frame_size = max_frame; |
4616 | - if (netif_running(netdev)) |
4617 | + if (netif_running(netdev)) { |
4618 | + /* prevent buffers from being reallocated */ |
4619 | + adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; |
4620 | e1000_down(adapter); |
4621 | + } |
4622 | |
4623 | /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN |
4624 | * means we reserve 2 more, this pushes us to allocate from the next |
4625 | diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c |
4626 | index af829c578400..7ace07dad6a3 100644 |
4627 | --- a/drivers/net/ethernet/marvell/pxa168_eth.c |
4628 | +++ b/drivers/net/ethernet/marvell/pxa168_eth.c |
4629 | @@ -1508,7 +1508,8 @@ static int pxa168_eth_probe(struct platform_device *pdev) |
4630 | np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); |
4631 | if (!np) { |
4632 | dev_err(&pdev->dev, "missing phy-handle\n"); |
4633 | - return -EINVAL; |
4634 | + err = -EINVAL; |
4635 | + goto err_netdev; |
4636 | } |
4637 | of_property_read_u32(np, "reg", &pep->phy_addr); |
4638 | pep->phy_intf = of_get_phy_mode(pdev->dev.of_node); |
4639 | @@ -1526,7 +1527,7 @@ static int pxa168_eth_probe(struct platform_device *pdev) |
4640 | pep->smi_bus = mdiobus_alloc(); |
4641 | if (pep->smi_bus == NULL) { |
4642 | err = -ENOMEM; |
4643 | - goto err_base; |
4644 | + goto err_netdev; |
4645 | } |
4646 | pep->smi_bus->priv = pep; |
4647 | pep->smi_bus->name = "pxa168_eth smi"; |
4648 | @@ -1551,13 +1552,10 @@ err_mdiobus: |
4649 | mdiobus_unregister(pep->smi_bus); |
4650 | err_free_mdio: |
4651 | mdiobus_free(pep->smi_bus); |
4652 | -err_base: |
4653 | - iounmap(pep->base); |
4654 | err_netdev: |
4655 | free_netdev(dev); |
4656 | err_clk: |
4657 | - clk_disable(clk); |
4658 | - clk_put(clk); |
4659 | + clk_disable_unprepare(clk); |
4660 | return err; |
4661 | } |
4662 | |
4663 | @@ -1574,13 +1572,9 @@ static int pxa168_eth_remove(struct platform_device *pdev) |
4664 | if (pep->phy) |
4665 | phy_disconnect(pep->phy); |
4666 | if (pep->clk) { |
4667 | - clk_disable(pep->clk); |
4668 | - clk_put(pep->clk); |
4669 | - pep->clk = NULL; |
4670 | + clk_disable_unprepare(pep->clk); |
4671 | } |
4672 | |
4673 | - iounmap(pep->base); |
4674 | - pep->base = NULL; |
4675 | mdiobus_unregister(pep->smi_bus); |
4676 | mdiobus_free(pep->smi_bus); |
4677 | unregister_netdev(dev); |
4678 | diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c |
4679 | index a7b58ba8492b..3dccf01837db 100644 |
4680 | --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c |
4681 | +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c |
4682 | @@ -981,20 +981,21 @@ static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc) |
4683 | struct mlx4_en_priv *priv = netdev_priv(dev); |
4684 | |
4685 | /* check if requested function is supported by the device */ |
4686 | - if ((hfunc == ETH_RSS_HASH_TOP && |
4687 | - !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) || |
4688 | - (hfunc == ETH_RSS_HASH_XOR && |
4689 | - !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))) |
4690 | - return -EINVAL; |
4691 | + if (hfunc == ETH_RSS_HASH_TOP) { |
4692 | + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) |
4693 | + return -EINVAL; |
4694 | + if (!(dev->features & NETIF_F_RXHASH)) |
4695 | + en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n"); |
4696 | + return 0; |
4697 | + } else if (hfunc == ETH_RSS_HASH_XOR) { |
4698 | + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)) |
4699 | + return -EINVAL; |
4700 | + if (dev->features & NETIF_F_RXHASH) |
4701 | + en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n"); |
4702 | + return 0; |
4703 | + } |
4704 | |
4705 | - priv->rss_hash_fn = hfunc; |
4706 | - if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH)) |
4707 | - en_warn(priv, |
4708 | - "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n"); |
4709 | - if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH)) |
4710 | - en_warn(priv, |
4711 | - "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n"); |
4712 | - return 0; |
4713 | + return -EINVAL; |
4714 | } |
4715 | |
4716 | static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key, |
4717 | @@ -1068,6 +1069,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index, |
4718 | priv->prof->rss_rings = rss_rings; |
4719 | if (key) |
4720 | memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE); |
4721 | + if (hfunc != ETH_RSS_HASH_NO_CHANGE) |
4722 | + priv->rss_hash_fn = hfunc; |
4723 | |
4724 | if (port_up) { |
4725 | err = mlx4_en_start_port(dev); |
4726 | diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c |
4727 | index af034dba9bd6..9d15566521a7 100644 |
4728 | --- a/drivers/net/ppp/ppp_generic.c |
4729 | +++ b/drivers/net/ppp/ppp_generic.c |
4730 | @@ -1716,6 +1716,7 @@ ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) |
4731 | { |
4732 | /* note: a 0-length skb is used as an error indication */ |
4733 | if (skb->len > 0) { |
4734 | + skb_checksum_complete_unset(skb); |
4735 | #ifdef CONFIG_PPP_MULTILINK |
4736 | /* XXX do channel-level decompression here */ |
4737 | if (PPP_PROTO(skb) == PPP_MP) |
4738 | diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c |
4739 | index 90a714c189a8..23806c243a53 100644 |
4740 | --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c |
4741 | +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c |
4742 | @@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { |
4743 | {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ |
4744 | {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ |
4745 | {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/ |
4746 | + {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/ |
4747 | {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/ |
4748 | {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ |
4749 | {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ |
4750 | @@ -377,6 +378,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { |
4751 | {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/ |
4752 | {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ |
4753 | {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ |
4754 | + {RTL_USB_DEVICE(0x2001, 0x330d, rtl92cu_hal_cfg)}, /*D-Link DWA-131 */ |
4755 | {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/ |
4756 | {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/ |
4757 | {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/ |
4758 | diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c |
4759 | index c93fae95baac..5fbd2230f372 100644 |
4760 | --- a/drivers/net/wireless/ti/wl18xx/debugfs.c |
4761 | +++ b/drivers/net/wireless/ti/wl18xx/debugfs.c |
4762 | @@ -139,7 +139,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u"); |
4763 | WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u"); |
4764 | WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u"); |
4765 | |
4766 | -WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u"); |
4767 | +WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50); |
4768 | |
4769 | WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate, |
4770 | AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE); |
4771 | diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h |
4772 | index 0f2cfb0d2a9e..bf14676e6515 100644 |
4773 | --- a/drivers/net/wireless/ti/wlcore/debugfs.h |
4774 | +++ b/drivers/net/wireless/ti/wlcore/debugfs.h |
4775 | @@ -26,8 +26,8 @@ |
4776 | |
4777 | #include "wlcore.h" |
4778 | |
4779 | -int wl1271_format_buffer(char __user *userbuf, size_t count, |
4780 | - loff_t *ppos, char *fmt, ...); |
4781 | +__printf(4, 5) int wl1271_format_buffer(char __user *userbuf, size_t count, |
4782 | + loff_t *ppos, char *fmt, ...); |
4783 | |
4784 | int wl1271_debugfs_init(struct wl1271 *wl); |
4785 | void wl1271_debugfs_exit(struct wl1271 *wl); |
4786 | diff --git a/drivers/nfc/st21nfcb/i2c.c b/drivers/nfc/st21nfcb/i2c.c |
4787 | index eb886932d972..7b53a5c84041 100644 |
4788 | --- a/drivers/nfc/st21nfcb/i2c.c |
4789 | +++ b/drivers/nfc/st21nfcb/i2c.c |
4790 | @@ -109,7 +109,7 @@ static int st21nfcb_nci_i2c_write(void *phy_id, struct sk_buff *skb) |
4791 | return phy->ndlc->hard_fault; |
4792 | |
4793 | r = i2c_master_send(client, skb->data, skb->len); |
4794 | - if (r == -EREMOTEIO) { /* Retry, chip was in standby */ |
4795 | + if (r < 0) { /* Retry, chip was in standby */ |
4796 | usleep_range(1000, 4000); |
4797 | r = i2c_master_send(client, skb->data, skb->len); |
4798 | } |
4799 | @@ -148,7 +148,7 @@ static int st21nfcb_nci_i2c_read(struct st21nfcb_i2c_phy *phy, |
4800 | struct i2c_client *client = phy->i2c_dev; |
4801 | |
4802 | r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE); |
4803 | - if (r == -EREMOTEIO) { /* Retry, chip was in standby */ |
4804 | + if (r < 0) { /* Retry, chip was in standby */ |
4805 | usleep_range(1000, 4000); |
4806 | r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE); |
4807 | } |
4808 | diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c |
4809 | index 15c0fab2bfa1..bceb30b539f3 100644 |
4810 | --- a/drivers/platform/x86/compal-laptop.c |
4811 | +++ b/drivers/platform/x86/compal-laptop.c |
4812 | @@ -1026,9 +1026,9 @@ static int compal_probe(struct platform_device *pdev) |
4813 | if (err) |
4814 | return err; |
4815 | |
4816 | - hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, |
4817 | - "compal", data, |
4818 | - compal_hwmon_groups); |
4819 | + hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev, |
4820 | + "compal", data, |
4821 | + compal_hwmon_groups); |
4822 | if (IS_ERR(hwmon_dev)) { |
4823 | err = PTR_ERR(hwmon_dev); |
4824 | goto remove; |
4825 | @@ -1036,7 +1036,9 @@ static int compal_probe(struct platform_device *pdev) |
4826 | |
4827 | /* Power supply */ |
4828 | initialize_power_supply_data(data); |
4829 | - power_supply_register(&compal_device->dev, &data->psy); |
4830 | + err = power_supply_register(&compal_device->dev, &data->psy); |
4831 | + if (err < 0) |
4832 | + goto remove; |
4833 | |
4834 | platform_set_drvdata(pdev, data); |
4835 | |
4836 | diff --git a/drivers/power/ipaq_micro_battery.c b/drivers/power/ipaq_micro_battery.c |
4837 | index 9d694605cdb7..96b15e003f3f 100644 |
4838 | --- a/drivers/power/ipaq_micro_battery.c |
4839 | +++ b/drivers/power/ipaq_micro_battery.c |
4840 | @@ -226,6 +226,7 @@ static struct power_supply micro_ac_power = { |
4841 | static int micro_batt_probe(struct platform_device *pdev) |
4842 | { |
4843 | struct micro_battery *mb; |
4844 | + int ret; |
4845 | |
4846 | mb = devm_kzalloc(&pdev->dev, sizeof(*mb), GFP_KERNEL); |
4847 | if (!mb) |
4848 | @@ -233,14 +234,30 @@ static int micro_batt_probe(struct platform_device *pdev) |
4849 | |
4850 | mb->micro = dev_get_drvdata(pdev->dev.parent); |
4851 | mb->wq = create_singlethread_workqueue("ipaq-battery-wq"); |
4852 | + if (!mb->wq) |
4853 | + return -ENOMEM; |
4854 | + |
4855 | INIT_DELAYED_WORK(&mb->update, micro_battery_work); |
4856 | platform_set_drvdata(pdev, mb); |
4857 | queue_delayed_work(mb->wq, &mb->update, 1); |
4858 | - power_supply_register(&pdev->dev, µ_batt_power); |
4859 | - power_supply_register(&pdev->dev, µ_ac_power); |
4860 | + |
4861 | + ret = power_supply_register(&pdev->dev, µ_batt_power); |
4862 | + if (ret < 0) |
4863 | + goto batt_err; |
4864 | + |
4865 | + ret = power_supply_register(&pdev->dev, µ_ac_power); |
4866 | + if (ret < 0) |
4867 | + goto ac_err; |
4868 | |
4869 | dev_info(&pdev->dev, "iPAQ micro battery driver\n"); |
4870 | return 0; |
4871 | + |
4872 | +ac_err: |
4873 | + power_supply_unregister(µ_ac_power); |
4874 | +batt_err: |
4875 | + cancel_delayed_work_sync(&mb->update); |
4876 | + destroy_workqueue(mb->wq); |
4877 | + return ret; |
4878 | } |
4879 | |
4880 | static int micro_batt_remove(struct platform_device *pdev) |
4881 | @@ -251,6 +268,7 @@ static int micro_batt_remove(struct platform_device *pdev) |
4882 | power_supply_unregister(µ_ac_power); |
4883 | power_supply_unregister(µ_batt_power); |
4884 | cancel_delayed_work_sync(&mb->update); |
4885 | + destroy_workqueue(mb->wq); |
4886 | |
4887 | return 0; |
4888 | } |
4889 | diff --git a/drivers/power/lp8788-charger.c b/drivers/power/lp8788-charger.c |
4890 | index 21fc233c7d61..176dab2e4c16 100644 |
4891 | --- a/drivers/power/lp8788-charger.c |
4892 | +++ b/drivers/power/lp8788-charger.c |
4893 | @@ -417,8 +417,10 @@ static int lp8788_psy_register(struct platform_device *pdev, |
4894 | pchg->battery.num_properties = ARRAY_SIZE(lp8788_battery_prop); |
4895 | pchg->battery.get_property = lp8788_battery_get_property; |
4896 | |
4897 | - if (power_supply_register(&pdev->dev, &pchg->battery)) |
4898 | + if (power_supply_register(&pdev->dev, &pchg->battery)) { |
4899 | + power_supply_unregister(&pchg->charger); |
4900 | return -EPERM; |
4901 | + } |
4902 | |
4903 | return 0; |
4904 | } |
4905 | diff --git a/drivers/power/twl4030_madc_battery.c b/drivers/power/twl4030_madc_battery.c |
4906 | index 7ef445a6cfa6..cf907609ec49 100644 |
4907 | --- a/drivers/power/twl4030_madc_battery.c |
4908 | +++ b/drivers/power/twl4030_madc_battery.c |
4909 | @@ -192,6 +192,7 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev) |
4910 | { |
4911 | struct twl4030_madc_battery *twl4030_madc_bat; |
4912 | struct twl4030_madc_bat_platform_data *pdata = pdev->dev.platform_data; |
4913 | + int ret = 0; |
4914 | |
4915 | twl4030_madc_bat = kzalloc(sizeof(*twl4030_madc_bat), GFP_KERNEL); |
4916 | if (!twl4030_madc_bat) |
4917 | @@ -216,9 +217,11 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev) |
4918 | |
4919 | twl4030_madc_bat->pdata = pdata; |
4920 | platform_set_drvdata(pdev, twl4030_madc_bat); |
4921 | - power_supply_register(&pdev->dev, &twl4030_madc_bat->psy); |
4922 | + ret = power_supply_register(&pdev->dev, &twl4030_madc_bat->psy); |
4923 | + if (ret < 0) |
4924 | + kfree(twl4030_madc_bat); |
4925 | |
4926 | - return 0; |
4927 | + return ret; |
4928 | } |
4929 | |
4930 | static int twl4030_madc_battery_remove(struct platform_device *pdev) |
4931 | diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c |
4932 | index 675b5e7aba94..5a0800d19970 100644 |
4933 | --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c |
4934 | +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c |
4935 | @@ -1584,11 +1584,11 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, |
4936 | fp_possible = io_info.fpOkForIo; |
4937 | } |
4938 | |
4939 | - /* Use smp_processor_id() for now until cmd->request->cpu is CPU |
4940 | + /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU |
4941 | id by default, not CPU group id, otherwise all MSI-X queues won't |
4942 | be utilized */ |
4943 | cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ? |
4944 | - smp_processor_id() % instance->msix_vectors : 0; |
4945 | + raw_smp_processor_id() % instance->msix_vectors : 0; |
4946 | |
4947 | if (fp_possible) { |
4948 | megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp, |
4949 | @@ -1693,7 +1693,10 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, |
4950 | << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; |
4951 | cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle; |
4952 | cmd->request_desc->SCSIIO.MSIxIndex = |
4953 | - instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0; |
4954 | + instance->msix_vectors ? |
4955 | + raw_smp_processor_id() % |
4956 | + instance->msix_vectors : |
4957 | + 0; |
4958 | os_timeout_value = scmd->request->timeout / HZ; |
4959 | |
4960 | if (instance->secure_jbod_support && |
4961 | diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c |
4962 | index 2d5ab6d969ec..454536c49315 100644 |
4963 | --- a/drivers/scsi/mvsas/mv_sas.c |
4964 | +++ b/drivers/scsi/mvsas/mv_sas.c |
4965 | @@ -441,14 +441,11 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag) |
4966 | static int mvs_task_prep_ata(struct mvs_info *mvi, |
4967 | struct mvs_task_exec_info *tei) |
4968 | { |
4969 | - struct sas_ha_struct *sha = mvi->sas; |
4970 | struct sas_task *task = tei->task; |
4971 | struct domain_device *dev = task->dev; |
4972 | struct mvs_device *mvi_dev = dev->lldd_dev; |
4973 | struct mvs_cmd_hdr *hdr = tei->hdr; |
4974 | struct asd_sas_port *sas_port = dev->port; |
4975 | - struct sas_phy *sphy = dev->phy; |
4976 | - struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number]; |
4977 | struct mvs_slot_info *slot; |
4978 | void *buf_prd; |
4979 | u32 tag = tei->tag, hdr_tag; |
4980 | @@ -468,7 +465,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, |
4981 | slot->tx = mvi->tx_prod; |
4982 | del_q = TXQ_MODE_I | tag | |
4983 | (TXQ_CMD_STP << TXQ_CMD_SHIFT) | |
4984 | - (MVS_PHY_ID << TXQ_PHY_SHIFT) | |
4985 | + ((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) | |
4986 | (mvi_dev->taskfileset << TXQ_SRS_SHIFT); |
4987 | mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q); |
4988 | |
4989 | diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c |
4990 | index 6b78476d04bb..3290a3ed5b31 100644 |
4991 | --- a/drivers/scsi/sd.c |
4992 | +++ b/drivers/scsi/sd.c |
4993 | @@ -3100,6 +3100,7 @@ static void scsi_disk_release(struct device *dev) |
4994 | ida_remove(&sd_index_ida, sdkp->index); |
4995 | spin_unlock(&sd_index_lock); |
4996 | |
4997 | + blk_integrity_unregister(disk); |
4998 | disk->private_data = NULL; |
4999 | put_disk(disk); |
5000 | put_device(&sdkp->device->sdev_gendev); |
5001 | diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c |
5002 | index 14c7d42a11c2..5c06d292b94c 100644 |
5003 | --- a/drivers/scsi/sd_dif.c |
5004 | +++ b/drivers/scsi/sd_dif.c |
5005 | @@ -77,7 +77,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp) |
5006 | |
5007 | disk->integrity->flags |= BLK_INTEGRITY_DEVICE_CAPABLE; |
5008 | |
5009 | - if (!sdkp) |
5010 | + if (!sdkp->ATO) |
5011 | return; |
5012 | |
5013 | if (type == SD_DIF_TYPE3_PROTECTION) |
5014 | diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c |
5015 | index efc6e446b6c8..bf8c5c1e254e 100644 |
5016 | --- a/drivers/scsi/storvsc_drv.c |
5017 | +++ b/drivers/scsi/storvsc_drv.c |
5018 | @@ -746,21 +746,22 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl, |
5019 | if (bounce_sgl[j].length == PAGE_SIZE) { |
5020 | /* full..move to next entry */ |
5021 | sg_kunmap_atomic(bounce_addr); |
5022 | + bounce_addr = 0; |
5023 | j++; |
5024 | + } |
5025 | |
5026 | - /* if we need to use another bounce buffer */ |
5027 | - if (srclen || i != orig_sgl_count - 1) |
5028 | - bounce_addr = sg_kmap_atomic(bounce_sgl,j); |
5029 | + /* if we need to use another bounce buffer */ |
5030 | + if (srclen && bounce_addr == 0) |
5031 | + bounce_addr = sg_kmap_atomic(bounce_sgl, j); |
5032 | |
5033 | - } else if (srclen == 0 && i == orig_sgl_count - 1) { |
5034 | - /* unmap the last bounce that is < PAGE_SIZE */ |
5035 | - sg_kunmap_atomic(bounce_addr); |
5036 | - } |
5037 | } |
5038 | |
5039 | sg_kunmap_atomic(src_addr - orig_sgl[i].offset); |
5040 | } |
5041 | |
5042 | + if (bounce_addr) |
5043 | + sg_kunmap_atomic(bounce_addr); |
5044 | + |
5045 | local_irq_restore(flags); |
5046 | |
5047 | return total_copied; |
5048 | diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c |
5049 | index 6fea4af51c41..aea3a67e5ce1 100644 |
5050 | --- a/drivers/spi/spi-imx.c |
5051 | +++ b/drivers/spi/spi-imx.c |
5052 | @@ -370,8 +370,6 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx, |
5053 | if (spi_imx->dma_is_inited) { |
5054 | dma = readl(spi_imx->base + MX51_ECSPI_DMA); |
5055 | |
5056 | - spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2; |
5057 | - spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2; |
5058 | spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2; |
5059 | rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET; |
5060 | tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET; |
5061 | @@ -868,6 +866,8 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx, |
5062 | master->max_dma_len = MAX_SDMA_BD_BYTES; |
5063 | spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX | |
5064 | SPI_MASTER_MUST_TX; |
5065 | + spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2; |
5066 | + spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2; |
5067 | spi_imx->dma_is_inited = 1; |
5068 | |
5069 | return 0; |
5070 | diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c |
5071 | index 4eb7a980e670..7bf51860fd08 100644 |
5072 | --- a/drivers/spi/spidev.c |
5073 | +++ b/drivers/spi/spidev.c |
5074 | @@ -245,7 +245,10 @@ static int spidev_message(struct spidev_data *spidev, |
5075 | k_tmp->len = u_tmp->len; |
5076 | |
5077 | total += k_tmp->len; |
5078 | - if (total > bufsiz) { |
5079 | + /* Check total length of transfers. Also check each |
5080 | + * transfer length to avoid arithmetic overflow. |
5081 | + */ |
5082 | + if (total > bufsiz || k_tmp->len > bufsiz) { |
5083 | status = -EMSGSIZE; |
5084 | goto done; |
5085 | } |
5086 | diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c |
5087 | index 7bdb62bf6b40..f83e00c78051 100644 |
5088 | --- a/drivers/staging/android/sync.c |
5089 | +++ b/drivers/staging/android/sync.c |
5090 | @@ -114,7 +114,7 @@ void sync_timeline_signal(struct sync_timeline *obj) |
5091 | list_for_each_entry_safe(pt, next, &obj->active_list_head, |
5092 | active_list) { |
5093 | if (fence_is_signaled_locked(&pt->base)) |
5094 | - list_del(&pt->active_list); |
5095 | + list_del_init(&pt->active_list); |
5096 | } |
5097 | |
5098 | spin_unlock_irqrestore(&obj->child_list_lock, flags); |
5099 | diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c |
5100 | index 6ed35b6ecf0d..04fc217481c7 100644 |
5101 | --- a/drivers/staging/panel/panel.c |
5102 | +++ b/drivers/staging/panel/panel.c |
5103 | @@ -335,11 +335,11 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES]; |
5104 | * LCD types |
5105 | */ |
5106 | #define LCD_TYPE_NONE 0 |
5107 | -#define LCD_TYPE_OLD 1 |
5108 | -#define LCD_TYPE_KS0074 2 |
5109 | -#define LCD_TYPE_HANTRONIX 3 |
5110 | -#define LCD_TYPE_NEXCOM 4 |
5111 | -#define LCD_TYPE_CUSTOM 5 |
5112 | +#define LCD_TYPE_CUSTOM 1 |
5113 | +#define LCD_TYPE_OLD 2 |
5114 | +#define LCD_TYPE_KS0074 3 |
5115 | +#define LCD_TYPE_HANTRONIX 4 |
5116 | +#define LCD_TYPE_NEXCOM 5 |
5117 | |
5118 | /* |
5119 | * keypad types |
5120 | @@ -502,7 +502,7 @@ MODULE_PARM_DESC(keypad_type, |
5121 | static int lcd_type = NOT_SET; |
5122 | module_param(lcd_type, int, 0000); |
5123 | MODULE_PARM_DESC(lcd_type, |
5124 | - "LCD type: 0=none, 1=old //, 2=serial ks0074, 3=hantronix //, 4=nexcom //, 5=compiled-in"); |
5125 | + "LCD type: 0=none, 1=compiled-in, 2=old, 3=serial ks0074, 4=hantronix, 5=nexcom"); |
5126 | |
5127 | static int lcd_height = NOT_SET; |
5128 | module_param(lcd_height, int, 0000); |
5129 | diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c |
5130 | index 07ce3fd88e70..fdf5c56251e5 100644 |
5131 | --- a/drivers/staging/vt6655/rxtx.c |
5132 | +++ b/drivers/staging/vt6655/rxtx.c |
5133 | @@ -1308,10 +1308,18 @@ int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx, |
5134 | priv->hw->conf.chandef.chan->hw_value); |
5135 | } |
5136 | |
5137 | - if (current_rate > RATE_11M) |
5138 | - pkt_type = (u8)priv->byPacketType; |
5139 | - else |
5140 | + if (current_rate > RATE_11M) { |
5141 | + if (info->band == IEEE80211_BAND_5GHZ) { |
5142 | + pkt_type = PK_TYPE_11A; |
5143 | + } else { |
5144 | + if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT) |
5145 | + pkt_type = PK_TYPE_11GB; |
5146 | + else |
5147 | + pkt_type = PK_TYPE_11GA; |
5148 | + } |
5149 | + } else { |
5150 | pkt_type = PK_TYPE_11B; |
5151 | + } |
5152 | |
5153 | /*Set fifo controls */ |
5154 | if (pkt_type == PK_TYPE_11A) |
5155 | diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c |
5156 | index 77d64251af40..5e3561243eda 100644 |
5157 | --- a/drivers/target/iscsi/iscsi_target.c |
5158 | +++ b/drivers/target/iscsi/iscsi_target.c |
5159 | @@ -537,7 +537,7 @@ static struct iscsit_transport iscsi_target_transport = { |
5160 | |
5161 | static int __init iscsi_target_init_module(void) |
5162 | { |
5163 | - int ret = 0; |
5164 | + int ret = 0, size; |
5165 | |
5166 | pr_debug("iSCSI-Target "ISCSIT_VERSION"\n"); |
5167 | |
5168 | @@ -546,6 +546,7 @@ static int __init iscsi_target_init_module(void) |
5169 | pr_err("Unable to allocate memory for iscsit_global\n"); |
5170 | return -1; |
5171 | } |
5172 | + spin_lock_init(&iscsit_global->ts_bitmap_lock); |
5173 | mutex_init(&auth_id_lock); |
5174 | spin_lock_init(&sess_idr_lock); |
5175 | idr_init(&tiqn_idr); |
5176 | @@ -555,15 +556,11 @@ static int __init iscsi_target_init_module(void) |
5177 | if (ret < 0) |
5178 | goto out; |
5179 | |
5180 | - ret = iscsi_thread_set_init(); |
5181 | - if (ret < 0) |
5182 | + size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long); |
5183 | + iscsit_global->ts_bitmap = vzalloc(size); |
5184 | + if (!iscsit_global->ts_bitmap) { |
5185 | + pr_err("Unable to allocate iscsit_global->ts_bitmap\n"); |
5186 | goto configfs_out; |
5187 | - |
5188 | - if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) != |
5189 | - TARGET_THREAD_SET_COUNT) { |
5190 | - pr_err("iscsi_allocate_thread_sets() returned" |
5191 | - " unexpected value!\n"); |
5192 | - goto ts_out1; |
5193 | } |
5194 | |
5195 | lio_qr_cache = kmem_cache_create("lio_qr_cache", |
5196 | @@ -572,7 +569,7 @@ static int __init iscsi_target_init_module(void) |
5197 | if (!lio_qr_cache) { |
5198 | pr_err("nable to kmem_cache_create() for" |
5199 | " lio_qr_cache\n"); |
5200 | - goto ts_out2; |
5201 | + goto bitmap_out; |
5202 | } |
5203 | |
5204 | lio_dr_cache = kmem_cache_create("lio_dr_cache", |
5205 | @@ -617,10 +614,8 @@ dr_out: |
5206 | kmem_cache_destroy(lio_dr_cache); |
5207 | qr_out: |
5208 | kmem_cache_destroy(lio_qr_cache); |
5209 | -ts_out2: |
5210 | - iscsi_deallocate_thread_sets(); |
5211 | -ts_out1: |
5212 | - iscsi_thread_set_free(); |
5213 | +bitmap_out: |
5214 | + vfree(iscsit_global->ts_bitmap); |
5215 | configfs_out: |
5216 | iscsi_target_deregister_configfs(); |
5217 | out: |
5218 | @@ -630,8 +625,6 @@ out: |
5219 | |
5220 | static void __exit iscsi_target_cleanup_module(void) |
5221 | { |
5222 | - iscsi_deallocate_thread_sets(); |
5223 | - iscsi_thread_set_free(); |
5224 | iscsit_release_discovery_tpg(); |
5225 | iscsit_unregister_transport(&iscsi_target_transport); |
5226 | kmem_cache_destroy(lio_qr_cache); |
5227 | @@ -641,6 +634,7 @@ static void __exit iscsi_target_cleanup_module(void) |
5228 | |
5229 | iscsi_target_deregister_configfs(); |
5230 | |
5231 | + vfree(iscsit_global->ts_bitmap); |
5232 | kfree(iscsit_global); |
5233 | } |
5234 | |
5235 | @@ -3715,17 +3709,16 @@ static int iscsit_send_reject( |
5236 | |
5237 | void iscsit_thread_get_cpumask(struct iscsi_conn *conn) |
5238 | { |
5239 | - struct iscsi_thread_set *ts = conn->thread_set; |
5240 | int ord, cpu; |
5241 | /* |
5242 | - * thread_id is assigned from iscsit_global->ts_bitmap from |
5243 | - * within iscsi_thread_set.c:iscsi_allocate_thread_sets() |
5244 | + * bitmap_id is assigned from iscsit_global->ts_bitmap from |
5245 | + * within iscsit_start_kthreads() |
5246 | * |
5247 | - * Here we use thread_id to determine which CPU that this |
5248 | - * iSCSI connection's iscsi_thread_set will be scheduled to |
5249 | + * Here we use bitmap_id to determine which CPU that this |
5250 | + * iSCSI connection's RX/TX threads will be scheduled to |
5251 | * execute upon. |
5252 | */ |
5253 | - ord = ts->thread_id % cpumask_weight(cpu_online_mask); |
5254 | + ord = conn->bitmap_id % cpumask_weight(cpu_online_mask); |
5255 | for_each_online_cpu(cpu) { |
5256 | if (ord-- == 0) { |
5257 | cpumask_set_cpu(cpu, conn->conn_cpumask); |
5258 | @@ -3914,7 +3907,7 @@ check_rsp_state: |
5259 | switch (state) { |
5260 | case ISTATE_SEND_LOGOUTRSP: |
5261 | if (!iscsit_logout_post_handler(cmd, conn)) |
5262 | - goto restart; |
5263 | + return -ECONNRESET; |
5264 | /* fall through */ |
5265 | case ISTATE_SEND_STATUS: |
5266 | case ISTATE_SEND_ASYNCMSG: |
5267 | @@ -3942,8 +3935,6 @@ check_rsp_state: |
5268 | |
5269 | err: |
5270 | return -1; |
5271 | -restart: |
5272 | - return -EAGAIN; |
5273 | } |
5274 | |
5275 | static int iscsit_handle_response_queue(struct iscsi_conn *conn) |
5276 | @@ -3970,21 +3961,13 @@ static int iscsit_handle_response_queue(struct iscsi_conn *conn) |
5277 | int iscsi_target_tx_thread(void *arg) |
5278 | { |
5279 | int ret = 0; |
5280 | - struct iscsi_conn *conn; |
5281 | - struct iscsi_thread_set *ts = arg; |
5282 | + struct iscsi_conn *conn = arg; |
5283 | /* |
5284 | * Allow ourselves to be interrupted by SIGINT so that a |
5285 | * connection recovery / failure event can be triggered externally. |
5286 | */ |
5287 | allow_signal(SIGINT); |
5288 | |
5289 | -restart: |
5290 | - conn = iscsi_tx_thread_pre_handler(ts); |
5291 | - if (!conn) |
5292 | - goto out; |
5293 | - |
5294 | - ret = 0; |
5295 | - |
5296 | while (!kthread_should_stop()) { |
5297 | /* |
5298 | * Ensure that both TX and RX per connection kthreads |
5299 | @@ -3993,11 +3976,9 @@ restart: |
5300 | iscsit_thread_check_cpumask(conn, current, 1); |
5301 | |
5302 | wait_event_interruptible(conn->queues_wq, |
5303 | - !iscsit_conn_all_queues_empty(conn) || |
5304 | - ts->status == ISCSI_THREAD_SET_RESET); |
5305 | + !iscsit_conn_all_queues_empty(conn)); |
5306 | |
5307 | - if ((ts->status == ISCSI_THREAD_SET_RESET) || |
5308 | - signal_pending(current)) |
5309 | + if (signal_pending(current)) |
5310 | goto transport_err; |
5311 | |
5312 | get_immediate: |
5313 | @@ -4008,15 +3989,14 @@ get_immediate: |
5314 | ret = iscsit_handle_response_queue(conn); |
5315 | if (ret == 1) |
5316 | goto get_immediate; |
5317 | - else if (ret == -EAGAIN) |
5318 | - goto restart; |
5319 | + else if (ret == -ECONNRESET) |
5320 | + goto out; |
5321 | else if (ret < 0) |
5322 | goto transport_err; |
5323 | } |
5324 | |
5325 | transport_err: |
5326 | iscsit_take_action_for_connection_exit(conn); |
5327 | - goto restart; |
5328 | out: |
5329 | return 0; |
5330 | } |
5331 | @@ -4111,8 +4091,7 @@ int iscsi_target_rx_thread(void *arg) |
5332 | int ret; |
5333 | u8 buffer[ISCSI_HDR_LEN], opcode; |
5334 | u32 checksum = 0, digest = 0; |
5335 | - struct iscsi_conn *conn = NULL; |
5336 | - struct iscsi_thread_set *ts = arg; |
5337 | + struct iscsi_conn *conn = arg; |
5338 | struct kvec iov; |
5339 | /* |
5340 | * Allow ourselves to be interrupted by SIGINT so that a |
5341 | @@ -4120,11 +4099,6 @@ int iscsi_target_rx_thread(void *arg) |
5342 | */ |
5343 | allow_signal(SIGINT); |
5344 | |
5345 | -restart: |
5346 | - conn = iscsi_rx_thread_pre_handler(ts); |
5347 | - if (!conn) |
5348 | - goto out; |
5349 | - |
5350 | if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { |
5351 | struct completion comp; |
5352 | int rc; |
5353 | @@ -4134,7 +4108,7 @@ restart: |
5354 | if (rc < 0) |
5355 | goto transport_err; |
5356 | |
5357 | - goto out; |
5358 | + goto transport_err; |
5359 | } |
5360 | |
5361 | while (!kthread_should_stop()) { |
5362 | @@ -4210,8 +4184,6 @@ transport_err: |
5363 | if (!signal_pending(current)) |
5364 | atomic_set(&conn->transport_failed, 1); |
5365 | iscsit_take_action_for_connection_exit(conn); |
5366 | - goto restart; |
5367 | -out: |
5368 | return 0; |
5369 | } |
5370 | |
5371 | @@ -4273,7 +4245,24 @@ int iscsit_close_connection( |
5372 | if (conn->conn_transport->transport_type == ISCSI_TCP) |
5373 | complete(&conn->conn_logout_comp); |
5374 | |
5375 | - iscsi_release_thread_set(conn); |
5376 | + if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) { |
5377 | + if (conn->tx_thread && |
5378 | + cmpxchg(&conn->tx_thread_active, true, false)) { |
5379 | + send_sig(SIGINT, conn->tx_thread, 1); |
5380 | + kthread_stop(conn->tx_thread); |
5381 | + } |
5382 | + } else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) { |
5383 | + if (conn->rx_thread && |
5384 | + cmpxchg(&conn->rx_thread_active, true, false)) { |
5385 | + send_sig(SIGINT, conn->rx_thread, 1); |
5386 | + kthread_stop(conn->rx_thread); |
5387 | + } |
5388 | + } |
5389 | + |
5390 | + spin_lock(&iscsit_global->ts_bitmap_lock); |
5391 | + bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id, |
5392 | + get_order(1)); |
5393 | + spin_unlock(&iscsit_global->ts_bitmap_lock); |
5394 | |
5395 | iscsit_stop_timers_for_cmds(conn); |
5396 | iscsit_stop_nopin_response_timer(conn); |
5397 | @@ -4551,15 +4540,13 @@ static void iscsit_logout_post_handler_closesession( |
5398 | struct iscsi_conn *conn) |
5399 | { |
5400 | struct iscsi_session *sess = conn->sess; |
5401 | - |
5402 | - iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD); |
5403 | - iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD); |
5404 | + int sleep = cmpxchg(&conn->tx_thread_active, true, false); |
5405 | |
5406 | atomic_set(&conn->conn_logout_remove, 0); |
5407 | complete(&conn->conn_logout_comp); |
5408 | |
5409 | iscsit_dec_conn_usage_count(conn); |
5410 | - iscsit_stop_session(sess, 1, 1); |
5411 | + iscsit_stop_session(sess, sleep, sleep); |
5412 | iscsit_dec_session_usage_count(sess); |
5413 | target_put_session(sess->se_sess); |
5414 | } |
5415 | @@ -4567,13 +4554,12 @@ static void iscsit_logout_post_handler_closesession( |
5416 | static void iscsit_logout_post_handler_samecid( |
5417 | struct iscsi_conn *conn) |
5418 | { |
5419 | - iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD); |
5420 | - iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD); |
5421 | + int sleep = cmpxchg(&conn->tx_thread_active, true, false); |
5422 | |
5423 | atomic_set(&conn->conn_logout_remove, 0); |
5424 | complete(&conn->conn_logout_comp); |
5425 | |
5426 | - iscsit_cause_connection_reinstatement(conn, 1); |
5427 | + iscsit_cause_connection_reinstatement(conn, sleep); |
5428 | iscsit_dec_conn_usage_count(conn); |
5429 | } |
5430 | |
5431 | diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c |
5432 | index bdd8731a4daa..e008ed261364 100644 |
5433 | --- a/drivers/target/iscsi/iscsi_target_erl0.c |
5434 | +++ b/drivers/target/iscsi/iscsi_target_erl0.c |
5435 | @@ -860,7 +860,10 @@ void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn) |
5436 | } |
5437 | spin_unlock_bh(&conn->state_lock); |
5438 | |
5439 | - iscsi_thread_set_force_reinstatement(conn); |
5440 | + if (conn->tx_thread && conn->tx_thread_active) |
5441 | + send_sig(SIGINT, conn->tx_thread, 1); |
5442 | + if (conn->rx_thread && conn->rx_thread_active) |
5443 | + send_sig(SIGINT, conn->rx_thread, 1); |
5444 | |
5445 | sleep: |
5446 | wait_for_completion(&conn->conn_wait_rcfr_comp); |
5447 | @@ -885,10 +888,10 @@ void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep) |
5448 | return; |
5449 | } |
5450 | |
5451 | - if (iscsi_thread_set_force_reinstatement(conn) < 0) { |
5452 | - spin_unlock_bh(&conn->state_lock); |
5453 | - return; |
5454 | - } |
5455 | + if (conn->tx_thread && conn->tx_thread_active) |
5456 | + send_sig(SIGINT, conn->tx_thread, 1); |
5457 | + if (conn->rx_thread && conn->rx_thread_active) |
5458 | + send_sig(SIGINT, conn->rx_thread, 1); |
5459 | |
5460 | atomic_set(&conn->connection_reinstatement, 1); |
5461 | if (!sleep) { |
5462 | diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c |
5463 | index 153fb66ac1b8..345f073ff6dc 100644 |
5464 | --- a/drivers/target/iscsi/iscsi_target_login.c |
5465 | +++ b/drivers/target/iscsi/iscsi_target_login.c |
5466 | @@ -699,6 +699,51 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn) |
5467 | iscsit_start_nopin_timer(conn); |
5468 | } |
5469 | |
5470 | +int iscsit_start_kthreads(struct iscsi_conn *conn) |
5471 | +{ |
5472 | + int ret = 0; |
5473 | + |
5474 | + spin_lock(&iscsit_global->ts_bitmap_lock); |
5475 | + conn->bitmap_id = bitmap_find_free_region(iscsit_global->ts_bitmap, |
5476 | + ISCSIT_BITMAP_BITS, get_order(1)); |
5477 | + spin_unlock(&iscsit_global->ts_bitmap_lock); |
5478 | + |
5479 | + if (conn->bitmap_id < 0) { |
5480 | + pr_err("bitmap_find_free_region() failed for" |
5481 | + " iscsit_start_kthreads()\n"); |
5482 | + return -ENOMEM; |
5483 | + } |
5484 | + |
5485 | + conn->tx_thread = kthread_run(iscsi_target_tx_thread, conn, |
5486 | + "%s", ISCSI_TX_THREAD_NAME); |
5487 | + if (IS_ERR(conn->tx_thread)) { |
5488 | + pr_err("Unable to start iscsi_target_tx_thread\n"); |
5489 | + ret = PTR_ERR(conn->tx_thread); |
5490 | + goto out_bitmap; |
5491 | + } |
5492 | + conn->tx_thread_active = true; |
5493 | + |
5494 | + conn->rx_thread = kthread_run(iscsi_target_rx_thread, conn, |
5495 | + "%s", ISCSI_RX_THREAD_NAME); |
5496 | + if (IS_ERR(conn->rx_thread)) { |
5497 | + pr_err("Unable to start iscsi_target_rx_thread\n"); |
5498 | + ret = PTR_ERR(conn->rx_thread); |
5499 | + goto out_tx; |
5500 | + } |
5501 | + conn->rx_thread_active = true; |
5502 | + |
5503 | + return 0; |
5504 | +out_tx: |
5505 | + kthread_stop(conn->tx_thread); |
5506 | + conn->tx_thread_active = false; |
5507 | +out_bitmap: |
5508 | + spin_lock(&iscsit_global->ts_bitmap_lock); |
5509 | + bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id, |
5510 | + get_order(1)); |
5511 | + spin_unlock(&iscsit_global->ts_bitmap_lock); |
5512 | + return ret; |
5513 | +} |
5514 | + |
5515 | int iscsi_post_login_handler( |
5516 | struct iscsi_np *np, |
5517 | struct iscsi_conn *conn, |
5518 | @@ -709,7 +754,7 @@ int iscsi_post_login_handler( |
5519 | struct se_session *se_sess = sess->se_sess; |
5520 | struct iscsi_portal_group *tpg = sess->tpg; |
5521 | struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; |
5522 | - struct iscsi_thread_set *ts; |
5523 | + int rc; |
5524 | |
5525 | iscsit_inc_conn_usage_count(conn); |
5526 | |
5527 | @@ -724,7 +769,6 @@ int iscsi_post_login_handler( |
5528 | /* |
5529 | * SCSI Initiator -> SCSI Target Port Mapping |
5530 | */ |
5531 | - ts = iscsi_get_thread_set(); |
5532 | if (!zero_tsih) { |
5533 | iscsi_set_session_parameters(sess->sess_ops, |
5534 | conn->param_list, 0); |
5535 | @@ -751,9 +795,11 @@ int iscsi_post_login_handler( |
5536 | sess->sess_ops->InitiatorName); |
5537 | spin_unlock_bh(&sess->conn_lock); |
5538 | |
5539 | - iscsi_post_login_start_timers(conn); |
5540 | + rc = iscsit_start_kthreads(conn); |
5541 | + if (rc) |
5542 | + return rc; |
5543 | |
5544 | - iscsi_activate_thread_set(conn, ts); |
5545 | + iscsi_post_login_start_timers(conn); |
5546 | /* |
5547 | * Determine CPU mask to ensure connection's RX and TX kthreads |
5548 | * are scheduled on the same CPU. |
5549 | @@ -810,8 +856,11 @@ int iscsi_post_login_handler( |
5550 | " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt); |
5551 | spin_unlock_bh(&se_tpg->session_lock); |
5552 | |
5553 | + rc = iscsit_start_kthreads(conn); |
5554 | + if (rc) |
5555 | + return rc; |
5556 | + |
5557 | iscsi_post_login_start_timers(conn); |
5558 | - iscsi_activate_thread_set(conn, ts); |
5559 | /* |
5560 | * Determine CPU mask to ensure connection's RX and TX kthreads |
5561 | * are scheduled on the same CPU. |
5562 | diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c |
5563 | index 44620fb6bd45..cbb0cc277f4e 100644 |
5564 | --- a/drivers/target/target_core_file.c |
5565 | +++ b/drivers/target/target_core_file.c |
5566 | @@ -264,40 +264,32 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot, |
5567 | struct se_device *se_dev = cmd->se_dev; |
5568 | struct fd_dev *dev = FD_DEV(se_dev); |
5569 | struct file *prot_fd = dev->fd_prot_file; |
5570 | - struct scatterlist *sg; |
5571 | loff_t pos = (cmd->t_task_lba * se_dev->prot_length); |
5572 | unsigned char *buf; |
5573 | - u32 prot_size, len, size; |
5574 | - int rc, ret = 1, i; |
5575 | + u32 prot_size; |
5576 | + int rc, ret = 1; |
5577 | |
5578 | prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) * |
5579 | se_dev->prot_length; |
5580 | |
5581 | if (!is_write) { |
5582 | - fd_prot->prot_buf = vzalloc(prot_size); |
5583 | + fd_prot->prot_buf = kzalloc(prot_size, GFP_KERNEL); |
5584 | if (!fd_prot->prot_buf) { |
5585 | pr_err("Unable to allocate fd_prot->prot_buf\n"); |
5586 | return -ENOMEM; |
5587 | } |
5588 | buf = fd_prot->prot_buf; |
5589 | |
5590 | - fd_prot->prot_sg_nents = cmd->t_prot_nents; |
5591 | - fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) * |
5592 | - fd_prot->prot_sg_nents, GFP_KERNEL); |
5593 | + fd_prot->prot_sg_nents = 1; |
5594 | + fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist), |
5595 | + GFP_KERNEL); |
5596 | if (!fd_prot->prot_sg) { |
5597 | pr_err("Unable to allocate fd_prot->prot_sg\n"); |
5598 | - vfree(fd_prot->prot_buf); |
5599 | + kfree(fd_prot->prot_buf); |
5600 | return -ENOMEM; |
5601 | } |
5602 | - size = prot_size; |
5603 | - |
5604 | - for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) { |
5605 | - |
5606 | - len = min_t(u32, PAGE_SIZE, size); |
5607 | - sg_set_buf(sg, buf, len); |
5608 | - size -= len; |
5609 | - buf += len; |
5610 | - } |
5611 | + sg_init_table(fd_prot->prot_sg, fd_prot->prot_sg_nents); |
5612 | + sg_set_buf(fd_prot->prot_sg, buf, prot_size); |
5613 | } |
5614 | |
5615 | if (is_write) { |
5616 | @@ -318,7 +310,7 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot, |
5617 | |
5618 | if (is_write || ret < 0) { |
5619 | kfree(fd_prot->prot_sg); |
5620 | - vfree(fd_prot->prot_buf); |
5621 | + kfree(fd_prot->prot_buf); |
5622 | } |
5623 | |
5624 | return ret; |
5625 | @@ -549,6 +541,56 @@ fd_execute_write_same(struct se_cmd *cmd) |
5626 | return 0; |
5627 | } |
5628 | |
5629 | +static int |
5630 | +fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb, |
5631 | + void *buf, size_t bufsize) |
5632 | +{ |
5633 | + struct fd_dev *fd_dev = FD_DEV(se_dev); |
5634 | + struct file *prot_fd = fd_dev->fd_prot_file; |
5635 | + sector_t prot_length, prot; |
5636 | + loff_t pos = lba * se_dev->prot_length; |
5637 | + |
5638 | + if (!prot_fd) { |
5639 | + pr_err("Unable to locate fd_dev->fd_prot_file\n"); |
5640 | + return -ENODEV; |
5641 | + } |
5642 | + |
5643 | + prot_length = nolb * se_dev->prot_length; |
5644 | + |
5645 | + for (prot = 0; prot < prot_length;) { |
5646 | + sector_t len = min_t(sector_t, bufsize, prot_length - prot); |
5647 | + ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot); |
5648 | + |
5649 | + if (ret != len) { |
5650 | + pr_err("vfs_write to prot file failed: %zd\n", ret); |
5651 | + return ret < 0 ? ret : -ENODEV; |
5652 | + } |
5653 | + prot += ret; |
5654 | + } |
5655 | + |
5656 | + return 0; |
5657 | +} |
5658 | + |
5659 | +static int |
5660 | +fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) |
5661 | +{ |
5662 | + void *buf; |
5663 | + int rc; |
5664 | + |
5665 | + buf = (void *)__get_free_page(GFP_KERNEL); |
5666 | + if (!buf) { |
5667 | + pr_err("Unable to allocate FILEIO prot buf\n"); |
5668 | + return -ENOMEM; |
5669 | + } |
5670 | + memset(buf, 0xff, PAGE_SIZE); |
5671 | + |
5672 | + rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE); |
5673 | + |
5674 | + free_page((unsigned long)buf); |
5675 | + |
5676 | + return rc; |
5677 | +} |
5678 | + |
5679 | static sense_reason_t |
5680 | fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb) |
5681 | { |
5682 | @@ -556,6 +598,12 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb) |
5683 | struct inode *inode = file->f_mapping->host; |
5684 | int ret; |
5685 | |
5686 | + if (cmd->se_dev->dev_attrib.pi_prot_type) { |
5687 | + ret = fd_do_prot_unmap(cmd, lba, nolb); |
5688 | + if (ret) |
5689 | + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
5690 | + } |
5691 | + |
5692 | if (S_ISBLK(inode->i_mode)) { |
5693 | /* The backend is block device, use discard */ |
5694 | struct block_device *bdev = inode->i_bdev; |
5695 | @@ -658,11 +706,11 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, |
5696 | 0, fd_prot.prot_sg, 0); |
5697 | if (rc) { |
5698 | kfree(fd_prot.prot_sg); |
5699 | - vfree(fd_prot.prot_buf); |
5700 | + kfree(fd_prot.prot_buf); |
5701 | return rc; |
5702 | } |
5703 | kfree(fd_prot.prot_sg); |
5704 | - vfree(fd_prot.prot_buf); |
5705 | + kfree(fd_prot.prot_buf); |
5706 | } |
5707 | } else { |
5708 | memset(&fd_prot, 0, sizeof(struct fd_prot)); |
5709 | @@ -678,7 +726,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, |
5710 | 0, fd_prot.prot_sg, 0); |
5711 | if (rc) { |
5712 | kfree(fd_prot.prot_sg); |
5713 | - vfree(fd_prot.prot_buf); |
5714 | + kfree(fd_prot.prot_buf); |
5715 | return rc; |
5716 | } |
5717 | } |
5718 | @@ -714,7 +762,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, |
5719 | |
5720 | if (ret < 0) { |
5721 | kfree(fd_prot.prot_sg); |
5722 | - vfree(fd_prot.prot_buf); |
5723 | + kfree(fd_prot.prot_buf); |
5724 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
5725 | } |
5726 | |
5727 | @@ -878,48 +926,28 @@ static int fd_init_prot(struct se_device *dev) |
5728 | |
5729 | static int fd_format_prot(struct se_device *dev) |
5730 | { |
5731 | - struct fd_dev *fd_dev = FD_DEV(dev); |
5732 | - struct file *prot_fd = fd_dev->fd_prot_file; |
5733 | - sector_t prot_length, prot; |
5734 | unsigned char *buf; |
5735 | - loff_t pos = 0; |
5736 | int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size; |
5737 | - int rc, ret = 0, size, len; |
5738 | + int ret; |
5739 | |
5740 | if (!dev->dev_attrib.pi_prot_type) { |
5741 | pr_err("Unable to format_prot while pi_prot_type == 0\n"); |
5742 | return -ENODEV; |
5743 | } |
5744 | - if (!prot_fd) { |
5745 | - pr_err("Unable to locate fd_dev->fd_prot_file\n"); |
5746 | - return -ENODEV; |
5747 | - } |
5748 | |
5749 | buf = vzalloc(unit_size); |
5750 | if (!buf) { |
5751 | pr_err("Unable to allocate FILEIO prot buf\n"); |
5752 | return -ENOMEM; |
5753 | } |
5754 | - prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length; |
5755 | - size = prot_length; |
5756 | |
5757 | pr_debug("Using FILEIO prot_length: %llu\n", |
5758 | - (unsigned long long)prot_length); |
5759 | + (unsigned long long)(dev->transport->get_blocks(dev) + 1) * |
5760 | + dev->prot_length); |
5761 | |
5762 | memset(buf, 0xff, unit_size); |
5763 | - for (prot = 0; prot < prot_length; prot += unit_size) { |
5764 | - len = min(unit_size, size); |
5765 | - rc = kernel_write(prot_fd, buf, len, pos); |
5766 | - if (rc != len) { |
5767 | - pr_err("vfs_write to prot file failed: %d\n", rc); |
5768 | - ret = -ENODEV; |
5769 | - goto out; |
5770 | - } |
5771 | - pos += len; |
5772 | - size -= len; |
5773 | - } |
5774 | - |
5775 | -out: |
5776 | + ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1, |
5777 | + buf, unit_size); |
5778 | vfree(buf); |
5779 | return ret; |
5780 | } |
5781 | diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c |
5782 | index 3e7297411110..755bd9b34612 100644 |
5783 | --- a/drivers/target/target_core_sbc.c |
5784 | +++ b/drivers/target/target_core_sbc.c |
5785 | @@ -312,7 +312,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o |
5786 | return 0; |
5787 | } |
5788 | |
5789 | -static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd) |
5790 | +static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success) |
5791 | { |
5792 | unsigned char *buf, *addr; |
5793 | struct scatterlist *sg; |
5794 | @@ -376,7 +376,7 @@ sbc_execute_rw(struct se_cmd *cmd) |
5795 | cmd->data_direction); |
5796 | } |
5797 | |
5798 | -static sense_reason_t compare_and_write_post(struct se_cmd *cmd) |
5799 | +static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) |
5800 | { |
5801 | struct se_device *dev = cmd->se_dev; |
5802 | |
5803 | @@ -399,7 +399,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd) |
5804 | return TCM_NO_SENSE; |
5805 | } |
5806 | |
5807 | -static sense_reason_t compare_and_write_callback(struct se_cmd *cmd) |
5808 | +static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success) |
5809 | { |
5810 | struct se_device *dev = cmd->se_dev; |
5811 | struct scatterlist *write_sg = NULL, *sg; |
5812 | @@ -414,11 +414,16 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd) |
5813 | |
5814 | /* |
5815 | * Handle early failure in transport_generic_request_failure(), |
5816 | - * which will not have taken ->caw_mutex yet.. |
5817 | + * which will not have taken ->caw_sem yet.. |
5818 | */ |
5819 | - if (!cmd->t_data_sg || !cmd->t_bidi_data_sg) |
5820 | + if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg)) |
5821 | return TCM_NO_SENSE; |
5822 | /* |
5823 | + * Handle special case for zero-length COMPARE_AND_WRITE |
5824 | + */ |
5825 | + if (!cmd->data_length) |
5826 | + goto out; |
5827 | + /* |
5828 | * Immediately exit + release dev->caw_sem if command has already |
5829 | * been failed with a non-zero SCSI status. |
5830 | */ |
5831 | diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c |
5832 | index ac3cbabdbdf0..f786de0290db 100644 |
5833 | --- a/drivers/target/target_core_transport.c |
5834 | +++ b/drivers/target/target_core_transport.c |
5835 | @@ -1615,11 +1615,11 @@ void transport_generic_request_failure(struct se_cmd *cmd, |
5836 | transport_complete_task_attr(cmd); |
5837 | /* |
5838 | * Handle special case for COMPARE_AND_WRITE failure, where the |
5839 | - * callback is expected to drop the per device ->caw_mutex. |
5840 | + * callback is expected to drop the per device ->caw_sem. |
5841 | */ |
5842 | if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && |
5843 | cmd->transport_complete_callback) |
5844 | - cmd->transport_complete_callback(cmd); |
5845 | + cmd->transport_complete_callback(cmd, false); |
5846 | |
5847 | switch (sense_reason) { |
5848 | case TCM_NON_EXISTENT_LUN: |
5849 | @@ -1975,8 +1975,12 @@ static void target_complete_ok_work(struct work_struct *work) |
5850 | if (cmd->transport_complete_callback) { |
5851 | sense_reason_t rc; |
5852 | |
5853 | - rc = cmd->transport_complete_callback(cmd); |
5854 | + rc = cmd->transport_complete_callback(cmd, true); |
5855 | if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) { |
5856 | + if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && |
5857 | + !cmd->data_length) |
5858 | + goto queue_rsp; |
5859 | + |
5860 | return; |
5861 | } else if (rc) { |
5862 | ret = transport_send_check_condition_and_sense(cmd, |
5863 | @@ -1990,6 +1994,7 @@ static void target_complete_ok_work(struct work_struct *work) |
5864 | } |
5865 | } |
5866 | |
5867 | +queue_rsp: |
5868 | switch (cmd->data_direction) { |
5869 | case DMA_FROM_DEVICE: |
5870 | spin_lock(&cmd->se_lun->lun_sep_lock); |
5871 | @@ -2094,6 +2099,16 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd) |
5872 | static inline void transport_free_pages(struct se_cmd *cmd) |
5873 | { |
5874 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { |
5875 | + /* |
5876 | + * Release special case READ buffer payload required for |
5877 | + * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE |
5878 | + */ |
5879 | + if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { |
5880 | + transport_free_sgl(cmd->t_bidi_data_sg, |
5881 | + cmd->t_bidi_data_nents); |
5882 | + cmd->t_bidi_data_sg = NULL; |
5883 | + cmd->t_bidi_data_nents = 0; |
5884 | + } |
5885 | transport_reset_sgl_orig(cmd); |
5886 | return; |
5887 | } |
5888 | @@ -2246,6 +2261,7 @@ sense_reason_t |
5889 | transport_generic_new_cmd(struct se_cmd *cmd) |
5890 | { |
5891 | int ret = 0; |
5892 | + bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); |
5893 | |
5894 | /* |
5895 | * Determine is the TCM fabric module has already allocated physical |
5896 | @@ -2254,7 +2270,6 @@ transport_generic_new_cmd(struct se_cmd *cmd) |
5897 | */ |
5898 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && |
5899 | cmd->data_length) { |
5900 | - bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); |
5901 | |
5902 | if ((cmd->se_cmd_flags & SCF_BIDI) || |
5903 | (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { |
5904 | @@ -2285,6 +2300,20 @@ transport_generic_new_cmd(struct se_cmd *cmd) |
5905 | cmd->data_length, zero_flag); |
5906 | if (ret < 0) |
5907 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
5908 | + } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && |
5909 | + cmd->data_length) { |
5910 | + /* |
5911 | + * Special case for COMPARE_AND_WRITE with fabrics |
5912 | + * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. |
5913 | + */ |
5914 | + u32 caw_length = cmd->t_task_nolb * |
5915 | + cmd->se_dev->dev_attrib.block_size; |
5916 | + |
5917 | + ret = target_alloc_sgl(&cmd->t_bidi_data_sg, |
5918 | + &cmd->t_bidi_data_nents, |
5919 | + caw_length, zero_flag); |
5920 | + if (ret < 0) |
5921 | + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
5922 | } |
5923 | /* |
5924 | * If this command is not a write we can execute it right here, |
5925 | diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c |
5926 | index deae122c9c4b..d465ace95186 100644 |
5927 | --- a/drivers/tty/serial/8250/8250_core.c |
5928 | +++ b/drivers/tty/serial/8250/8250_core.c |
5929 | @@ -3444,7 +3444,8 @@ void serial8250_suspend_port(int line) |
5930 | port->type != PORT_8250) { |
5931 | unsigned char canary = 0xa5; |
5932 | serial_out(up, UART_SCR, canary); |
5933 | - up->canary = canary; |
5934 | + if (serial_in(up, UART_SCR) == canary) |
5935 | + up->canary = canary; |
5936 | } |
5937 | |
5938 | uart_suspend_port(&serial8250_reg, port); |
5939 | diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c |
5940 | index 6ae5b8560e4d..7a80250475eb 100644 |
5941 | --- a/drivers/tty/serial/8250/8250_dw.c |
5942 | +++ b/drivers/tty/serial/8250/8250_dw.c |
5943 | @@ -629,6 +629,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = { |
5944 | { "80860F0A", 0 }, |
5945 | { "8086228A", 0 }, |
5946 | { "APMC0D08", 0}, |
5947 | + { "AMD0020", 0 }, |
5948 | { }, |
5949 | }; |
5950 | MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match); |
5951 | diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c |
5952 | index 0eb29b1c47ac..23061918b0e4 100644 |
5953 | --- a/drivers/tty/serial/imx.c |
5954 | +++ b/drivers/tty/serial/imx.c |
5955 | @@ -818,7 +818,7 @@ static irqreturn_t imx_int(int irq, void *dev_id) |
5956 | if (sts2 & USR2_ORE) { |
5957 | dev_err(sport->port.dev, "Rx FIFO overrun\n"); |
5958 | sport->port.icount.overrun++; |
5959 | - writel(sts2 | USR2_ORE, sport->port.membase + USR2); |
5960 | + writel(USR2_ORE, sport->port.membase + USR2); |
5961 | } |
5962 | |
5963 | return IRQ_HANDLED; |
5964 | @@ -1181,10 +1181,12 @@ static int imx_startup(struct uart_port *port) |
5965 | imx_uart_dma_init(sport); |
5966 | |
5967 | spin_lock_irqsave(&sport->port.lock, flags); |
5968 | + |
5969 | /* |
5970 | * Finally, clear and enable interrupts |
5971 | */ |
5972 | writel(USR1_RTSD, sport->port.membase + USR1); |
5973 | + writel(USR2_ORE, sport->port.membase + USR2); |
5974 | |
5975 | if (sport->dma_is_inited && !sport->dma_is_enabled) |
5976 | imx_enable_dma(sport); |
5977 | @@ -1199,10 +1201,6 @@ static int imx_startup(struct uart_port *port) |
5978 | |
5979 | writel(temp, sport->port.membase + UCR1); |
5980 | |
5981 | - /* Clear any pending ORE flag before enabling interrupt */ |
5982 | - temp = readl(sport->port.membase + USR2); |
5983 | - writel(temp | USR2_ORE, sport->port.membase + USR2); |
5984 | - |
5985 | temp = readl(sport->port.membase + UCR4); |
5986 | temp |= UCR4_OREN; |
5987 | writel(temp, sport->port.membase + UCR4); |
5988 | diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c |
5989 | index a051a7a2b1bd..a81f9dd7ee97 100644 |
5990 | --- a/drivers/usb/class/cdc-wdm.c |
5991 | +++ b/drivers/usb/class/cdc-wdm.c |
5992 | @@ -245,7 +245,7 @@ static void wdm_int_callback(struct urb *urb) |
5993 | case USB_CDC_NOTIFY_RESPONSE_AVAILABLE: |
5994 | dev_dbg(&desc->intf->dev, |
5995 | "NOTIFY_RESPONSE_AVAILABLE received: index %d len %d", |
5996 | - dr->wIndex, dr->wLength); |
5997 | + le16_to_cpu(dr->wIndex), le16_to_cpu(dr->wLength)); |
5998 | break; |
5999 | |
6000 | case USB_CDC_NOTIFY_NETWORK_CONNECTION: |
6001 | @@ -262,7 +262,9 @@ static void wdm_int_callback(struct urb *urb) |
6002 | clear_bit(WDM_POLL_RUNNING, &desc->flags); |
6003 | dev_err(&desc->intf->dev, |
6004 | "unknown notification %d received: index %d len %d\n", |
6005 | - dr->bNotificationType, dr->wIndex, dr->wLength); |
6006 | + dr->bNotificationType, |
6007 | + le16_to_cpu(dr->wIndex), |
6008 | + le16_to_cpu(dr->wLength)); |
6009 | goto exit; |
6010 | } |
6011 | |
6012 | @@ -408,7 +410,7 @@ static ssize_t wdm_write |
6013 | USB_RECIP_INTERFACE); |
6014 | req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND; |
6015 | req->wValue = 0; |
6016 | - req->wIndex = desc->inum; |
6017 | + req->wIndex = desc->inum; /* already converted */ |
6018 | req->wLength = cpu_to_le16(count); |
6019 | set_bit(WDM_IN_USE, &desc->flags); |
6020 | desc->outbuf = buf; |
6021 | @@ -422,7 +424,7 @@ static ssize_t wdm_write |
6022 | rv = usb_translate_errors(rv); |
6023 | } else { |
6024 | dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d", |
6025 | - req->wIndex); |
6026 | + le16_to_cpu(req->wIndex)); |
6027 | } |
6028 | out: |
6029 | usb_autopm_put_interface(desc->intf); |
6030 | @@ -820,7 +822,7 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor |
6031 | desc->irq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE); |
6032 | desc->irq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE; |
6033 | desc->irq->wValue = 0; |
6034 | - desc->irq->wIndex = desc->inum; |
6035 | + desc->irq->wIndex = desc->inum; /* already converted */ |
6036 | desc->irq->wLength = cpu_to_le16(desc->wMaxCommand); |
6037 | |
6038 | usb_fill_control_urb( |
6039 | diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c |
6040 | index d7c3d5a35946..3b7151687776 100644 |
6041 | --- a/drivers/usb/core/hub.c |
6042 | +++ b/drivers/usb/core/hub.c |
6043 | @@ -3406,10 +3406,10 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg) |
6044 | if (status) { |
6045 | dev_dbg(&port_dev->dev, "can't resume, status %d\n", status); |
6046 | } else { |
6047 | - /* drive resume for at least 20 msec */ |
6048 | + /* drive resume for USB_RESUME_TIMEOUT msec */ |
6049 | dev_dbg(&udev->dev, "usb %sresume\n", |
6050 | (PMSG_IS_AUTO(msg) ? "auto-" : "")); |
6051 | - msleep(25); |
6052 | + msleep(USB_RESUME_TIMEOUT); |
6053 | |
6054 | /* Virtual root hubs can trigger on GET_PORT_STATUS to |
6055 | * stop resume signaling. Then finish the resume |
6056 | diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c |
6057 | index c78c8740db1d..758b7e0380f6 100644 |
6058 | --- a/drivers/usb/dwc2/hcd.c |
6059 | +++ b/drivers/usb/dwc2/hcd.c |
6060 | @@ -1521,7 +1521,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq, |
6061 | dev_dbg(hsotg->dev, |
6062 | "ClearPortFeature USB_PORT_FEAT_SUSPEND\n"); |
6063 | writel(0, hsotg->regs + PCGCTL); |
6064 | - usleep_range(20000, 40000); |
6065 | + msleep(USB_RESUME_TIMEOUT); |
6066 | |
6067 | hprt0 = dwc2_read_hprt0(hsotg); |
6068 | hprt0 |= HPRT0_RES; |
6069 | diff --git a/drivers/usb/gadget/legacy/printer.c b/drivers/usb/gadget/legacy/printer.c |
6070 | index 90545980542f..6385c198c134 100644 |
6071 | --- a/drivers/usb/gadget/legacy/printer.c |
6072 | +++ b/drivers/usb/gadget/legacy/printer.c |
6073 | @@ -1031,6 +1031,15 @@ unknown: |
6074 | break; |
6075 | } |
6076 | /* host either stalls (value < 0) or reports success */ |
6077 | + if (value >= 0) { |
6078 | + req->length = value; |
6079 | + req->zero = value < wLength; |
6080 | + value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); |
6081 | + if (value < 0) { |
6082 | + ERROR(dev, "%s:%d Error!\n", __func__, __LINE__); |
6083 | + req->status = 0; |
6084 | + } |
6085 | + } |
6086 | return value; |
6087 | } |
6088 | |
6089 | diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c |
6090 | index 85e56d1abd23..f4d88dfb26a7 100644 |
6091 | --- a/drivers/usb/host/ehci-hcd.c |
6092 | +++ b/drivers/usb/host/ehci-hcd.c |
6093 | @@ -792,12 +792,12 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd) |
6094 | ehci->reset_done[i] == 0)) |
6095 | continue; |
6096 | |
6097 | - /* start 20 msec resume signaling from this port, |
6098 | - * and make hub_wq collect PORT_STAT_C_SUSPEND to |
6099 | - * stop that signaling. Use 5 ms extra for safety, |
6100 | - * like usb_port_resume() does. |
6101 | + /* start USB_RESUME_TIMEOUT msec resume signaling from |
6102 | + * this port, and make hub_wq collect |
6103 | + * PORT_STAT_C_SUSPEND to stop that signaling. |
6104 | */ |
6105 | - ehci->reset_done[i] = jiffies + msecs_to_jiffies(25); |
6106 | + ehci->reset_done[i] = jiffies + |
6107 | + msecs_to_jiffies(USB_RESUME_TIMEOUT); |
6108 | set_bit(i, &ehci->resuming_ports); |
6109 | ehci_dbg (ehci, "port %d remote wakeup\n", i + 1); |
6110 | usb_hcd_start_port_resume(&hcd->self, i); |
6111 | diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c |
6112 | index 87cf86f38b36..7354d0129a72 100644 |
6113 | --- a/drivers/usb/host/ehci-hub.c |
6114 | +++ b/drivers/usb/host/ehci-hub.c |
6115 | @@ -471,10 +471,13 @@ static int ehci_bus_resume (struct usb_hcd *hcd) |
6116 | ehci_writel(ehci, temp, &ehci->regs->port_status [i]); |
6117 | } |
6118 | |
6119 | - /* msleep for 20ms only if code is trying to resume port */ |
6120 | + /* |
6121 | + * msleep for USB_RESUME_TIMEOUT ms only if code is trying to resume |
6122 | + * port |
6123 | + */ |
6124 | if (resume_needed) { |
6125 | spin_unlock_irq(&ehci->lock); |
6126 | - msleep(20); |
6127 | + msleep(USB_RESUME_TIMEOUT); |
6128 | spin_lock_irq(&ehci->lock); |
6129 | if (ehci->shutdown) |
6130 | goto shutdown; |
6131 | @@ -942,7 +945,7 @@ int ehci_hub_control( |
6132 | temp &= ~PORT_WAKE_BITS; |
6133 | ehci_writel(ehci, temp | PORT_RESUME, status_reg); |
6134 | ehci->reset_done[wIndex] = jiffies |
6135 | - + msecs_to_jiffies(20); |
6136 | + + msecs_to_jiffies(USB_RESUME_TIMEOUT); |
6137 | set_bit(wIndex, &ehci->resuming_ports); |
6138 | usb_hcd_start_port_resume(&hcd->self, wIndex); |
6139 | break; |
6140 | diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c |
6141 | index 475b21fd373b..7a6681fb7675 100644 |
6142 | --- a/drivers/usb/host/fotg210-hcd.c |
6143 | +++ b/drivers/usb/host/fotg210-hcd.c |
6144 | @@ -1595,7 +1595,7 @@ static int fotg210_hub_control( |
6145 | /* resume signaling for 20 msec */ |
6146 | fotg210_writel(fotg210, temp | PORT_RESUME, status_reg); |
6147 | fotg210->reset_done[wIndex] = jiffies |
6148 | - + msecs_to_jiffies(20); |
6149 | + + msecs_to_jiffies(USB_RESUME_TIMEOUT); |
6150 | break; |
6151 | case USB_PORT_FEAT_C_SUSPEND: |
6152 | clear_bit(wIndex, &fotg210->port_c_suspend); |
6153 | diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c |
6154 | index a83eefefffda..ba77e2e43f62 100644 |
6155 | --- a/drivers/usb/host/fusbh200-hcd.c |
6156 | +++ b/drivers/usb/host/fusbh200-hcd.c |
6157 | @@ -1550,10 +1550,9 @@ static int fusbh200_hub_control ( |
6158 | if ((temp & PORT_PE) == 0) |
6159 | goto error; |
6160 | |
6161 | - /* resume signaling for 20 msec */ |
6162 | fusbh200_writel(fusbh200, temp | PORT_RESUME, status_reg); |
6163 | fusbh200->reset_done[wIndex] = jiffies |
6164 | - + msecs_to_jiffies(20); |
6165 | + + msecs_to_jiffies(USB_RESUME_TIMEOUT); |
6166 | break; |
6167 | case USB_PORT_FEAT_C_SUSPEND: |
6168 | clear_bit(wIndex, &fusbh200->port_c_suspend); |
6169 | diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c |
6170 | index 113d0cc6cc43..9ef56443446d 100644 |
6171 | --- a/drivers/usb/host/isp116x-hcd.c |
6172 | +++ b/drivers/usb/host/isp116x-hcd.c |
6173 | @@ -1490,7 +1490,7 @@ static int isp116x_bus_resume(struct usb_hcd *hcd) |
6174 | spin_unlock_irq(&isp116x->lock); |
6175 | |
6176 | hcd->state = HC_STATE_RESUMING; |
6177 | - msleep(20); |
6178 | + msleep(USB_RESUME_TIMEOUT); |
6179 | |
6180 | /* Go operational */ |
6181 | spin_lock_irq(&isp116x->lock); |
6182 | diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c |
6183 | index ef7efb278b15..28a2866b6b16 100644 |
6184 | --- a/drivers/usb/host/oxu210hp-hcd.c |
6185 | +++ b/drivers/usb/host/oxu210hp-hcd.c |
6186 | @@ -2500,11 +2500,12 @@ static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd) |
6187 | || oxu->reset_done[i] != 0) |
6188 | continue; |
6189 | |
6190 | - /* start 20 msec resume signaling from this port, |
6191 | - * and make hub_wq collect PORT_STAT_C_SUSPEND to |
6192 | + /* start USB_RESUME_TIMEOUT resume signaling from this |
6193 | + * port, and make hub_wq collect PORT_STAT_C_SUSPEND to |
6194 | * stop that signaling. |
6195 | */ |
6196 | - oxu->reset_done[i] = jiffies + msecs_to_jiffies(20); |
6197 | + oxu->reset_done[i] = jiffies + |
6198 | + msecs_to_jiffies(USB_RESUME_TIMEOUT); |
6199 | oxu_dbg(oxu, "port %d remote wakeup\n", i + 1); |
6200 | mod_timer(&hcd->rh_timer, oxu->reset_done[i]); |
6201 | } |
6202 | diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c |
6203 | index bdc82fea0a1f..54a417043e44 100644 |
6204 | --- a/drivers/usb/host/r8a66597-hcd.c |
6205 | +++ b/drivers/usb/host/r8a66597-hcd.c |
6206 | @@ -2301,7 +2301,7 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd) |
6207 | rh->port &= ~USB_PORT_STAT_SUSPEND; |
6208 | rh->port |= USB_PORT_STAT_C_SUSPEND << 16; |
6209 | r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg); |
6210 | - msleep(50); |
6211 | + msleep(USB_RESUME_TIMEOUT); |
6212 | r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg); |
6213 | } |
6214 | |
6215 | diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c |
6216 | index 4f4ba1ea9e9b..9118cd8de1a7 100644 |
6217 | --- a/drivers/usb/host/sl811-hcd.c |
6218 | +++ b/drivers/usb/host/sl811-hcd.c |
6219 | @@ -1259,7 +1259,7 @@ sl811h_hub_control( |
6220 | sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1); |
6221 | |
6222 | mod_timer(&sl811->timer, jiffies |
6223 | - + msecs_to_jiffies(20)); |
6224 | + + msecs_to_jiffies(USB_RESUME_TIMEOUT)); |
6225 | break; |
6226 | case USB_PORT_FEAT_POWER: |
6227 | port_power(sl811, 0); |
6228 | diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c |
6229 | index 19ba5eafb31e..7b3d1afcc14a 100644 |
6230 | --- a/drivers/usb/host/uhci-hub.c |
6231 | +++ b/drivers/usb/host/uhci-hub.c |
6232 | @@ -166,7 +166,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci) |
6233 | /* Port received a wakeup request */ |
6234 | set_bit(port, &uhci->resuming_ports); |
6235 | uhci->ports_timeout = jiffies + |
6236 | - msecs_to_jiffies(25); |
6237 | + msecs_to_jiffies(USB_RESUME_TIMEOUT); |
6238 | usb_hcd_start_port_resume( |
6239 | &uhci_to_hcd(uhci)->self, port); |
6240 | |
6241 | @@ -338,7 +338,8 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, |
6242 | uhci_finish_suspend(uhci, port, port_addr); |
6243 | |
6244 | /* USB v2.0 7.1.7.5 */ |
6245 | - uhci->ports_timeout = jiffies + msecs_to_jiffies(50); |
6246 | + uhci->ports_timeout = jiffies + |
6247 | + msecs_to_jiffies(USB_RESUME_TIMEOUT); |
6248 | break; |
6249 | case USB_PORT_FEAT_POWER: |
6250 | /* UHCI has no power switching */ |
6251 | diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c |
6252 | index 73485fa4372f..eeedde8c435a 100644 |
6253 | --- a/drivers/usb/host/xhci-ring.c |
6254 | +++ b/drivers/usb/host/xhci-ring.c |
6255 | @@ -1574,7 +1574,7 @@ static void handle_port_status(struct xhci_hcd *xhci, |
6256 | } else { |
6257 | xhci_dbg(xhci, "resume HS port %d\n", port_id); |
6258 | bus_state->resume_done[faked_port_index] = jiffies + |
6259 | - msecs_to_jiffies(20); |
6260 | + msecs_to_jiffies(USB_RESUME_TIMEOUT); |
6261 | set_bit(faked_port_index, &bus_state->resuming_ports); |
6262 | mod_timer(&hcd->rh_timer, |
6263 | bus_state->resume_done[faked_port_index]); |
6264 | diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c |
6265 | index 3cb98b1d5d29..7911b6b6fe40 100644 |
6266 | --- a/drivers/usb/isp1760/isp1760-hcd.c |
6267 | +++ b/drivers/usb/isp1760/isp1760-hcd.c |
6268 | @@ -1869,7 +1869,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq, |
6269 | reg_write32(hcd->regs, HC_PORTSC1, |
6270 | temp | PORT_RESUME); |
6271 | priv->reset_done = jiffies + |
6272 | - msecs_to_jiffies(20); |
6273 | + msecs_to_jiffies(USB_RESUME_TIMEOUT); |
6274 | } |
6275 | break; |
6276 | case USB_PORT_FEAT_C_SUSPEND: |
6277 | diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c |
6278 | index 067920f2d570..ec0ee3b486f5 100644 |
6279 | --- a/drivers/usb/musb/musb_core.c |
6280 | +++ b/drivers/usb/musb/musb_core.c |
6281 | @@ -99,6 +99,7 @@ |
6282 | #include <linux/platform_device.h> |
6283 | #include <linux/io.h> |
6284 | #include <linux/dma-mapping.h> |
6285 | +#include <linux/usb.h> |
6286 | |
6287 | #include "musb_core.h" |
6288 | |
6289 | @@ -562,7 +563,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, |
6290 | (USB_PORT_STAT_C_SUSPEND << 16) |
6291 | | MUSB_PORT_STAT_RESUME; |
6292 | musb->rh_timer = jiffies |
6293 | - + msecs_to_jiffies(20); |
6294 | + + msecs_to_jiffies(USB_RESUME_TIMEOUT); |
6295 | musb->need_finish_resume = 1; |
6296 | |
6297 | musb->xceiv->otg->state = OTG_STATE_A_HOST; |
6298 | @@ -1597,16 +1598,30 @@ irqreturn_t musb_interrupt(struct musb *musb) |
6299 | is_host_active(musb) ? "host" : "peripheral", |
6300 | musb->int_usb, musb->int_tx, musb->int_rx); |
6301 | |
6302 | - /* the core can interrupt us for multiple reasons; docs have |
6303 | - * a generic interrupt flowchart to follow |
6304 | + /** |
6305 | + * According to Mentor Graphics' documentation, flowchart on page 98, |
6306 | + * IRQ should be handled as follows: |
6307 | + * |
6308 | + * . Resume IRQ |
6309 | + * . Session Request IRQ |
6310 | + * . VBUS Error IRQ |
6311 | + * . Suspend IRQ |
6312 | + * . Connect IRQ |
6313 | + * . Disconnect IRQ |
6314 | + * . Reset/Babble IRQ |
6315 | + * . SOF IRQ (we're not using this one) |
6316 | + * . Endpoint 0 IRQ |
6317 | + * . TX Endpoints |
6318 | + * . RX Endpoints |
6319 | + * |
6320 | + * We will be following that flowchart in order to avoid any problems |
6321 | + * that might arise with internal Finite State Machine. |
6322 | */ |
6323 | + |
6324 | if (musb->int_usb) |
6325 | retval |= musb_stage0_irq(musb, musb->int_usb, |
6326 | devctl); |
6327 | |
6328 | - /* "stage 1" is handling endpoint irqs */ |
6329 | - |
6330 | - /* handle endpoint 0 first */ |
6331 | if (musb->int_tx & 1) { |
6332 | if (is_host_active(musb)) |
6333 | retval |= musb_h_ep0_irq(musb); |
6334 | @@ -1614,37 +1629,31 @@ irqreturn_t musb_interrupt(struct musb *musb) |
6335 | retval |= musb_g_ep0_irq(musb); |
6336 | } |
6337 | |
6338 | - /* RX on endpoints 1-15 */ |
6339 | - reg = musb->int_rx >> 1; |
6340 | + reg = musb->int_tx >> 1; |
6341 | ep_num = 1; |
6342 | while (reg) { |
6343 | if (reg & 1) { |
6344 | - /* musb_ep_select(musb->mregs, ep_num); */ |
6345 | - /* REVISIT just retval = ep->rx_irq(...) */ |
6346 | retval = IRQ_HANDLED; |
6347 | if (is_host_active(musb)) |
6348 | - musb_host_rx(musb, ep_num); |
6349 | + musb_host_tx(musb, ep_num); |
6350 | else |
6351 | - musb_g_rx(musb, ep_num); |
6352 | + musb_g_tx(musb, ep_num); |
6353 | } |
6354 | - |
6355 | reg >>= 1; |
6356 | ep_num++; |
6357 | } |
6358 | |
6359 | - /* TX on endpoints 1-15 */ |
6360 | - reg = musb->int_tx >> 1; |
6361 | + reg = musb->int_rx >> 1; |
6362 | ep_num = 1; |
6363 | while (reg) { |
6364 | if (reg & 1) { |
6365 | - /* musb_ep_select(musb->mregs, ep_num); */ |
6366 | - /* REVISIT just retval |= ep->tx_irq(...) */ |
6367 | retval = IRQ_HANDLED; |
6368 | if (is_host_active(musb)) |
6369 | - musb_host_tx(musb, ep_num); |
6370 | + musb_host_rx(musb, ep_num); |
6371 | else |
6372 | - musb_g_tx(musb, ep_num); |
6373 | + musb_g_rx(musb, ep_num); |
6374 | } |
6375 | + |
6376 | reg >>= 1; |
6377 | ep_num++; |
6378 | } |
6379 | @@ -2463,7 +2472,7 @@ static int musb_resume(struct device *dev) |
6380 | if (musb->need_finish_resume) { |
6381 | musb->need_finish_resume = 0; |
6382 | schedule_delayed_work(&musb->finish_resume_work, |
6383 | - msecs_to_jiffies(20)); |
6384 | + msecs_to_jiffies(USB_RESUME_TIMEOUT)); |
6385 | } |
6386 | |
6387 | /* |
6388 | @@ -2506,7 +2515,7 @@ static int musb_runtime_resume(struct device *dev) |
6389 | if (musb->need_finish_resume) { |
6390 | musb->need_finish_resume = 0; |
6391 | schedule_delayed_work(&musb->finish_resume_work, |
6392 | - msecs_to_jiffies(20)); |
6393 | + msecs_to_jiffies(USB_RESUME_TIMEOUT)); |
6394 | } |
6395 | |
6396 | return 0; |
6397 | diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c |
6398 | index 294e159f4afe..5428ed11440d 100644 |
6399 | --- a/drivers/usb/musb/musb_virthub.c |
6400 | +++ b/drivers/usb/musb/musb_virthub.c |
6401 | @@ -136,7 +136,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend) |
6402 | /* later, GetPortStatus will stop RESUME signaling */ |
6403 | musb->port1_status |= MUSB_PORT_STAT_RESUME; |
6404 | schedule_delayed_work(&musb->finish_resume_work, |
6405 | - msecs_to_jiffies(20)); |
6406 | + msecs_to_jiffies(USB_RESUME_TIMEOUT)); |
6407 | } |
6408 | } |
6409 | |
6410 | diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c |
6411 | index 2f9735b35338..d1cd6b50f520 100644 |
6412 | --- a/drivers/usb/phy/phy.c |
6413 | +++ b/drivers/usb/phy/phy.c |
6414 | @@ -81,7 +81,9 @@ static void devm_usb_phy_release(struct device *dev, void *res) |
6415 | |
6416 | static int devm_usb_phy_match(struct device *dev, void *res, void *match_data) |
6417 | { |
6418 | - return res == match_data; |
6419 | + struct usb_phy **phy = res; |
6420 | + |
6421 | + return *phy == match_data; |
6422 | } |
6423 | |
6424 | /** |
6425 | diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c |
6426 | index 995986b8e36b..d925f55e4857 100644 |
6427 | --- a/fs/binfmt_elf.c |
6428 | +++ b/fs/binfmt_elf.c |
6429 | @@ -862,6 +862,7 @@ static int load_elf_binary(struct linux_binprm *bprm) |
6430 | i < loc->elf_ex.e_phnum; i++, elf_ppnt++) { |
6431 | int elf_prot = 0, elf_flags; |
6432 | unsigned long k, vaddr; |
6433 | + unsigned long total_size = 0; |
6434 | |
6435 | if (elf_ppnt->p_type != PT_LOAD) |
6436 | continue; |
6437 | @@ -924,10 +925,16 @@ static int load_elf_binary(struct linux_binprm *bprm) |
6438 | #else |
6439 | load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr); |
6440 | #endif |
6441 | + total_size = total_mapping_size(elf_phdata, |
6442 | + loc->elf_ex.e_phnum); |
6443 | + if (!total_size) { |
6444 | + error = -EINVAL; |
6445 | + goto out_free_dentry; |
6446 | + } |
6447 | } |
6448 | |
6449 | error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, |
6450 | - elf_prot, elf_flags, 0); |
6451 | + elf_prot, elf_flags, total_size); |
6452 | if (BAD_ADDR(error)) { |
6453 | retval = IS_ERR((void *)error) ? |
6454 | PTR_ERR((void*)error) : -EINVAL; |
6455 | diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c |
6456 | index 8b353ad02f03..0a795c969c78 100644 |
6457 | --- a/fs/btrfs/extent-tree.c |
6458 | +++ b/fs/btrfs/extent-tree.c |
6459 | @@ -6956,12 +6956,11 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root, |
6460 | return -ENOSPC; |
6461 | } |
6462 | |
6463 | - if (btrfs_test_opt(root, DISCARD)) |
6464 | - ret = btrfs_discard_extent(root, start, len, NULL); |
6465 | - |
6466 | if (pin) |
6467 | pin_down_extent(root, cache, start, len, 1); |
6468 | else { |
6469 | + if (btrfs_test_opt(root, DISCARD)) |
6470 | + ret = btrfs_discard_extent(root, start, len, NULL); |
6471 | btrfs_add_free_space(cache, start, len); |
6472 | btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc); |
6473 | } |
6474 | diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c |
6475 | index 74609b931ba5..f23d4be3280e 100644 |
6476 | --- a/fs/btrfs/ioctl.c |
6477 | +++ b/fs/btrfs/ioctl.c |
6478 | @@ -2897,6 +2897,9 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 len, |
6479 | if (src == dst) |
6480 | return -EINVAL; |
6481 | |
6482 | + if (len == 0) |
6483 | + return 0; |
6484 | + |
6485 | btrfs_double_lock(src, loff, dst, dst_loff, len); |
6486 | |
6487 | ret = extent_same_check_offsets(src, loff, len); |
6488 | @@ -3626,6 +3629,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, |
6489 | if (off + len == src->i_size) |
6490 | len = ALIGN(src->i_size, bs) - off; |
6491 | |
6492 | + if (len == 0) { |
6493 | + ret = 0; |
6494 | + goto out_unlock; |
6495 | + } |
6496 | + |
6497 | /* verify the end result is block aligned */ |
6498 | if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) || |
6499 | !IS_ALIGNED(destoff, bs)) |
6500 | diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c |
6501 | index 883b93623bc5..45ea704be030 100644 |
6502 | --- a/fs/btrfs/xattr.c |
6503 | +++ b/fs/btrfs/xattr.c |
6504 | @@ -364,22 +364,42 @@ const struct xattr_handler *btrfs_xattr_handlers[] = { |
6505 | /* |
6506 | * Check if the attribute is in a supported namespace. |
6507 | * |
6508 | - * This applied after the check for the synthetic attributes in the system |
6509 | + * This is applied after the check for the synthetic attributes in the system |
6510 | * namespace. |
6511 | */ |
6512 | -static bool btrfs_is_valid_xattr(const char *name) |
6513 | +static int btrfs_is_valid_xattr(const char *name) |
6514 | { |
6515 | - return !strncmp(name, XATTR_SECURITY_PREFIX, |
6516 | - XATTR_SECURITY_PREFIX_LEN) || |
6517 | - !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) || |
6518 | - !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) || |
6519 | - !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) || |
6520 | - !strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN); |
6521 | + int len = strlen(name); |
6522 | + int prefixlen = 0; |
6523 | + |
6524 | + if (!strncmp(name, XATTR_SECURITY_PREFIX, |
6525 | + XATTR_SECURITY_PREFIX_LEN)) |
6526 | + prefixlen = XATTR_SECURITY_PREFIX_LEN; |
6527 | + else if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) |
6528 | + prefixlen = XATTR_SYSTEM_PREFIX_LEN; |
6529 | + else if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN)) |
6530 | + prefixlen = XATTR_TRUSTED_PREFIX_LEN; |
6531 | + else if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) |
6532 | + prefixlen = XATTR_USER_PREFIX_LEN; |
6533 | + else if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN)) |
6534 | + prefixlen = XATTR_BTRFS_PREFIX_LEN; |
6535 | + else |
6536 | + return -EOPNOTSUPP; |
6537 | + |
6538 | + /* |
6539 | + * The name cannot consist of just prefix |
6540 | + */ |
6541 | + if (len <= prefixlen) |
6542 | + return -EINVAL; |
6543 | + |
6544 | + return 0; |
6545 | } |
6546 | |
6547 | ssize_t btrfs_getxattr(struct dentry *dentry, const char *name, |
6548 | void *buffer, size_t size) |
6549 | { |
6550 | + int ret; |
6551 | + |
6552 | /* |
6553 | * If this is a request for a synthetic attribute in the system.* |
6554 | * namespace use the generic infrastructure to resolve a handler |
6555 | @@ -388,8 +408,9 @@ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name, |
6556 | if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) |
6557 | return generic_getxattr(dentry, name, buffer, size); |
6558 | |
6559 | - if (!btrfs_is_valid_xattr(name)) |
6560 | - return -EOPNOTSUPP; |
6561 | + ret = btrfs_is_valid_xattr(name); |
6562 | + if (ret) |
6563 | + return ret; |
6564 | return __btrfs_getxattr(dentry->d_inode, name, buffer, size); |
6565 | } |
6566 | |
6567 | @@ -397,6 +418,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value, |
6568 | size_t size, int flags) |
6569 | { |
6570 | struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root; |
6571 | + int ret; |
6572 | |
6573 | /* |
6574 | * The permission on security.* and system.* is not checked |
6575 | @@ -413,8 +435,9 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value, |
6576 | if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) |
6577 | return generic_setxattr(dentry, name, value, size, flags); |
6578 | |
6579 | - if (!btrfs_is_valid_xattr(name)) |
6580 | - return -EOPNOTSUPP; |
6581 | + ret = btrfs_is_valid_xattr(name); |
6582 | + if (ret) |
6583 | + return ret; |
6584 | |
6585 | if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN)) |
6586 | return btrfs_set_prop(dentry->d_inode, name, |
6587 | @@ -430,6 +453,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value, |
6588 | int btrfs_removexattr(struct dentry *dentry, const char *name) |
6589 | { |
6590 | struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root; |
6591 | + int ret; |
6592 | |
6593 | /* |
6594 | * The permission on security.* and system.* is not checked |
6595 | @@ -446,8 +470,9 @@ int btrfs_removexattr(struct dentry *dentry, const char *name) |
6596 | if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) |
6597 | return generic_removexattr(dentry, name); |
6598 | |
6599 | - if (!btrfs_is_valid_xattr(name)) |
6600 | - return -EOPNOTSUPP; |
6601 | + ret = btrfs_is_valid_xattr(name); |
6602 | + if (ret) |
6603 | + return ret; |
6604 | |
6605 | if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN)) |
6606 | return btrfs_set_prop(dentry->d_inode, name, |
6607 | diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c |
6608 | index 28fe71a2904c..aae7011d99e8 100644 |
6609 | --- a/fs/ext4/namei.c |
6610 | +++ b/fs/ext4/namei.c |
6611 | @@ -1865,7 +1865,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, |
6612 | struct inode *inode) |
6613 | { |
6614 | struct inode *dir = dentry->d_parent->d_inode; |
6615 | - struct buffer_head *bh; |
6616 | + struct buffer_head *bh = NULL; |
6617 | struct ext4_dir_entry_2 *de; |
6618 | struct ext4_dir_entry_tail *t; |
6619 | struct super_block *sb; |
6620 | @@ -1889,14 +1889,14 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, |
6621 | return retval; |
6622 | if (retval == 1) { |
6623 | retval = 0; |
6624 | - return retval; |
6625 | + goto out; |
6626 | } |
6627 | } |
6628 | |
6629 | if (is_dx(dir)) { |
6630 | retval = ext4_dx_add_entry(handle, dentry, inode); |
6631 | if (!retval || (retval != ERR_BAD_DX_DIR)) |
6632 | - return retval; |
6633 | + goto out; |
6634 | ext4_clear_inode_flag(dir, EXT4_INODE_INDEX); |
6635 | dx_fallback++; |
6636 | ext4_mark_inode_dirty(handle, dir); |
6637 | @@ -1908,14 +1908,15 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, |
6638 | return PTR_ERR(bh); |
6639 | |
6640 | retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh); |
6641 | - if (retval != -ENOSPC) { |
6642 | - brelse(bh); |
6643 | - return retval; |
6644 | - } |
6645 | + if (retval != -ENOSPC) |
6646 | + goto out; |
6647 | |
6648 | if (blocks == 1 && !dx_fallback && |
6649 | - EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) |
6650 | - return make_indexed_dir(handle, dentry, inode, bh); |
6651 | + EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) { |
6652 | + retval = make_indexed_dir(handle, dentry, inode, bh); |
6653 | + bh = NULL; /* make_indexed_dir releases bh */ |
6654 | + goto out; |
6655 | + } |
6656 | brelse(bh); |
6657 | } |
6658 | bh = ext4_append(handle, dir, &block); |
6659 | @@ -1931,6 +1932,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, |
6660 | } |
6661 | |
6662 | retval = add_dirent_to_buf(handle, dentry, inode, de, bh); |
6663 | +out: |
6664 | brelse(bh); |
6665 | if (retval == 0) |
6666 | ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY); |
6667 | diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c |
6668 | index 665ef5a05183..a563ddbc19e6 100644 |
6669 | --- a/fs/lockd/svcsubs.c |
6670 | +++ b/fs/lockd/svcsubs.c |
6671 | @@ -31,7 +31,7 @@ |
6672 | static struct hlist_head nlm_files[FILE_NRHASH]; |
6673 | static DEFINE_MUTEX(nlm_file_mutex); |
6674 | |
6675 | -#ifdef NFSD_DEBUG |
6676 | +#ifdef CONFIG_SUNRPC_DEBUG |
6677 | static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f) |
6678 | { |
6679 | u32 *fhp = (u32*)f->data; |
6680 | diff --git a/fs/namei.c b/fs/namei.c |
6681 | index c83145af4bfc..caa38a24e1f7 100644 |
6682 | --- a/fs/namei.c |
6683 | +++ b/fs/namei.c |
6684 | @@ -1591,7 +1591,8 @@ static inline int walk_component(struct nameidata *nd, struct path *path, |
6685 | |
6686 | if (should_follow_link(path->dentry, follow)) { |
6687 | if (nd->flags & LOOKUP_RCU) { |
6688 | - if (unlikely(unlazy_walk(nd, path->dentry))) { |
6689 | + if (unlikely(nd->path.mnt != path->mnt || |
6690 | + unlazy_walk(nd, path->dentry))) { |
6691 | err = -ECHILD; |
6692 | goto out_err; |
6693 | } |
6694 | @@ -3047,7 +3048,8 @@ finish_lookup: |
6695 | |
6696 | if (should_follow_link(path->dentry, !symlink_ok)) { |
6697 | if (nd->flags & LOOKUP_RCU) { |
6698 | - if (unlikely(unlazy_walk(nd, path->dentry))) { |
6699 | + if (unlikely(nd->path.mnt != path->mnt || |
6700 | + unlazy_walk(nd, path->dentry))) { |
6701 | error = -ECHILD; |
6702 | goto out; |
6703 | } |
6704 | diff --git a/fs/namespace.c b/fs/namespace.c |
6705 | index 82ef1405260e..4622ee32a5e2 100644 |
6706 | --- a/fs/namespace.c |
6707 | +++ b/fs/namespace.c |
6708 | @@ -632,14 +632,17 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) |
6709 | */ |
6710 | struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry) |
6711 | { |
6712 | - struct mount *p, *res; |
6713 | - res = p = __lookup_mnt(mnt, dentry); |
6714 | + struct mount *p, *res = NULL; |
6715 | + p = __lookup_mnt(mnt, dentry); |
6716 | if (!p) |
6717 | goto out; |
6718 | + if (!(p->mnt.mnt_flags & MNT_UMOUNT)) |
6719 | + res = p; |
6720 | hlist_for_each_entry_continue(p, mnt_hash) { |
6721 | if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry) |
6722 | break; |
6723 | - res = p; |
6724 | + if (!(p->mnt.mnt_flags & MNT_UMOUNT)) |
6725 | + res = p; |
6726 | } |
6727 | out: |
6728 | return res; |
6729 | @@ -795,10 +798,8 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns) |
6730 | /* |
6731 | * vfsmount lock must be held for write |
6732 | */ |
6733 | -static void detach_mnt(struct mount *mnt, struct path *old_path) |
6734 | +static void unhash_mnt(struct mount *mnt) |
6735 | { |
6736 | - old_path->dentry = mnt->mnt_mountpoint; |
6737 | - old_path->mnt = &mnt->mnt_parent->mnt; |
6738 | mnt->mnt_parent = mnt; |
6739 | mnt->mnt_mountpoint = mnt->mnt.mnt_root; |
6740 | list_del_init(&mnt->mnt_child); |
6741 | @@ -811,6 +812,26 @@ static void detach_mnt(struct mount *mnt, struct path *old_path) |
6742 | /* |
6743 | * vfsmount lock must be held for write |
6744 | */ |
6745 | +static void detach_mnt(struct mount *mnt, struct path *old_path) |
6746 | +{ |
6747 | + old_path->dentry = mnt->mnt_mountpoint; |
6748 | + old_path->mnt = &mnt->mnt_parent->mnt; |
6749 | + unhash_mnt(mnt); |
6750 | +} |
6751 | + |
6752 | +/* |
6753 | + * vfsmount lock must be held for write |
6754 | + */ |
6755 | +static void umount_mnt(struct mount *mnt) |
6756 | +{ |
6757 | + /* old mountpoint will be dropped when we can do that */ |
6758 | + mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint; |
6759 | + unhash_mnt(mnt); |
6760 | +} |
6761 | + |
6762 | +/* |
6763 | + * vfsmount lock must be held for write |
6764 | + */ |
6765 | void mnt_set_mountpoint(struct mount *mnt, |
6766 | struct mountpoint *mp, |
6767 | struct mount *child_mnt) |
6768 | @@ -1078,6 +1099,13 @@ static void mntput_no_expire(struct mount *mnt) |
6769 | rcu_read_unlock(); |
6770 | |
6771 | list_del(&mnt->mnt_instance); |
6772 | + |
6773 | + if (unlikely(!list_empty(&mnt->mnt_mounts))) { |
6774 | + struct mount *p, *tmp; |
6775 | + list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { |
6776 | + umount_mnt(p); |
6777 | + } |
6778 | + } |
6779 | unlock_mount_hash(); |
6780 | |
6781 | if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) { |
6782 | @@ -1319,49 +1347,63 @@ static inline void namespace_lock(void) |
6783 | down_write(&namespace_sem); |
6784 | } |
6785 | |
6786 | +enum umount_tree_flags { |
6787 | + UMOUNT_SYNC = 1, |
6788 | + UMOUNT_PROPAGATE = 2, |
6789 | + UMOUNT_CONNECTED = 4, |
6790 | +}; |
6791 | /* |
6792 | * mount_lock must be held |
6793 | * namespace_sem must be held for write |
6794 | - * how = 0 => just this tree, don't propagate |
6795 | - * how = 1 => propagate; we know that nobody else has reference to any victims |
6796 | - * how = 2 => lazy umount |
6797 | */ |
6798 | -void umount_tree(struct mount *mnt, int how) |
6799 | +static void umount_tree(struct mount *mnt, enum umount_tree_flags how) |
6800 | { |
6801 | - HLIST_HEAD(tmp_list); |
6802 | + LIST_HEAD(tmp_list); |
6803 | struct mount *p; |
6804 | |
6805 | + if (how & UMOUNT_PROPAGATE) |
6806 | + propagate_mount_unlock(mnt); |
6807 | + |
6808 | + /* Gather the mounts to umount */ |
6809 | for (p = mnt; p; p = next_mnt(p, mnt)) { |
6810 | - hlist_del_init_rcu(&p->mnt_hash); |
6811 | - hlist_add_head(&p->mnt_hash, &tmp_list); |
6812 | + p->mnt.mnt_flags |= MNT_UMOUNT; |
6813 | + list_move(&p->mnt_list, &tmp_list); |
6814 | } |
6815 | |
6816 | - hlist_for_each_entry(p, &tmp_list, mnt_hash) |
6817 | + /* Hide the mounts from mnt_mounts */ |
6818 | + list_for_each_entry(p, &tmp_list, mnt_list) { |
6819 | list_del_init(&p->mnt_child); |
6820 | + } |
6821 | |
6822 | - if (how) |
6823 | + /* Add propogated mounts to the tmp_list */ |
6824 | + if (how & UMOUNT_PROPAGATE) |
6825 | propagate_umount(&tmp_list); |
6826 | |
6827 | - while (!hlist_empty(&tmp_list)) { |
6828 | - p = hlist_entry(tmp_list.first, struct mount, mnt_hash); |
6829 | - hlist_del_init_rcu(&p->mnt_hash); |
6830 | + while (!list_empty(&tmp_list)) { |
6831 | + bool disconnect; |
6832 | + p = list_first_entry(&tmp_list, struct mount, mnt_list); |
6833 | list_del_init(&p->mnt_expire); |
6834 | list_del_init(&p->mnt_list); |
6835 | __touch_mnt_namespace(p->mnt_ns); |
6836 | p->mnt_ns = NULL; |
6837 | - if (how < 2) |
6838 | + if (how & UMOUNT_SYNC) |
6839 | p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; |
6840 | |
6841 | - pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, &unmounted); |
6842 | + disconnect = !(((how & UMOUNT_CONNECTED) && |
6843 | + mnt_has_parent(p) && |
6844 | + (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) || |
6845 | + IS_MNT_LOCKED_AND_LAZY(p)); |
6846 | + |
6847 | + pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, |
6848 | + disconnect ? &unmounted : NULL); |
6849 | if (mnt_has_parent(p)) { |
6850 | - hlist_del_init(&p->mnt_mp_list); |
6851 | - put_mountpoint(p->mnt_mp); |
6852 | mnt_add_count(p->mnt_parent, -1); |
6853 | - /* old mountpoint will be dropped when we can do that */ |
6854 | - p->mnt_ex_mountpoint = p->mnt_mountpoint; |
6855 | - p->mnt_mountpoint = p->mnt.mnt_root; |
6856 | - p->mnt_parent = p; |
6857 | - p->mnt_mp = NULL; |
6858 | + if (!disconnect) { |
6859 | + /* Don't forget about p */ |
6860 | + list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts); |
6861 | + } else { |
6862 | + umount_mnt(p); |
6863 | + } |
6864 | } |
6865 | change_mnt_propagation(p, MS_PRIVATE); |
6866 | } |
6867 | @@ -1447,14 +1489,14 @@ static int do_umount(struct mount *mnt, int flags) |
6868 | |
6869 | if (flags & MNT_DETACH) { |
6870 | if (!list_empty(&mnt->mnt_list)) |
6871 | - umount_tree(mnt, 2); |
6872 | + umount_tree(mnt, UMOUNT_PROPAGATE); |
6873 | retval = 0; |
6874 | } else { |
6875 | shrink_submounts(mnt); |
6876 | retval = -EBUSY; |
6877 | if (!propagate_mount_busy(mnt, 2)) { |
6878 | if (!list_empty(&mnt->mnt_list)) |
6879 | - umount_tree(mnt, 1); |
6880 | + umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); |
6881 | retval = 0; |
6882 | } |
6883 | } |
6884 | @@ -1480,13 +1522,20 @@ void __detach_mounts(struct dentry *dentry) |
6885 | |
6886 | namespace_lock(); |
6887 | mp = lookup_mountpoint(dentry); |
6888 | - if (!mp) |
6889 | + if (IS_ERR_OR_NULL(mp)) |
6890 | goto out_unlock; |
6891 | |
6892 | lock_mount_hash(); |
6893 | while (!hlist_empty(&mp->m_list)) { |
6894 | mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); |
6895 | - umount_tree(mnt, 2); |
6896 | + if (mnt->mnt.mnt_flags & MNT_UMOUNT) { |
6897 | + struct mount *p, *tmp; |
6898 | + list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { |
6899 | + hlist_add_head(&p->mnt_umount.s_list, &unmounted); |
6900 | + umount_mnt(p); |
6901 | + } |
6902 | + } |
6903 | + else umount_tree(mnt, UMOUNT_CONNECTED); |
6904 | } |
6905 | unlock_mount_hash(); |
6906 | put_mountpoint(mp); |
6907 | @@ -1648,7 +1697,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, |
6908 | out: |
6909 | if (res) { |
6910 | lock_mount_hash(); |
6911 | - umount_tree(res, 0); |
6912 | + umount_tree(res, UMOUNT_SYNC); |
6913 | unlock_mount_hash(); |
6914 | } |
6915 | return q; |
6916 | @@ -1672,7 +1721,7 @@ void drop_collected_mounts(struct vfsmount *mnt) |
6917 | { |
6918 | namespace_lock(); |
6919 | lock_mount_hash(); |
6920 | - umount_tree(real_mount(mnt), 0); |
6921 | + umount_tree(real_mount(mnt), UMOUNT_SYNC); |
6922 | unlock_mount_hash(); |
6923 | namespace_unlock(); |
6924 | } |
6925 | @@ -1855,7 +1904,7 @@ static int attach_recursive_mnt(struct mount *source_mnt, |
6926 | out_cleanup_ids: |
6927 | while (!hlist_empty(&tree_list)) { |
6928 | child = hlist_entry(tree_list.first, struct mount, mnt_hash); |
6929 | - umount_tree(child, 0); |
6930 | + umount_tree(child, UMOUNT_SYNC); |
6931 | } |
6932 | unlock_mount_hash(); |
6933 | cleanup_group_ids(source_mnt, NULL); |
6934 | @@ -2035,7 +2084,7 @@ static int do_loopback(struct path *path, const char *old_name, |
6935 | err = graft_tree(mnt, parent, mp); |
6936 | if (err) { |
6937 | lock_mount_hash(); |
6938 | - umount_tree(mnt, 0); |
6939 | + umount_tree(mnt, UMOUNT_SYNC); |
6940 | unlock_mount_hash(); |
6941 | } |
6942 | out2: |
6943 | @@ -2406,7 +2455,7 @@ void mark_mounts_for_expiry(struct list_head *mounts) |
6944 | while (!list_empty(&graveyard)) { |
6945 | mnt = list_first_entry(&graveyard, struct mount, mnt_expire); |
6946 | touch_mnt_namespace(mnt->mnt_ns); |
6947 | - umount_tree(mnt, 1); |
6948 | + umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); |
6949 | } |
6950 | unlock_mount_hash(); |
6951 | namespace_unlock(); |
6952 | @@ -2477,7 +2526,7 @@ static void shrink_submounts(struct mount *mnt) |
6953 | m = list_first_entry(&graveyard, struct mount, |
6954 | mnt_expire); |
6955 | touch_mnt_namespace(m->mnt_ns); |
6956 | - umount_tree(m, 1); |
6957 | + umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC); |
6958 | } |
6959 | } |
6960 | } |
6961 | diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c |
6962 | index 351be9205bf8..8d129bb7355a 100644 |
6963 | --- a/fs/nfs/callback.c |
6964 | +++ b/fs/nfs/callback.c |
6965 | @@ -128,7 +128,7 @@ nfs41_callback_svc(void *vrqstp) |
6966 | if (try_to_freeze()) |
6967 | continue; |
6968 | |
6969 | - prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_UNINTERRUPTIBLE); |
6970 | + prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE); |
6971 | spin_lock_bh(&serv->sv_cb_lock); |
6972 | if (!list_empty(&serv->sv_cb_list)) { |
6973 | req = list_first_entry(&serv->sv_cb_list, |
6974 | @@ -142,10 +142,10 @@ nfs41_callback_svc(void *vrqstp) |
6975 | error); |
6976 | } else { |
6977 | spin_unlock_bh(&serv->sv_cb_lock); |
6978 | - /* schedule_timeout to game the hung task watchdog */ |
6979 | - schedule_timeout(60 * HZ); |
6980 | + schedule(); |
6981 | finish_wait(&serv->sv_cb_waitq, &wq); |
6982 | } |
6983 | + flush_signals(current); |
6984 | } |
6985 | return 0; |
6986 | } |
6987 | diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c |
6988 | index e907c8cf732e..ab21ef16a11a 100644 |
6989 | --- a/fs/nfs/direct.c |
6990 | +++ b/fs/nfs/direct.c |
6991 | @@ -129,22 +129,25 @@ nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr) |
6992 | int i; |
6993 | ssize_t count; |
6994 | |
6995 | - WARN_ON_ONCE(hdr->pgio_mirror_idx >= dreq->mirror_count); |
6996 | - |
6997 | - count = dreq->mirrors[hdr->pgio_mirror_idx].count; |
6998 | - if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) { |
6999 | - count = hdr->io_start + hdr->good_bytes - dreq->io_start; |
7000 | - dreq->mirrors[hdr->pgio_mirror_idx].count = count; |
7001 | - } |
7002 | - |
7003 | - /* update the dreq->count by finding the minimum agreed count from all |
7004 | - * mirrors */ |
7005 | - count = dreq->mirrors[0].count; |
7006 | + if (dreq->mirror_count == 1) { |
7007 | + dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes; |
7008 | + dreq->count += hdr->good_bytes; |
7009 | + } else { |
7010 | + /* mirrored writes */ |
7011 | + count = dreq->mirrors[hdr->pgio_mirror_idx].count; |
7012 | + if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) { |
7013 | + count = hdr->io_start + hdr->good_bytes - dreq->io_start; |
7014 | + dreq->mirrors[hdr->pgio_mirror_idx].count = count; |
7015 | + } |
7016 | + /* update the dreq->count by finding the minimum agreed count from all |
7017 | + * mirrors */ |
7018 | + count = dreq->mirrors[0].count; |
7019 | |
7020 | - for (i = 1; i < dreq->mirror_count; i++) |
7021 | - count = min(count, dreq->mirrors[i].count); |
7022 | + for (i = 1; i < dreq->mirror_count; i++) |
7023 | + count = min(count, dreq->mirrors[i].count); |
7024 | |
7025 | - dreq->count = count; |
7026 | + dreq->count = count; |
7027 | + } |
7028 | } |
7029 | |
7030 | /* |
7031 | diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c |
7032 | index 5c399ec41079..d494ea2d66a9 100644 |
7033 | --- a/fs/nfs/nfs4xdr.c |
7034 | +++ b/fs/nfs/nfs4xdr.c |
7035 | @@ -7365,6 +7365,11 @@ nfs4_stat_to_errno(int stat) |
7036 | .p_name = #proc, \ |
7037 | } |
7038 | |
7039 | +#define STUB(proc) \ |
7040 | +[NFSPROC4_CLNT_##proc] = { \ |
7041 | + .p_name = #proc, \ |
7042 | +} |
7043 | + |
7044 | struct rpc_procinfo nfs4_procedures[] = { |
7045 | PROC(READ, enc_read, dec_read), |
7046 | PROC(WRITE, enc_write, dec_write), |
7047 | @@ -7417,6 +7422,7 @@ struct rpc_procinfo nfs4_procedures[] = { |
7048 | PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name), |
7049 | PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid), |
7050 | PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid), |
7051 | + STUB(GETDEVICELIST), |
7052 | PROC(BIND_CONN_TO_SESSION, |
7053 | enc_bind_conn_to_session, dec_bind_conn_to_session), |
7054 | PROC(DESTROY_CLIENTID, enc_destroy_clientid, dec_destroy_clientid), |
7055 | diff --git a/fs/nfs/read.c b/fs/nfs/read.c |
7056 | index 568ecf0a880f..848d8b1db4ce 100644 |
7057 | --- a/fs/nfs/read.c |
7058 | +++ b/fs/nfs/read.c |
7059 | @@ -284,7 +284,7 @@ int nfs_readpage(struct file *file, struct page *page) |
7060 | dprintk("NFS: nfs_readpage (%p %ld@%lu)\n", |
7061 | page, PAGE_CACHE_SIZE, page_file_index(page)); |
7062 | nfs_inc_stats(inode, NFSIOS_VFSREADPAGE); |
7063 | - nfs_inc_stats(inode, NFSIOS_READPAGES); |
7064 | + nfs_add_stats(inode, NFSIOS_READPAGES, 1); |
7065 | |
7066 | /* |
7067 | * Try to flush any pending writes to the file.. |
7068 | diff --git a/fs/nfs/write.c b/fs/nfs/write.c |
7069 | index 849ed784d6ac..41b3f1096d69 100644 |
7070 | --- a/fs/nfs/write.c |
7071 | +++ b/fs/nfs/write.c |
7072 | @@ -580,7 +580,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st |
7073 | int ret; |
7074 | |
7075 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); |
7076 | - nfs_inc_stats(inode, NFSIOS_WRITEPAGES); |
7077 | + nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); |
7078 | |
7079 | nfs_pageio_cond_complete(pgio, page_file_index(page)); |
7080 | ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE); |
7081 | diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c |
7082 | index 92b9d97aff4f..5416968b3fb3 100644 |
7083 | --- a/fs/nfsd/nfs4proc.c |
7084 | +++ b/fs/nfsd/nfs4proc.c |
7085 | @@ -1030,6 +1030,8 @@ nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, |
7086 | dprintk("NFSD: nfsd4_fallocate: couldn't process stateid!\n"); |
7087 | return status; |
7088 | } |
7089 | + if (!file) |
7090 | + return nfserr_bad_stateid; |
7091 | |
7092 | status = nfsd4_vfs_fallocate(rqstp, &cstate->current_fh, file, |
7093 | fallocate->falloc_offset, |
7094 | @@ -1069,6 +1071,8 @@ nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, |
7095 | dprintk("NFSD: nfsd4_seek: couldn't process stateid!\n"); |
7096 | return status; |
7097 | } |
7098 | + if (!file) |
7099 | + return nfserr_bad_stateid; |
7100 | |
7101 | switch (seek->seek_whence) { |
7102 | case NFS4_CONTENT_DATA: |
7103 | diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c |
7104 | index 8ba1d888f1e6..ee1cccdb083a 100644 |
7105 | --- a/fs/nfsd/nfs4state.c |
7106 | +++ b/fs/nfsd/nfs4state.c |
7107 | @@ -1139,7 +1139,7 @@ hash_sessionid(struct nfs4_sessionid *sessionid) |
7108 | return sid->sequence % SESSION_HASH_SIZE; |
7109 | } |
7110 | |
7111 | -#ifdef NFSD_DEBUG |
7112 | +#ifdef CONFIG_SUNRPC_DEBUG |
7113 | static inline void |
7114 | dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid) |
7115 | { |
7116 | diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c |
7117 | index 5fb7e78169a6..5b33ce1db616 100644 |
7118 | --- a/fs/nfsd/nfs4xdr.c |
7119 | +++ b/fs/nfsd/nfs4xdr.c |
7120 | @@ -3422,6 +3422,7 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr, |
7121 | unsigned long maxcount; |
7122 | struct xdr_stream *xdr = &resp->xdr; |
7123 | struct file *file = read->rd_filp; |
7124 | + struct svc_fh *fhp = read->rd_fhp; |
7125 | int starting_len = xdr->buf->len; |
7126 | struct raparms *ra; |
7127 | __be32 *p; |
7128 | @@ -3445,12 +3446,15 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr, |
7129 | maxcount = min_t(unsigned long, maxcount, (xdr->buf->buflen - xdr->buf->len)); |
7130 | maxcount = min_t(unsigned long, maxcount, read->rd_length); |
7131 | |
7132 | - if (!read->rd_filp) { |
7133 | + if (read->rd_filp) |
7134 | + err = nfsd_permission(resp->rqstp, fhp->fh_export, |
7135 | + fhp->fh_dentry, |
7136 | + NFSD_MAY_READ|NFSD_MAY_OWNER_OVERRIDE); |
7137 | + else |
7138 | err = nfsd_get_tmp_read_open(resp->rqstp, read->rd_fhp, |
7139 | &file, &ra); |
7140 | - if (err) |
7141 | - goto err_truncate; |
7142 | - } |
7143 | + if (err) |
7144 | + goto err_truncate; |
7145 | |
7146 | if (file->f_op->splice_read && test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags)) |
7147 | err = nfsd4_encode_splice_read(resp, read, file, maxcount); |
7148 | diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c |
7149 | index aa47d75ddb26..9690cb4dd588 100644 |
7150 | --- a/fs/nfsd/nfsctl.c |
7151 | +++ b/fs/nfsd/nfsctl.c |
7152 | @@ -1250,15 +1250,15 @@ static int __init init_nfsd(void) |
7153 | int retval; |
7154 | printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n"); |
7155 | |
7156 | - retval = register_cld_notifier(); |
7157 | - if (retval) |
7158 | - return retval; |
7159 | retval = register_pernet_subsys(&nfsd_net_ops); |
7160 | if (retval < 0) |
7161 | - goto out_unregister_notifier; |
7162 | - retval = nfsd4_init_slabs(); |
7163 | + return retval; |
7164 | + retval = register_cld_notifier(); |
7165 | if (retval) |
7166 | goto out_unregister_pernet; |
7167 | + retval = nfsd4_init_slabs(); |
7168 | + if (retval) |
7169 | + goto out_unregister_notifier; |
7170 | retval = nfsd4_init_pnfs(); |
7171 | if (retval) |
7172 | goto out_free_slabs; |
7173 | @@ -1290,10 +1290,10 @@ out_exit_pnfs: |
7174 | nfsd4_exit_pnfs(); |
7175 | out_free_slabs: |
7176 | nfsd4_free_slabs(); |
7177 | -out_unregister_pernet: |
7178 | - unregister_pernet_subsys(&nfsd_net_ops); |
7179 | out_unregister_notifier: |
7180 | unregister_cld_notifier(); |
7181 | +out_unregister_pernet: |
7182 | + unregister_pernet_subsys(&nfsd_net_ops); |
7183 | return retval; |
7184 | } |
7185 | |
7186 | @@ -1308,8 +1308,8 @@ static void __exit exit_nfsd(void) |
7187 | nfsd4_exit_pnfs(); |
7188 | nfsd_fault_inject_cleanup(); |
7189 | unregister_filesystem(&nfsd_fs_type); |
7190 | - unregister_pernet_subsys(&nfsd_net_ops); |
7191 | unregister_cld_notifier(); |
7192 | + unregister_pernet_subsys(&nfsd_net_ops); |
7193 | } |
7194 | |
7195 | MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>"); |
7196 | diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h |
7197 | index 565c4da1a9eb..cf980523898b 100644 |
7198 | --- a/fs/nfsd/nfsd.h |
7199 | +++ b/fs/nfsd/nfsd.h |
7200 | @@ -24,7 +24,7 @@ |
7201 | #include "export.h" |
7202 | |
7203 | #undef ifdebug |
7204 | -#ifdef NFSD_DEBUG |
7205 | +#ifdef CONFIG_SUNRPC_DEBUG |
7206 | # define ifdebug(flag) if (nfsd_debug & NFSDDBG_##flag) |
7207 | #else |
7208 | # define ifdebug(flag) if (0) |
7209 | diff --git a/fs/open.c b/fs/open.c |
7210 | index 33f9cbf2610b..44a3be145bfe 100644 |
7211 | --- a/fs/open.c |
7212 | +++ b/fs/open.c |
7213 | @@ -570,6 +570,7 @@ static int chown_common(struct path *path, uid_t user, gid_t group) |
7214 | uid = make_kuid(current_user_ns(), user); |
7215 | gid = make_kgid(current_user_ns(), group); |
7216 | |
7217 | +retry_deleg: |
7218 | newattrs.ia_valid = ATTR_CTIME; |
7219 | if (user != (uid_t) -1) { |
7220 | if (!uid_valid(uid)) |
7221 | @@ -586,7 +587,6 @@ static int chown_common(struct path *path, uid_t user, gid_t group) |
7222 | if (!S_ISDIR(inode->i_mode)) |
7223 | newattrs.ia_valid |= |
7224 | ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV; |
7225 | -retry_deleg: |
7226 | mutex_lock(&inode->i_mutex); |
7227 | error = security_path_chown(path, uid, gid); |
7228 | if (!error) |
7229 | diff --git a/fs/pnode.c b/fs/pnode.c |
7230 | index 260ac8f898a4..6367e1e435c6 100644 |
7231 | --- a/fs/pnode.c |
7232 | +++ b/fs/pnode.c |
7233 | @@ -362,6 +362,46 @@ int propagate_mount_busy(struct mount *mnt, int refcnt) |
7234 | } |
7235 | |
7236 | /* |
7237 | + * Clear MNT_LOCKED when it can be shown to be safe. |
7238 | + * |
7239 | + * mount_lock lock must be held for write |
7240 | + */ |
7241 | +void propagate_mount_unlock(struct mount *mnt) |
7242 | +{ |
7243 | + struct mount *parent = mnt->mnt_parent; |
7244 | + struct mount *m, *child; |
7245 | + |
7246 | + BUG_ON(parent == mnt); |
7247 | + |
7248 | + for (m = propagation_next(parent, parent); m; |
7249 | + m = propagation_next(m, parent)) { |
7250 | + child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint); |
7251 | + if (child) |
7252 | + child->mnt.mnt_flags &= ~MNT_LOCKED; |
7253 | + } |
7254 | +} |
7255 | + |
7256 | +/* |
7257 | + * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted. |
7258 | + */ |
7259 | +static void mark_umount_candidates(struct mount *mnt) |
7260 | +{ |
7261 | + struct mount *parent = mnt->mnt_parent; |
7262 | + struct mount *m; |
7263 | + |
7264 | + BUG_ON(parent == mnt); |
7265 | + |
7266 | + for (m = propagation_next(parent, parent); m; |
7267 | + m = propagation_next(m, parent)) { |
7268 | + struct mount *child = __lookup_mnt_last(&m->mnt, |
7269 | + mnt->mnt_mountpoint); |
7270 | + if (child && (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m))) { |
7271 | + SET_MNT_MARK(child); |
7272 | + } |
7273 | + } |
7274 | +} |
7275 | + |
7276 | +/* |
7277 | * NOTE: unmounting 'mnt' naturally propagates to all other mounts its |
7278 | * parent propagates to. |
7279 | */ |
7280 | @@ -378,13 +418,16 @@ static void __propagate_umount(struct mount *mnt) |
7281 | struct mount *child = __lookup_mnt_last(&m->mnt, |
7282 | mnt->mnt_mountpoint); |
7283 | /* |
7284 | - * umount the child only if the child has no |
7285 | - * other children |
7286 | + * umount the child only if the child has no children |
7287 | + * and the child is marked safe to unmount. |
7288 | */ |
7289 | - if (child && list_empty(&child->mnt_mounts)) { |
7290 | + if (!child || !IS_MNT_MARKED(child)) |
7291 | + continue; |
7292 | + CLEAR_MNT_MARK(child); |
7293 | + if (list_empty(&child->mnt_mounts)) { |
7294 | list_del_init(&child->mnt_child); |
7295 | - hlist_del_init_rcu(&child->mnt_hash); |
7296 | - hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash); |
7297 | + child->mnt.mnt_flags |= MNT_UMOUNT; |
7298 | + list_move_tail(&child->mnt_list, &mnt->mnt_list); |
7299 | } |
7300 | } |
7301 | } |
7302 | @@ -396,11 +439,14 @@ static void __propagate_umount(struct mount *mnt) |
7303 | * |
7304 | * vfsmount lock must be held for write |
7305 | */ |
7306 | -int propagate_umount(struct hlist_head *list) |
7307 | +int propagate_umount(struct list_head *list) |
7308 | { |
7309 | struct mount *mnt; |
7310 | |
7311 | - hlist_for_each_entry(mnt, list, mnt_hash) |
7312 | + list_for_each_entry_reverse(mnt, list, mnt_list) |
7313 | + mark_umount_candidates(mnt); |
7314 | + |
7315 | + list_for_each_entry(mnt, list, mnt_list) |
7316 | __propagate_umount(mnt); |
7317 | return 0; |
7318 | } |
7319 | diff --git a/fs/pnode.h b/fs/pnode.h |
7320 | index 4a246358b031..7114ce6e6b9e 100644 |
7321 | --- a/fs/pnode.h |
7322 | +++ b/fs/pnode.h |
7323 | @@ -19,6 +19,9 @@ |
7324 | #define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED) |
7325 | #define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED) |
7326 | #define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED) |
7327 | +#define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED) |
7328 | +#define IS_MNT_LOCKED_AND_LAZY(m) \ |
7329 | + (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED) |
7330 | |
7331 | #define CL_EXPIRE 0x01 |
7332 | #define CL_SLAVE 0x02 |
7333 | @@ -40,14 +43,14 @@ static inline void set_mnt_shared(struct mount *mnt) |
7334 | void change_mnt_propagation(struct mount *, int); |
7335 | int propagate_mnt(struct mount *, struct mountpoint *, struct mount *, |
7336 | struct hlist_head *); |
7337 | -int propagate_umount(struct hlist_head *); |
7338 | +int propagate_umount(struct list_head *); |
7339 | int propagate_mount_busy(struct mount *, int); |
7340 | +void propagate_mount_unlock(struct mount *); |
7341 | void mnt_release_group_id(struct mount *); |
7342 | int get_dominating_id(struct mount *mnt, const struct path *root); |
7343 | unsigned int mnt_get_count(struct mount *mnt); |
7344 | void mnt_set_mountpoint(struct mount *, struct mountpoint *, |
7345 | struct mount *); |
7346 | -void umount_tree(struct mount *, int); |
7347 | struct mount *copy_tree(struct mount *, struct dentry *, int); |
7348 | bool is_path_reachable(struct mount *, struct dentry *, |
7349 | const struct path *root); |
7350 | diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h |
7351 | index b034f1068dfe..0d5852557f1c 100644 |
7352 | --- a/include/acpi/actypes.h |
7353 | +++ b/include/acpi/actypes.h |
7354 | @@ -199,9 +199,29 @@ typedef int s32; |
7355 | typedef s32 acpi_native_int; |
7356 | |
7357 | typedef u32 acpi_size; |
7358 | + |
7359 | +#ifdef ACPI_32BIT_PHYSICAL_ADDRESS |
7360 | + |
7361 | +/* |
7362 | + * OSPMs can define this to shrink the size of the structures for 32-bit |
7363 | + * none PAE environment. ASL compiler may always define this to generate |
7364 | + * 32-bit OSPM compliant tables. |
7365 | + */ |
7366 | typedef u32 acpi_io_address; |
7367 | typedef u32 acpi_physical_address; |
7368 | |
7369 | +#else /* ACPI_32BIT_PHYSICAL_ADDRESS */ |
7370 | + |
7371 | +/* |
7372 | + * It is reported that, after some calculations, the physical addresses can |
7373 | + * wrap over the 32-bit boundary on 32-bit PAE environment. |
7374 | + * https://bugzilla.kernel.org/show_bug.cgi?id=87971 |
7375 | + */ |
7376 | +typedef u64 acpi_io_address; |
7377 | +typedef u64 acpi_physical_address; |
7378 | + |
7379 | +#endif /* ACPI_32BIT_PHYSICAL_ADDRESS */ |
7380 | + |
7381 | #define ACPI_MAX_PTR ACPI_UINT32_MAX |
7382 | #define ACPI_SIZE_MAX ACPI_UINT32_MAX |
7383 | |
7384 | @@ -736,10 +756,6 @@ typedef u32 acpi_event_status; |
7385 | #define ACPI_GPE_ENABLE 0 |
7386 | #define ACPI_GPE_DISABLE 1 |
7387 | #define ACPI_GPE_CONDITIONAL_ENABLE 2 |
7388 | -#define ACPI_GPE_SAVE_MASK 4 |
7389 | - |
7390 | -#define ACPI_GPE_ENABLE_SAVE (ACPI_GPE_ENABLE | ACPI_GPE_SAVE_MASK) |
7391 | -#define ACPI_GPE_DISABLE_SAVE (ACPI_GPE_DISABLE | ACPI_GPE_SAVE_MASK) |
7392 | |
7393 | /* |
7394 | * GPE info flags - Per GPE |
7395 | diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h |
7396 | index ad74dc51d5b7..ecdf9405dd3a 100644 |
7397 | --- a/include/acpi/platform/acenv.h |
7398 | +++ b/include/acpi/platform/acenv.h |
7399 | @@ -76,6 +76,7 @@ |
7400 | #define ACPI_LARGE_NAMESPACE_NODE |
7401 | #define ACPI_DATA_TABLE_DISASSEMBLY |
7402 | #define ACPI_SINGLE_THREADED |
7403 | +#define ACPI_32BIT_PHYSICAL_ADDRESS |
7404 | #endif |
7405 | |
7406 | /* acpi_exec configuration. Multithreaded with full AML debugger */ |
7407 | diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h |
7408 | index ae2eb17a1658..a2156090563f 100644 |
7409 | --- a/include/dt-bindings/clock/tegra124-car-common.h |
7410 | +++ b/include/dt-bindings/clock/tegra124-car-common.h |
7411 | @@ -297,7 +297,7 @@ |
7412 | #define TEGRA124_CLK_PLL_C4 270 |
7413 | #define TEGRA124_CLK_PLL_DP 271 |
7414 | #define TEGRA124_CLK_PLL_E_MUX 272 |
7415 | -#define TEGRA124_CLK_PLLD_DSI 273 |
7416 | +#define TEGRA124_CLK_PLL_D_DSI_OUT 273 |
7417 | /* 274 */ |
7418 | /* 275 */ |
7419 | /* 276 */ |
7420 | diff --git a/include/linux/bpf.h b/include/linux/bpf.h |
7421 | index bbfceb756452..33b52fb0e20f 100644 |
7422 | --- a/include/linux/bpf.h |
7423 | +++ b/include/linux/bpf.h |
7424 | @@ -48,7 +48,7 @@ struct bpf_map *bpf_map_get(struct fd f); |
7425 | |
7426 | /* function argument constraints */ |
7427 | enum bpf_arg_type { |
7428 | - ARG_ANYTHING = 0, /* any argument is ok */ |
7429 | + ARG_DONTCARE = 0, /* unused argument in helper function */ |
7430 | |
7431 | /* the following constraints used to prototype |
7432 | * bpf_map_lookup/update/delete_elem() functions |
7433 | @@ -62,6 +62,8 @@ enum bpf_arg_type { |
7434 | */ |
7435 | ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */ |
7436 | ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */ |
7437 | + |
7438 | + ARG_ANYTHING, /* any (initialized) argument is ok */ |
7439 | }; |
7440 | |
7441 | /* type of values returned from helper functions */ |
7442 | diff --git a/include/linux/mount.h b/include/linux/mount.h |
7443 | index c2c561dc0114..564beeec5d83 100644 |
7444 | --- a/include/linux/mount.h |
7445 | +++ b/include/linux/mount.h |
7446 | @@ -61,6 +61,7 @@ struct mnt_namespace; |
7447 | #define MNT_DOOMED 0x1000000 |
7448 | #define MNT_SYNC_UMOUNT 0x2000000 |
7449 | #define MNT_MARKED 0x4000000 |
7450 | +#define MNT_UMOUNT 0x8000000 |
7451 | |
7452 | struct vfsmount { |
7453 | struct dentry *mnt_root; /* root of the mounted tree */ |
7454 | diff --git a/include/linux/sched.h b/include/linux/sched.h |
7455 | index a419b65770d6..51348f77e431 100644 |
7456 | --- a/include/linux/sched.h |
7457 | +++ b/include/linux/sched.h |
7458 | @@ -176,6 +176,14 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); |
7459 | extern void calc_global_load(unsigned long ticks); |
7460 | extern void update_cpu_load_nohz(void); |
7461 | |
7462 | +/* Notifier for when a task gets migrated to a new CPU */ |
7463 | +struct task_migration_notifier { |
7464 | + struct task_struct *task; |
7465 | + int from_cpu; |
7466 | + int to_cpu; |
7467 | +}; |
7468 | +extern void register_task_migration_notifier(struct notifier_block *n); |
7469 | + |
7470 | extern unsigned long get_parent_ip(unsigned long addr); |
7471 | |
7472 | extern void dump_cpu_task(int cpu); |
7473 | diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h |
7474 | index f54d6659713a..bdccc4b46f57 100644 |
7475 | --- a/include/linux/skbuff.h |
7476 | +++ b/include/linux/skbuff.h |
7477 | @@ -769,6 +769,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, |
7478 | |
7479 | struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags, |
7480 | int node); |
7481 | +struct sk_buff *__build_skb(void *data, unsigned int frag_size); |
7482 | struct sk_buff *build_skb(void *data, unsigned int frag_size); |
7483 | static inline struct sk_buff *alloc_skb(unsigned int size, |
7484 | gfp_t priority) |
7485 | @@ -3013,6 +3014,18 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb, |
7486 | */ |
7487 | #define CHECKSUM_BREAK 76 |
7488 | |
7489 | +/* Unset checksum-complete |
7490 | + * |
7491 | + * Unset checksum complete can be done when packet is being modified |
7492 | + * (uncompressed for instance) and checksum-complete value is |
7493 | + * invalidated. |
7494 | + */ |
7495 | +static inline void skb_checksum_complete_unset(struct sk_buff *skb) |
7496 | +{ |
7497 | + if (skb->ip_summed == CHECKSUM_COMPLETE) |
7498 | + skb->ip_summed = CHECKSUM_NONE; |
7499 | +} |
7500 | + |
7501 | /* Validate (init) checksum based on checksum complete. |
7502 | * |
7503 | * Return values: |
7504 | diff --git a/include/linux/usb.h b/include/linux/usb.h |
7505 | index 7ee1b5c3b4cb..447fe29b55b4 100644 |
7506 | --- a/include/linux/usb.h |
7507 | +++ b/include/linux/usb.h |
7508 | @@ -205,6 +205,32 @@ void usb_put_intf(struct usb_interface *intf); |
7509 | #define USB_MAXINTERFACES 32 |
7510 | #define USB_MAXIADS (USB_MAXINTERFACES/2) |
7511 | |
7512 | +/* |
7513 | + * USB Resume Timer: Every Host controller driver should drive the resume |
7514 | + * signalling on the bus for the amount of time defined by this macro. |
7515 | + * |
7516 | + * That way we will have a 'stable' behavior among all HCDs supported by Linux. |
7517 | + * |
7518 | + * Note that the USB Specification states we should drive resume for *at least* |
7519 | + * 20 ms, but it doesn't give an upper bound. This creates two possible |
7520 | + * situations which we want to avoid: |
7521 | + * |
7522 | + * (a) sometimes an msleep(20) might expire slightly before 20 ms, which causes |
7523 | + * us to fail USB Electrical Tests, thus failing Certification |
7524 | + * |
7525 | + * (b) Some (many) devices actually need more than 20 ms of resume signalling, |
7526 | + * and while we can argue that's against the USB Specification, we don't have |
7527 | + * control over which devices a certification laboratory will be using for |
7528 | + * certification. If CertLab uses a device which was tested against Windows and |
7529 | + * that happens to have relaxed resume signalling rules, we might fall into |
7530 | + * situations where we fail interoperability and electrical tests. |
7531 | + * |
7532 | + * In order to avoid both conditions, we're using a 40 ms resume timeout, which |
7533 | + * should cope with both LPJ calibration errors and devices not following every |
7534 | + * detail of the USB Specification. |
7535 | + */ |
7536 | +#define USB_RESUME_TIMEOUT 40 /* ms */ |
7537 | + |
7538 | /** |
7539 | * struct usb_interface_cache - long-term representation of a device interface |
7540 | * @num_altsetting: number of altsettings defined. |
7541 | diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h |
7542 | index d3583d3ee193..dd0f3abde75d 100644 |
7543 | --- a/include/target/iscsi/iscsi_target_core.h |
7544 | +++ b/include/target/iscsi/iscsi_target_core.h |
7545 | @@ -602,6 +602,11 @@ struct iscsi_conn { |
7546 | struct iscsi_session *sess; |
7547 | /* Pointer to thread_set in use for this conn's threads */ |
7548 | struct iscsi_thread_set *thread_set; |
7549 | + int bitmap_id; |
7550 | + int rx_thread_active; |
7551 | + struct task_struct *rx_thread; |
7552 | + int tx_thread_active; |
7553 | + struct task_struct *tx_thread; |
7554 | /* list_head for session connection list */ |
7555 | struct list_head conn_list; |
7556 | } ____cacheline_aligned; |
7557 | @@ -871,10 +876,12 @@ struct iscsit_global { |
7558 | /* Unique identifier used for the authentication daemon */ |
7559 | u32 auth_id; |
7560 | u32 inactive_ts; |
7561 | +#define ISCSIT_BITMAP_BITS 262144 |
7562 | /* Thread Set bitmap count */ |
7563 | int ts_bitmap_count; |
7564 | /* Thread Set bitmap pointer */ |
7565 | unsigned long *ts_bitmap; |
7566 | + spinlock_t ts_bitmap_lock; |
7567 | /* Used for iSCSI discovery session authentication */ |
7568 | struct iscsi_node_acl discovery_acl; |
7569 | struct iscsi_portal_group *discovery_tpg; |
7570 | diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h |
7571 | index 672150b6aaf5..985ca4c907fe 100644 |
7572 | --- a/include/target/target_core_base.h |
7573 | +++ b/include/target/target_core_base.h |
7574 | @@ -524,7 +524,7 @@ struct se_cmd { |
7575 | sense_reason_t (*execute_cmd)(struct se_cmd *); |
7576 | sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *, |
7577 | u32, enum dma_data_direction); |
7578 | - sense_reason_t (*transport_complete_callback)(struct se_cmd *); |
7579 | + sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool); |
7580 | |
7581 | unsigned char *t_task_cdb; |
7582 | unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; |
7583 | diff --git a/include/uapi/linux/nfsd/debug.h b/include/uapi/linux/nfsd/debug.h |
7584 | index 0bf130a1c58d..28ec6c9c421a 100644 |
7585 | --- a/include/uapi/linux/nfsd/debug.h |
7586 | +++ b/include/uapi/linux/nfsd/debug.h |
7587 | @@ -12,14 +12,6 @@ |
7588 | #include <linux/sunrpc/debug.h> |
7589 | |
7590 | /* |
7591 | - * Enable debugging for nfsd. |
7592 | - * Requires RPC_DEBUG. |
7593 | - */ |
7594 | -#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
7595 | -# define NFSD_DEBUG 1 |
7596 | -#endif |
7597 | - |
7598 | -/* |
7599 | * knfsd debug flags |
7600 | */ |
7601 | #define NFSDDBG_SOCK 0x0001 |
7602 | diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h |
7603 | index a20e4a3a8b15..847a0a2b399c 100644 |
7604 | --- a/include/video/samsung_fimd.h |
7605 | +++ b/include/video/samsung_fimd.h |
7606 | @@ -436,6 +436,12 @@ |
7607 | #define BLENDCON_NEW_8BIT_ALPHA_VALUE (1 << 0) |
7608 | #define BLENDCON_NEW_4BIT_ALPHA_VALUE (0 << 0) |
7609 | |
7610 | +/* Display port clock control */ |
7611 | +#define DP_MIE_CLKCON 0x27c |
7612 | +#define DP_MIE_CLK_DISABLE 0x0 |
7613 | +#define DP_MIE_CLK_DP_ENABLE 0x2 |
7614 | +#define DP_MIE_CLK_MIE_ENABLE 0x3 |
7615 | + |
7616 | /* Notes on per-window bpp settings |
7617 | * |
7618 | * Value Win0 Win1 Win2 Win3 Win 4 |
7619 | diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c |
7620 | index 36508e69e92a..5d8ea3d8a897 100644 |
7621 | --- a/kernel/bpf/verifier.c |
7622 | +++ b/kernel/bpf/verifier.c |
7623 | @@ -755,7 +755,7 @@ static int check_func_arg(struct verifier_env *env, u32 regno, |
7624 | enum bpf_reg_type expected_type; |
7625 | int err = 0; |
7626 | |
7627 | - if (arg_type == ARG_ANYTHING) |
7628 | + if (arg_type == ARG_DONTCARE) |
7629 | return 0; |
7630 | |
7631 | if (reg->type == NOT_INIT) { |
7632 | @@ -763,6 +763,9 @@ static int check_func_arg(struct verifier_env *env, u32 regno, |
7633 | return -EACCES; |
7634 | } |
7635 | |
7636 | + if (arg_type == ARG_ANYTHING) |
7637 | + return 0; |
7638 | + |
7639 | if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY || |
7640 | arg_type == ARG_PTR_TO_MAP_VALUE) { |
7641 | expected_type = PTR_TO_STACK; |
7642 | diff --git a/kernel/ptrace.c b/kernel/ptrace.c |
7643 | index 227fec36b12a..9a34bd80a745 100644 |
7644 | --- a/kernel/ptrace.c |
7645 | +++ b/kernel/ptrace.c |
7646 | @@ -697,6 +697,8 @@ static int ptrace_peek_siginfo(struct task_struct *child, |
7647 | static int ptrace_resume(struct task_struct *child, long request, |
7648 | unsigned long data) |
7649 | { |
7650 | + bool need_siglock; |
7651 | + |
7652 | if (!valid_signal(data)) |
7653 | return -EIO; |
7654 | |
7655 | @@ -724,8 +726,26 @@ static int ptrace_resume(struct task_struct *child, long request, |
7656 | user_disable_single_step(child); |
7657 | } |
7658 | |
7659 | + /* |
7660 | + * Change ->exit_code and ->state under siglock to avoid the race |
7661 | + * with wait_task_stopped() in between; a non-zero ->exit_code will |
7662 | + * wrongly look like another report from tracee. |
7663 | + * |
7664 | + * Note that we need siglock even if ->exit_code == data and/or this |
7665 | + * status was not reported yet, the new status must not be cleared by |
7666 | + * wait_task_stopped() after resume. |
7667 | + * |
7668 | + * If data == 0 we do not care if wait_task_stopped() reports the old |
7669 | + * status and clears the code too; this can't race with the tracee, it |
7670 | + * takes siglock after resume. |
7671 | + */ |
7672 | + need_siglock = data && !thread_group_empty(current); |
7673 | + if (need_siglock) |
7674 | + spin_lock_irq(&child->sighand->siglock); |
7675 | child->exit_code = data; |
7676 | wake_up_state(child, __TASK_TRACED); |
7677 | + if (need_siglock) |
7678 | + spin_unlock_irq(&child->sighand->siglock); |
7679 | |
7680 | return 0; |
7681 | } |
7682 | diff --git a/kernel/sched/core.c b/kernel/sched/core.c |
7683 | index 62671f53202a..3d5f6f6d14c2 100644 |
7684 | --- a/kernel/sched/core.c |
7685 | +++ b/kernel/sched/core.c |
7686 | @@ -996,6 +996,13 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) |
7687 | rq_clock_skip_update(rq, true); |
7688 | } |
7689 | |
7690 | +static ATOMIC_NOTIFIER_HEAD(task_migration_notifier); |
7691 | + |
7692 | +void register_task_migration_notifier(struct notifier_block *n) |
7693 | +{ |
7694 | + atomic_notifier_chain_register(&task_migration_notifier, n); |
7695 | +} |
7696 | + |
7697 | #ifdef CONFIG_SMP |
7698 | void set_task_cpu(struct task_struct *p, unsigned int new_cpu) |
7699 | { |
7700 | @@ -1026,10 +1033,18 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) |
7701 | trace_sched_migrate_task(p, new_cpu); |
7702 | |
7703 | if (task_cpu(p) != new_cpu) { |
7704 | + struct task_migration_notifier tmn; |
7705 | + |
7706 | if (p->sched_class->migrate_task_rq) |
7707 | p->sched_class->migrate_task_rq(p, new_cpu); |
7708 | p->se.nr_migrations++; |
7709 | perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0); |
7710 | + |
7711 | + tmn.task = p; |
7712 | + tmn.from_cpu = task_cpu(p); |
7713 | + tmn.to_cpu = new_cpu; |
7714 | + |
7715 | + atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn); |
7716 | } |
7717 | |
7718 | __set_task_cpu(p, new_cpu); |
7719 | diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c |
7720 | index 3fa8fa6d9403..f670cbb17f5d 100644 |
7721 | --- a/kernel/sched/deadline.c |
7722 | +++ b/kernel/sched/deadline.c |
7723 | @@ -514,7 +514,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) |
7724 | unsigned long flags; |
7725 | struct rq *rq; |
7726 | |
7727 | - rq = task_rq_lock(current, &flags); |
7728 | + rq = task_rq_lock(p, &flags); |
7729 | |
7730 | /* |
7731 | * We need to take care of several possible races here: |
7732 | @@ -569,7 +569,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) |
7733 | push_dl_task(rq); |
7734 | #endif |
7735 | unlock: |
7736 | - task_rq_unlock(rq, current, &flags); |
7737 | + task_rq_unlock(rq, p, &flags); |
7738 | |
7739 | return HRTIMER_NORESTART; |
7740 | } |
7741 | diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c |
7742 | index 5040d44fe5a3..922048a0f7ea 100644 |
7743 | --- a/kernel/trace/ring_buffer.c |
7744 | +++ b/kernel/trace/ring_buffer.c |
7745 | @@ -2679,7 +2679,7 @@ static DEFINE_PER_CPU(unsigned int, current_context); |
7746 | |
7747 | static __always_inline int trace_recursive_lock(void) |
7748 | { |
7749 | - unsigned int val = this_cpu_read(current_context); |
7750 | + unsigned int val = __this_cpu_read(current_context); |
7751 | int bit; |
7752 | |
7753 | if (in_interrupt()) { |
7754 | @@ -2696,18 +2696,17 @@ static __always_inline int trace_recursive_lock(void) |
7755 | return 1; |
7756 | |
7757 | val |= (1 << bit); |
7758 | - this_cpu_write(current_context, val); |
7759 | + __this_cpu_write(current_context, val); |
7760 | |
7761 | return 0; |
7762 | } |
7763 | |
7764 | static __always_inline void trace_recursive_unlock(void) |
7765 | { |
7766 | - unsigned int val = this_cpu_read(current_context); |
7767 | + unsigned int val = __this_cpu_read(current_context); |
7768 | |
7769 | - val--; |
7770 | - val &= this_cpu_read(current_context); |
7771 | - this_cpu_write(current_context, val); |
7772 | + val &= val & (val - 1); |
7773 | + __this_cpu_write(current_context, val); |
7774 | } |
7775 | |
7776 | #else |
7777 | diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c |
7778 | index db54dda10ccc..a9c10a3cf122 100644 |
7779 | --- a/kernel/trace/trace_events.c |
7780 | +++ b/kernel/trace/trace_events.c |
7781 | @@ -565,6 +565,7 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match, |
7782 | static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) |
7783 | { |
7784 | char *event = NULL, *sub = NULL, *match; |
7785 | + int ret; |
7786 | |
7787 | /* |
7788 | * The buf format can be <subsystem>:<event-name> |
7789 | @@ -590,7 +591,13 @@ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) |
7790 | event = NULL; |
7791 | } |
7792 | |
7793 | - return __ftrace_set_clr_event(tr, match, sub, event, set); |
7794 | + ret = __ftrace_set_clr_event(tr, match, sub, event, set); |
7795 | + |
7796 | + /* Put back the colon to allow this to be called again */ |
7797 | + if (buf) |
7798 | + *(buf - 1) = ':'; |
7799 | + |
7800 | + return ret; |
7801 | } |
7802 | |
7803 | /** |
7804 | diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c |
7805 | index 2d25ad1526bb..b6fce365ef27 100644 |
7806 | --- a/kernel/trace/trace_functions_graph.c |
7807 | +++ b/kernel/trace/trace_functions_graph.c |
7808 | @@ -1309,15 +1309,19 @@ void graph_trace_open(struct trace_iterator *iter) |
7809 | { |
7810 | /* pid and depth on the last trace processed */ |
7811 | struct fgraph_data *data; |
7812 | + gfp_t gfpflags; |
7813 | int cpu; |
7814 | |
7815 | iter->private = NULL; |
7816 | |
7817 | - data = kzalloc(sizeof(*data), GFP_KERNEL); |
7818 | + /* We can be called in atomic context via ftrace_dump() */ |
7819 | + gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL; |
7820 | + |
7821 | + data = kzalloc(sizeof(*data), gfpflags); |
7822 | if (!data) |
7823 | goto out_err; |
7824 | |
7825 | - data->cpu_data = alloc_percpu(struct fgraph_cpu_data); |
7826 | + data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags); |
7827 | if (!data->cpu_data) |
7828 | goto out_err_free; |
7829 | |
7830 | diff --git a/lib/string.c b/lib/string.c |
7831 | index ce81aaec3839..a5792019193c 100644 |
7832 | --- a/lib/string.c |
7833 | +++ b/lib/string.c |
7834 | @@ -607,7 +607,7 @@ EXPORT_SYMBOL(memset); |
7835 | void memzero_explicit(void *s, size_t count) |
7836 | { |
7837 | memset(s, 0, count); |
7838 | - OPTIMIZER_HIDE_VAR(s); |
7839 | + barrier(); |
7840 | } |
7841 | EXPORT_SYMBOL(memzero_explicit); |
7842 | |
7843 | diff --git a/mm/huge_memory.c b/mm/huge_memory.c |
7844 | index 6817b0350c71..956d4dbe001d 100644 |
7845 | --- a/mm/huge_memory.c |
7846 | +++ b/mm/huge_memory.c |
7847 | @@ -2316,8 +2316,14 @@ static struct page |
7848 | struct vm_area_struct *vma, unsigned long address, |
7849 | int node) |
7850 | { |
7851 | + gfp_t flags; |
7852 | + |
7853 | VM_BUG_ON_PAGE(*hpage, *hpage); |
7854 | |
7855 | + /* Only allocate from the target node */ |
7856 | + flags = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) | |
7857 | + __GFP_THISNODE; |
7858 | + |
7859 | /* |
7860 | * Before allocating the hugepage, release the mmap_sem read lock. |
7861 | * The allocation can take potentially a long time if it involves |
7862 | @@ -2326,8 +2332,7 @@ static struct page |
7863 | */ |
7864 | up_read(&mm->mmap_sem); |
7865 | |
7866 | - *hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask( |
7867 | - khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER); |
7868 | + *hpage = alloc_pages_exact_node(node, flags, HPAGE_PMD_ORDER); |
7869 | if (unlikely(!*hpage)) { |
7870 | count_vm_event(THP_COLLAPSE_ALLOC_FAILED); |
7871 | *hpage = ERR_PTR(-ENOMEM); |
7872 | diff --git a/mm/hugetlb.c b/mm/hugetlb.c |
7873 | index c41b2a0ee273..caad3c5a926f 100644 |
7874 | --- a/mm/hugetlb.c |
7875 | +++ b/mm/hugetlb.c |
7876 | @@ -3735,8 +3735,7 @@ retry: |
7877 | if (!pmd_huge(*pmd)) |
7878 | goto out; |
7879 | if (pmd_present(*pmd)) { |
7880 | - page = pte_page(*(pte_t *)pmd) + |
7881 | - ((address & ~PMD_MASK) >> PAGE_SHIFT); |
7882 | + page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); |
7883 | if (flags & FOLL_GET) |
7884 | get_page(page); |
7885 | } else { |
7886 | diff --git a/mm/mempolicy.c b/mm/mempolicy.c |
7887 | index 4721046a134a..de5dc5e12691 100644 |
7888 | --- a/mm/mempolicy.c |
7889 | +++ b/mm/mempolicy.c |
7890 | @@ -1985,7 +1985,8 @@ retry_cpuset: |
7891 | nmask = policy_nodemask(gfp, pol); |
7892 | if (!nmask || node_isset(node, *nmask)) { |
7893 | mpol_cond_put(pol); |
7894 | - page = alloc_pages_exact_node(node, gfp, order); |
7895 | + page = alloc_pages_exact_node(node, |
7896 | + gfp | __GFP_THISNODE, order); |
7897 | goto out; |
7898 | } |
7899 | } |
7900 | diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c |
7901 | index 0ee453fad3de..f371cbff6d45 100644 |
7902 | --- a/net/bridge/br_netfilter.c |
7903 | +++ b/net/bridge/br_netfilter.c |
7904 | @@ -651,6 +651,13 @@ static int br_nf_forward_finish(struct sk_buff *skb) |
7905 | struct net_device *in; |
7906 | |
7907 | if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) { |
7908 | + int frag_max_size; |
7909 | + |
7910 | + if (skb->protocol == htons(ETH_P_IP)) { |
7911 | + frag_max_size = IPCB(skb)->frag_max_size; |
7912 | + BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size; |
7913 | + } |
7914 | + |
7915 | in = nf_bridge->physindev; |
7916 | if (nf_bridge->mask & BRNF_PKT_TYPE) { |
7917 | skb->pkt_type = PACKET_OTHERHOST; |
7918 | @@ -710,8 +717,14 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops, |
7919 | nf_bridge->mask |= BRNF_PKT_TYPE; |
7920 | } |
7921 | |
7922 | - if (pf == NFPROTO_IPV4 && br_parse_ip_options(skb)) |
7923 | - return NF_DROP; |
7924 | + if (pf == NFPROTO_IPV4) { |
7925 | + int frag_max = BR_INPUT_SKB_CB(skb)->frag_max_size; |
7926 | + |
7927 | + if (br_parse_ip_options(skb)) |
7928 | + return NF_DROP; |
7929 | + |
7930 | + IPCB(skb)->frag_max_size = frag_max; |
7931 | + } |
7932 | |
7933 | /* The physdev module checks on this */ |
7934 | nf_bridge->mask |= BRNF_BRIDGED; |
7935 | diff --git a/net/core/dev.c b/net/core/dev.c |
7936 | index 45109b70664e..22a53acdb5bb 100644 |
7937 | --- a/net/core/dev.c |
7938 | +++ b/net/core/dev.c |
7939 | @@ -3041,7 +3041,7 @@ static struct rps_dev_flow * |
7940 | set_rps_cpu(struct net_device *dev, struct sk_buff *skb, |
7941 | struct rps_dev_flow *rflow, u16 next_cpu) |
7942 | { |
7943 | - if (next_cpu != RPS_NO_CPU) { |
7944 | + if (next_cpu < nr_cpu_ids) { |
7945 | #ifdef CONFIG_RFS_ACCEL |
7946 | struct netdev_rx_queue *rxqueue; |
7947 | struct rps_dev_flow_table *flow_table; |
7948 | @@ -3146,7 +3146,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, |
7949 | * If the desired CPU (where last recvmsg was done) is |
7950 | * different from current CPU (one in the rx-queue flow |
7951 | * table entry), switch if one of the following holds: |
7952 | - * - Current CPU is unset (equal to RPS_NO_CPU). |
7953 | + * - Current CPU is unset (>= nr_cpu_ids). |
7954 | * - Current CPU is offline. |
7955 | * - The current CPU's queue tail has advanced beyond the |
7956 | * last packet that was enqueued using this table entry. |
7957 | @@ -3154,14 +3154,14 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, |
7958 | * have been dequeued, thus preserving in order delivery. |
7959 | */ |
7960 | if (unlikely(tcpu != next_cpu) && |
7961 | - (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || |
7962 | + (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || |
7963 | ((int)(per_cpu(softnet_data, tcpu).input_queue_head - |
7964 | rflow->last_qtail)) >= 0)) { |
7965 | tcpu = next_cpu; |
7966 | rflow = set_rps_cpu(dev, skb, rflow, next_cpu); |
7967 | } |
7968 | |
7969 | - if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { |
7970 | + if (tcpu < nr_cpu_ids && cpu_online(tcpu)) { |
7971 | *rflowp = rflow; |
7972 | cpu = tcpu; |
7973 | goto done; |
7974 | @@ -3202,14 +3202,14 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, |
7975 | struct rps_dev_flow_table *flow_table; |
7976 | struct rps_dev_flow *rflow; |
7977 | bool expire = true; |
7978 | - int cpu; |
7979 | + unsigned int cpu; |
7980 | |
7981 | rcu_read_lock(); |
7982 | flow_table = rcu_dereference(rxqueue->rps_flow_table); |
7983 | if (flow_table && flow_id <= flow_table->mask) { |
7984 | rflow = &flow_table->flows[flow_id]; |
7985 | cpu = ACCESS_ONCE(rflow->cpu); |
7986 | - if (rflow->filter == filter_id && cpu != RPS_NO_CPU && |
7987 | + if (rflow->filter == filter_id && cpu < nr_cpu_ids && |
7988 | ((int)(per_cpu(softnet_data, cpu).input_queue_head - |
7989 | rflow->last_qtail) < |
7990 | (int)(10 * flow_table->mask))) |
7991 | diff --git a/net/core/skbuff.c b/net/core/skbuff.c |
7992 | index 98d45fe72f51..e9f9a15fce4e 100644 |
7993 | --- a/net/core/skbuff.c |
7994 | +++ b/net/core/skbuff.c |
7995 | @@ -280,13 +280,14 @@ nodata: |
7996 | EXPORT_SYMBOL(__alloc_skb); |
7997 | |
7998 | /** |
7999 | - * build_skb - build a network buffer |
8000 | + * __build_skb - build a network buffer |
8001 | * @data: data buffer provided by caller |
8002 | - * @frag_size: size of fragment, or 0 if head was kmalloced |
8003 | + * @frag_size: size of data, or 0 if head was kmalloced |
8004 | * |
8005 | * Allocate a new &sk_buff. Caller provides space holding head and |
8006 | * skb_shared_info. @data must have been allocated by kmalloc() only if |
8007 | - * @frag_size is 0, otherwise data should come from the page allocator. |
8008 | + * @frag_size is 0, otherwise data should come from the page allocator |
8009 | + * or vmalloc() |
8010 | * The return is the new skb buffer. |
8011 | * On a failure the return is %NULL, and @data is not freed. |
8012 | * Notes : |
8013 | @@ -297,7 +298,7 @@ EXPORT_SYMBOL(__alloc_skb); |
8014 | * before giving packet to stack. |
8015 | * RX rings only contains data buffers, not full skbs. |
8016 | */ |
8017 | -struct sk_buff *build_skb(void *data, unsigned int frag_size) |
8018 | +struct sk_buff *__build_skb(void *data, unsigned int frag_size) |
8019 | { |
8020 | struct skb_shared_info *shinfo; |
8021 | struct sk_buff *skb; |
8022 | @@ -311,7 +312,6 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size) |
8023 | |
8024 | memset(skb, 0, offsetof(struct sk_buff, tail)); |
8025 | skb->truesize = SKB_TRUESIZE(size); |
8026 | - skb->head_frag = frag_size != 0; |
8027 | atomic_set(&skb->users, 1); |
8028 | skb->head = data; |
8029 | skb->data = data; |
8030 | @@ -328,6 +328,23 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size) |
8031 | |
8032 | return skb; |
8033 | } |
8034 | + |
8035 | +/* build_skb() is wrapper over __build_skb(), that specifically |
8036 | + * takes care of skb->head and skb->pfmemalloc |
8037 | + * This means that if @frag_size is not zero, then @data must be backed |
8038 | + * by a page fragment, not kmalloc() or vmalloc() |
8039 | + */ |
8040 | +struct sk_buff *build_skb(void *data, unsigned int frag_size) |
8041 | +{ |
8042 | + struct sk_buff *skb = __build_skb(data, frag_size); |
8043 | + |
8044 | + if (skb && frag_size) { |
8045 | + skb->head_frag = 1; |
8046 | + if (virt_to_head_page(data)->pfmemalloc) |
8047 | + skb->pfmemalloc = 1; |
8048 | + } |
8049 | + return skb; |
8050 | +} |
8051 | EXPORT_SYMBOL(build_skb); |
8052 | |
8053 | struct netdev_alloc_cache { |
8054 | @@ -348,7 +365,8 @@ static struct page *__page_frag_refill(struct netdev_alloc_cache *nc, |
8055 | gfp_t gfp = gfp_mask; |
8056 | |
8057 | if (order) { |
8058 | - gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY; |
8059 | + gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | |
8060 | + __GFP_NOMEMALLOC; |
8061 | page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); |
8062 | nc->frag.size = PAGE_SIZE << (page ? order : 0); |
8063 | } |
8064 | diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c |
8065 | index d9bc28ac5d1b..53bd53fbbee0 100644 |
8066 | --- a/net/ipv4/ip_forward.c |
8067 | +++ b/net/ipv4/ip_forward.c |
8068 | @@ -82,6 +82,9 @@ int ip_forward(struct sk_buff *skb) |
8069 | if (skb->pkt_type != PACKET_HOST) |
8070 | goto drop; |
8071 | |
8072 | + if (unlikely(skb->sk)) |
8073 | + goto drop; |
8074 | + |
8075 | if (skb_warn_if_lro(skb)) |
8076 | goto drop; |
8077 | |
8078 | diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c |
8079 | index d520492ba698..9d48dc427a5a 100644 |
8080 | --- a/net/ipv4/tcp_output.c |
8081 | +++ b/net/ipv4/tcp_output.c |
8082 | @@ -2751,39 +2751,65 @@ begin_fwd: |
8083 | } |
8084 | } |
8085 | |
8086 | -/* Send a fin. The caller locks the socket for us. This cannot be |
8087 | - * allowed to fail queueing a FIN frame under any circumstances. |
8088 | +/* We allow to exceed memory limits for FIN packets to expedite |
8089 | + * connection tear down and (memory) recovery. |
8090 | + * Otherwise tcp_send_fin() could be tempted to either delay FIN |
8091 | + * or even be forced to close flow without any FIN. |
8092 | + */ |
8093 | +static void sk_forced_wmem_schedule(struct sock *sk, int size) |
8094 | +{ |
8095 | + int amt, status; |
8096 | + |
8097 | + if (size <= sk->sk_forward_alloc) |
8098 | + return; |
8099 | + amt = sk_mem_pages(size); |
8100 | + sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; |
8101 | + sk_memory_allocated_add(sk, amt, &status); |
8102 | +} |
8103 | + |
8104 | +/* Send a FIN. The caller locks the socket for us. |
8105 | + * We should try to send a FIN packet really hard, but eventually give up. |
8106 | */ |
8107 | void tcp_send_fin(struct sock *sk) |
8108 | { |
8109 | + struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk); |
8110 | struct tcp_sock *tp = tcp_sk(sk); |
8111 | - struct sk_buff *skb = tcp_write_queue_tail(sk); |
8112 | - int mss_now; |
8113 | |
8114 | - /* Optimization, tack on the FIN if we have a queue of |
8115 | - * unsent frames. But be careful about outgoing SACKS |
8116 | - * and IP options. |
8117 | + /* Optimization, tack on the FIN if we have one skb in write queue and |
8118 | + * this skb was not yet sent, or we are under memory pressure. |
8119 | + * Note: in the latter case, FIN packet will be sent after a timeout, |
8120 | + * as TCP stack thinks it has already been transmitted. |
8121 | */ |
8122 | - mss_now = tcp_current_mss(sk); |
8123 | - |
8124 | - if (tcp_send_head(sk) != NULL) { |
8125 | - TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN; |
8126 | - TCP_SKB_CB(skb)->end_seq++; |
8127 | + if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) { |
8128 | +coalesce: |
8129 | + TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN; |
8130 | + TCP_SKB_CB(tskb)->end_seq++; |
8131 | tp->write_seq++; |
8132 | + if (!tcp_send_head(sk)) { |
8133 | + /* This means tskb was already sent. |
8134 | + * Pretend we included the FIN on previous transmit. |
8135 | + * We need to set tp->snd_nxt to the value it would have |
8136 | + * if FIN had been sent. This is because retransmit path |
8137 | + * does not change tp->snd_nxt. |
8138 | + */ |
8139 | + tp->snd_nxt++; |
8140 | + return; |
8141 | + } |
8142 | } else { |
8143 | - /* Socket is locked, keep trying until memory is available. */ |
8144 | - for (;;) { |
8145 | - skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); |
8146 | - if (skb) |
8147 | - break; |
8148 | - yield(); |
8149 | + skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation); |
8150 | + if (unlikely(!skb)) { |
8151 | + if (tskb) |
8152 | + goto coalesce; |
8153 | + return; |
8154 | } |
8155 | + skb_reserve(skb, MAX_TCP_HEADER); |
8156 | + sk_forced_wmem_schedule(sk, skb->truesize); |
8157 | /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ |
8158 | tcp_init_nondata_skb(skb, tp->write_seq, |
8159 | TCPHDR_ACK | TCPHDR_FIN); |
8160 | tcp_queue_skb(sk, skb); |
8161 | } |
8162 | - __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); |
8163 | + __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF); |
8164 | } |
8165 | |
8166 | /* We get here when a process closes a file descriptor (either due to |
8167 | diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c |
8168 | index 142f66aece18..0ca013d66492 100644 |
8169 | --- a/net/mac80211/mlme.c |
8170 | +++ b/net/mac80211/mlme.c |
8171 | @@ -2260,7 +2260,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) |
8172 | else |
8173 | ssid_len = ssid[1]; |
8174 | |
8175 | - ieee80211_send_probe_req(sdata, sdata->vif.addr, NULL, |
8176 | + ieee80211_send_probe_req(sdata, sdata->vif.addr, dst, |
8177 | ssid + 2, ssid_len, NULL, |
8178 | 0, (u32) -1, true, 0, |
8179 | ifmgd->associated->channel, false); |
8180 | diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c |
8181 | index 05919bf3f670..d1d7a8166f46 100644 |
8182 | --- a/net/netlink/af_netlink.c |
8183 | +++ b/net/netlink/af_netlink.c |
8184 | @@ -1616,13 +1616,11 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size, |
8185 | if (data == NULL) |
8186 | return NULL; |
8187 | |
8188 | - skb = build_skb(data, size); |
8189 | + skb = __build_skb(data, size); |
8190 | if (skb == NULL) |
8191 | vfree(data); |
8192 | - else { |
8193 | - skb->head_frag = 0; |
8194 | + else |
8195 | skb->destructor = netlink_skb_destructor; |
8196 | - } |
8197 | |
8198 | return skb; |
8199 | } |
8200 | diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c |
8201 | index 2ca9f2e93139..53745f4c2bf5 100644 |
8202 | --- a/sound/pci/emu10k1/emuproc.c |
8203 | +++ b/sound/pci/emu10k1/emuproc.c |
8204 | @@ -241,31 +241,22 @@ static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry, |
8205 | struct snd_emu10k1 *emu = entry->private_data; |
8206 | u32 value; |
8207 | u32 value2; |
8208 | - unsigned long flags; |
8209 | u32 rate; |
8210 | |
8211 | if (emu->card_capabilities->emu_model) { |
8212 | - spin_lock_irqsave(&emu->emu_lock, flags); |
8213 | snd_emu1010_fpga_read(emu, 0x38, &value); |
8214 | - spin_unlock_irqrestore(&emu->emu_lock, flags); |
8215 | if ((value & 0x1) == 0) { |
8216 | - spin_lock_irqsave(&emu->emu_lock, flags); |
8217 | snd_emu1010_fpga_read(emu, 0x2a, &value); |
8218 | snd_emu1010_fpga_read(emu, 0x2b, &value2); |
8219 | - spin_unlock_irqrestore(&emu->emu_lock, flags); |
8220 | rate = 0x1770000 / (((value << 5) | value2)+1); |
8221 | snd_iprintf(buffer, "ADAT Locked : %u\n", rate); |
8222 | } else { |
8223 | snd_iprintf(buffer, "ADAT Unlocked\n"); |
8224 | } |
8225 | - spin_lock_irqsave(&emu->emu_lock, flags); |
8226 | snd_emu1010_fpga_read(emu, 0x20, &value); |
8227 | - spin_unlock_irqrestore(&emu->emu_lock, flags); |
8228 | if ((value & 0x4) == 0) { |
8229 | - spin_lock_irqsave(&emu->emu_lock, flags); |
8230 | snd_emu1010_fpga_read(emu, 0x28, &value); |
8231 | snd_emu1010_fpga_read(emu, 0x29, &value2); |
8232 | - spin_unlock_irqrestore(&emu->emu_lock, flags); |
8233 | rate = 0x1770000 / (((value << 5) | value2)+1); |
8234 | snd_iprintf(buffer, "SPDIF Locked : %d\n", rate); |
8235 | } else { |
8236 | @@ -410,14 +401,11 @@ static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry, |
8237 | { |
8238 | struct snd_emu10k1 *emu = entry->private_data; |
8239 | u32 value; |
8240 | - unsigned long flags; |
8241 | int i; |
8242 | snd_iprintf(buffer, "EMU1010 Registers:\n\n"); |
8243 | |
8244 | for(i = 0; i < 0x40; i+=1) { |
8245 | - spin_lock_irqsave(&emu->emu_lock, flags); |
8246 | snd_emu1010_fpga_read(emu, i, &value); |
8247 | - spin_unlock_irqrestore(&emu->emu_lock, flags); |
8248 | snd_iprintf(buffer, "%02X: %08X, %02X\n", i, value, (value >> 8) & 0x7f); |
8249 | } |
8250 | } |
8251 | diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
8252 | index f9d12c0a7e5a..2fd490b1764b 100644 |
8253 | --- a/sound/pci/hda/patch_realtek.c |
8254 | +++ b/sound/pci/hda/patch_realtek.c |
8255 | @@ -5047,12 +5047,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
8256 | SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK), |
8257 | SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK), |
8258 | SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
8259 | + SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), |
8260 | SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), |
8261 | SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP), |
8262 | SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
8263 | SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), |
8264 | SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK), |
8265 | SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
8266 | + SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK), |
8267 | SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK), |
8268 | SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
8269 | SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), |
8270 | @@ -5142,6 +5144,16 @@ static const struct hda_model_fixup alc269_fixup_models[] = { |
8271 | {0x1b, 0x411111f0}, \ |
8272 | {0x1e, 0x411111f0} |
8273 | |
8274 | +#define ALC256_STANDARD_PINS \ |
8275 | + {0x12, 0x90a60140}, \ |
8276 | + {0x14, 0x90170110}, \ |
8277 | + {0x19, 0x411111f0}, \ |
8278 | + {0x1a, 0x411111f0}, \ |
8279 | + {0x1b, 0x411111f0}, \ |
8280 | + {0x1d, 0x40700001}, \ |
8281 | + {0x1e, 0x411111f0}, \ |
8282 | + {0x21, 0x02211020} |
8283 | + |
8284 | #define ALC282_STANDARD_PINS \ |
8285 | {0x14, 0x90170110}, \ |
8286 | {0x18, 0x411111f0}, \ |
8287 | @@ -5235,15 +5247,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { |
8288 | {0x1d, 0x40700001}, |
8289 | {0x21, 0x02211050}), |
8290 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, |
8291 | - {0x12, 0x90a60140}, |
8292 | - {0x13, 0x40000000}, |
8293 | - {0x14, 0x90170110}, |
8294 | - {0x19, 0x411111f0}, |
8295 | - {0x1a, 0x411111f0}, |
8296 | - {0x1b, 0x411111f0}, |
8297 | - {0x1d, 0x40700001}, |
8298 | - {0x1e, 0x411111f0}, |
8299 | - {0x21, 0x02211020}), |
8300 | + ALC256_STANDARD_PINS, |
8301 | + {0x13, 0x40000000}), |
8302 | + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, |
8303 | + ALC256_STANDARD_PINS, |
8304 | + {0x13, 0x411111f0}), |
8305 | SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, |
8306 | {0x12, 0x90a60130}, |
8307 | {0x13, 0x40000000}, |
8308 | @@ -5563,6 +5571,8 @@ static int patch_alc269(struct hda_codec *codec) |
8309 | break; |
8310 | case 0x10ec0256: |
8311 | spec->codec_variant = ALC269_TYPE_ALC256; |
8312 | + spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */ |
8313 | + alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/ |
8314 | break; |
8315 | } |
8316 | |
8317 | @@ -5576,8 +5586,8 @@ static int patch_alc269(struct hda_codec *codec) |
8318 | if (err < 0) |
8319 | goto error; |
8320 | |
8321 | - if (!spec->gen.no_analog && spec->gen.beep_nid) |
8322 | - set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT); |
8323 | + if (!spec->gen.no_analog && spec->gen.beep_nid && spec->gen.mixer_nid) |
8324 | + set_beep_amp(spec, spec->gen.mixer_nid, 0x04, HDA_INPUT); |
8325 | |
8326 | codec->patch_ops = alc_patch_ops; |
8327 | #ifdef CONFIG_PM |
8328 | diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c |
8329 | index 7d3a6accaf9a..e770ee6f36da 100644 |
8330 | --- a/sound/soc/codecs/cs4271.c |
8331 | +++ b/sound/soc/codecs/cs4271.c |
8332 | @@ -561,10 +561,10 @@ static int cs4271_codec_probe(struct snd_soc_codec *codec) |
8333 | if (gpio_is_valid(cs4271->gpio_nreset)) { |
8334 | /* Reset codec */ |
8335 | gpio_direction_output(cs4271->gpio_nreset, 0); |
8336 | - udelay(1); |
8337 | + mdelay(1); |
8338 | gpio_set_value(cs4271->gpio_nreset, 1); |
8339 | /* Give the codec time to wake up */ |
8340 | - udelay(1); |
8341 | + mdelay(1); |
8342 | } |
8343 | |
8344 | ret = regmap_update_bits(cs4271->regmap, CS4271_MODE2, |
8345 | diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c |
8346 | index 474cae82a874..8c09e3ffdcaa 100644 |
8347 | --- a/sound/soc/codecs/pcm512x.c |
8348 | +++ b/sound/soc/codecs/pcm512x.c |
8349 | @@ -304,9 +304,9 @@ static const struct soc_enum pcm512x_veds = |
8350 | static const struct snd_kcontrol_new pcm512x_controls[] = { |
8351 | SOC_DOUBLE_R_TLV("Digital Playback Volume", PCM512x_DIGITAL_VOLUME_2, |
8352 | PCM512x_DIGITAL_VOLUME_3, 0, 255, 1, digital_tlv), |
8353 | -SOC_DOUBLE_TLV("Playback Volume", PCM512x_ANALOG_GAIN_CTRL, |
8354 | +SOC_DOUBLE_TLV("Analogue Playback Volume", PCM512x_ANALOG_GAIN_CTRL, |
8355 | PCM512x_LAGN_SHIFT, PCM512x_RAGN_SHIFT, 1, 1, analog_tlv), |
8356 | -SOC_DOUBLE_TLV("Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST, |
8357 | +SOC_DOUBLE_TLV("Analogue Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST, |
8358 | PCM512x_AGBL_SHIFT, PCM512x_AGBR_SHIFT, 1, 0, boost_tlv), |
8359 | SOC_DOUBLE("Digital Playback Switch", PCM512x_MUTE, PCM512x_RQML_SHIFT, |
8360 | PCM512x_RQMR_SHIFT, 1, 1), |
8361 | @@ -576,8 +576,8 @@ static int pcm512x_find_pll_coeff(struct snd_soc_dai *dai, |
8362 | |
8363 | /* pllin_rate / P (or here, den) cannot be greater than 20 MHz */ |
8364 | if (pllin_rate / den > 20000000 && num < 8) { |
8365 | - num *= 20000000 / (pllin_rate / den); |
8366 | - den *= 20000000 / (pllin_rate / den); |
8367 | + num *= DIV_ROUND_UP(pllin_rate / den, 20000000); |
8368 | + den *= DIV_ROUND_UP(pllin_rate / den, 20000000); |
8369 | } |
8370 | dev_dbg(dev, "num / den = %lu / %lu\n", num, den); |
8371 | |
8372 | diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c |
8373 | index 31bb4801a005..9e71c768966f 100644 |
8374 | --- a/sound/soc/codecs/wm8741.c |
8375 | +++ b/sound/soc/codecs/wm8741.c |
8376 | @@ -123,7 +123,7 @@ static struct { |
8377 | }; |
8378 | |
8379 | static const unsigned int rates_11289[] = { |
8380 | - 44100, 88235, |
8381 | + 44100, 88200, |
8382 | }; |
8383 | |
8384 | static const struct snd_pcm_hw_constraint_list constraints_11289 = { |
8385 | @@ -150,7 +150,7 @@ static const struct snd_pcm_hw_constraint_list constraints_16384 = { |
8386 | }; |
8387 | |
8388 | static const unsigned int rates_16934[] = { |
8389 | - 44100, 88235, |
8390 | + 44100, 88200, |
8391 | }; |
8392 | |
8393 | static const struct snd_pcm_hw_constraint_list constraints_16934 = { |
8394 | @@ -168,7 +168,7 @@ static const struct snd_pcm_hw_constraint_list constraints_18432 = { |
8395 | }; |
8396 | |
8397 | static const unsigned int rates_22579[] = { |
8398 | - 44100, 88235, 1764000 |
8399 | + 44100, 88200, 176400 |
8400 | }; |
8401 | |
8402 | static const struct snd_pcm_hw_constraint_list constraints_22579 = { |
8403 | @@ -186,7 +186,7 @@ static const struct snd_pcm_hw_constraint_list constraints_24576 = { |
8404 | }; |
8405 | |
8406 | static const unsigned int rates_36864[] = { |
8407 | - 48000, 96000, 19200 |
8408 | + 48000, 96000, 192000 |
8409 | }; |
8410 | |
8411 | static const struct snd_pcm_hw_constraint_list constraints_36864 = { |
8412 | diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c |
8413 | index b6bb5947a8a8..8c2b9be80a9a 100644 |
8414 | --- a/sound/soc/davinci/davinci-evm.c |
8415 | +++ b/sound/soc/davinci/davinci-evm.c |
8416 | @@ -425,18 +425,8 @@ static int davinci_evm_probe(struct platform_device *pdev) |
8417 | return ret; |
8418 | } |
8419 | |
8420 | -static int davinci_evm_remove(struct platform_device *pdev) |
8421 | -{ |
8422 | - struct snd_soc_card *card = platform_get_drvdata(pdev); |
8423 | - |
8424 | - snd_soc_unregister_card(card); |
8425 | - |
8426 | - return 0; |
8427 | -} |
8428 | - |
8429 | static struct platform_driver davinci_evm_driver = { |
8430 | .probe = davinci_evm_probe, |
8431 | - .remove = davinci_evm_remove, |
8432 | .driver = { |
8433 | .name = "davinci_evm", |
8434 | .pm = &snd_soc_pm_ops, |
8435 | diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c |
8436 | index 9a28365126f9..32631a86078b 100644 |
8437 | --- a/sound/usb/quirks.c |
8438 | +++ b/sound/usb/quirks.c |
8439 | @@ -1115,6 +1115,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) |
8440 | { |
8441 | /* devices which do not support reading the sample rate. */ |
8442 | switch (chip->usb_id) { |
8443 | + case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */ |
8444 | case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */ |
8445 | case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ |
8446 | return true; |
8447 | diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c |
8448 | index dcc665228c71..deb3569ab004 100644 |
8449 | --- a/tools/lib/traceevent/kbuffer-parse.c |
8450 | +++ b/tools/lib/traceevent/kbuffer-parse.c |
8451 | @@ -372,7 +372,6 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr, |
8452 | switch (type_len) { |
8453 | case KBUFFER_TYPE_PADDING: |
8454 | *length = read_4(kbuf, data); |
8455 | - data += *length; |
8456 | break; |
8457 | |
8458 | case KBUFFER_TYPE_TIME_EXTEND: |
8459 | diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile |
8460 | index cc224080b525..0884d31ae12e 100644 |
8461 | --- a/tools/perf/config/Makefile |
8462 | +++ b/tools/perf/config/Makefile |
8463 | @@ -651,7 +651,7 @@ ifeq (${IS_64_BIT}, 1) |
8464 | NO_PERF_READ_VDSO32 := 1 |
8465 | endif |
8466 | endif |
8467 | - ifneq (${IS_X86_64}, 1) |
8468 | + ifneq ($(ARCH), x86) |
8469 | NO_PERF_READ_VDSOX32 := 1 |
8470 | endif |
8471 | ifndef NO_PERF_READ_VDSOX32 |
8472 | @@ -699,7 +699,7 @@ sysconfdir = $(prefix)/etc |
8473 | ETC_PERFCONFIG = etc/perfconfig |
8474 | endif |
8475 | ifndef lib |
8476 | -ifeq ($(IS_X86_64),1) |
8477 | +ifeq ($(ARCH)$(IS_64_BIT), x861) |
8478 | lib = lib64 |
8479 | else |
8480 | lib = lib |
8481 | diff --git a/tools/perf/tests/make b/tools/perf/tests/make |
8482 | index 75709d2b17b4..bff85324f799 100644 |
8483 | --- a/tools/perf/tests/make |
8484 | +++ b/tools/perf/tests/make |
8485 | @@ -5,7 +5,7 @@ include config/Makefile.arch |
8486 | |
8487 | # FIXME looks like x86 is the only arch running tests ;-) |
8488 | # we need some IS_(32/64) flag to make this generic |
8489 | -ifeq ($(IS_X86_64),1) |
8490 | +ifeq ($(ARCH)$(IS_64_BIT), x861) |
8491 | lib = lib64 |
8492 | else |
8493 | lib = lib |
8494 | diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c |
8495 | index 6da965bdbc2c..85b523885f9d 100644 |
8496 | --- a/tools/perf/util/cloexec.c |
8497 | +++ b/tools/perf/util/cloexec.c |
8498 | @@ -7,6 +7,12 @@ |
8499 | |
8500 | static unsigned long flag = PERF_FLAG_FD_CLOEXEC; |
8501 | |
8502 | +int __weak sched_getcpu(void) |
8503 | +{ |
8504 | + errno = ENOSYS; |
8505 | + return -1; |
8506 | +} |
8507 | + |
8508 | static int perf_flag_probe(void) |
8509 | { |
8510 | /* use 'safest' configuration as used in perf_evsel__fallback() */ |
8511 | diff --git a/tools/perf/util/cloexec.h b/tools/perf/util/cloexec.h |
8512 | index 94a5a7d829d5..68888c29b04a 100644 |
8513 | --- a/tools/perf/util/cloexec.h |
8514 | +++ b/tools/perf/util/cloexec.h |
8515 | @@ -3,4 +3,10 @@ |
8516 | |
8517 | unsigned long perf_event_open_cloexec_flag(void); |
8518 | |
8519 | +#ifdef __GLIBC_PREREQ |
8520 | +#if !__GLIBC_PREREQ(2, 6) |
8521 | +extern int sched_getcpu(void) __THROW; |
8522 | +#endif |
8523 | +#endif |
8524 | + |
8525 | #endif /* __PERF_CLOEXEC_H */ |
8526 | diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c |
8527 | index 33b7a2aef713..9bdf007d243a 100644 |
8528 | --- a/tools/perf/util/symbol-elf.c |
8529 | +++ b/tools/perf/util/symbol-elf.c |
8530 | @@ -74,6 +74,10 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym) |
8531 | return GELF_ST_TYPE(sym->st_info); |
8532 | } |
8533 | |
8534 | +#ifndef STT_GNU_IFUNC |
8535 | +#define STT_GNU_IFUNC 10 |
8536 | +#endif |
8537 | + |
8538 | static inline int elf_sym__is_function(const GElf_Sym *sym) |
8539 | { |
8540 | return (elf_sym__type(sym) == STT_FUNC || |
8541 | diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile |
8542 | index d1b3a361e526..4039854560d0 100644 |
8543 | --- a/tools/power/x86/turbostat/Makefile |
8544 | +++ b/tools/power/x86/turbostat/Makefile |
8545 | @@ -1,8 +1,12 @@ |
8546 | CC = $(CROSS_COMPILE)gcc |
8547 | -BUILD_OUTPUT := $(PWD) |
8548 | +BUILD_OUTPUT := $(CURDIR) |
8549 | PREFIX := /usr |
8550 | DESTDIR := |
8551 | |
8552 | +ifeq ("$(origin O)", "command line") |
8553 | + BUILD_OUTPUT := $(O) |
8554 | +endif |
8555 | + |
8556 | turbostat : turbostat.c |
8557 | CFLAGS += -Wall |
8558 | CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"' |
8559 | diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c |
8560 | index c9f60f524588..e5abe7cb2990 100644 |
8561 | --- a/virt/kvm/arm/vgic.c |
8562 | +++ b/virt/kvm/arm/vgic.c |
8563 | @@ -1371,6 +1371,9 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, |
8564 | goto out; |
8565 | } |
8566 | |
8567 | + if (irq_num >= kvm->arch.vgic.nr_irqs) |
8568 | + return -EINVAL; |
8569 | + |
8570 | vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level); |
8571 | if (vcpu_id >= 0) { |
8572 | /* kick the specified vcpu */ |
8573 | diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c |
8574 | index cc6a25d95fbf..f8f3f5fe53d3 100644 |
8575 | --- a/virt/kvm/kvm_main.c |
8576 | +++ b/virt/kvm/kvm_main.c |
8577 | @@ -1653,8 +1653,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
8578 | ghc->generation = slots->generation; |
8579 | ghc->len = len; |
8580 | ghc->memslot = gfn_to_memslot(kvm, start_gfn); |
8581 | - ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail); |
8582 | - if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) { |
8583 | + ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL); |
8584 | + if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) { |
8585 | ghc->hva += offset; |
8586 | } else { |
8587 | /* |