Contents of /trunk/kernel-magellan/patches-3.11/0104-3.11.5-all-fixes.patch
Parent Directory | Revision Log
Revision 2302 -
(show annotations)
(download)
Mon Oct 14 07:03:23 2013 UTC (10 years, 11 months ago) by niro
File size: 176936 byte(s)
-linux-3.11.5
1 | diff --git a/Makefile b/Makefile |
2 | index 97bae8a1..83121b7 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 3 |
7 | PATCHLEVEL = 11 |
8 | -SUBLEVEL = 4 |
9 | +SUBLEVEL = 5 |
10 | EXTRAVERSION = |
11 | NAME = Linux for Workgroups |
12 | |
13 | diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig |
14 | index 6e572c6..aba4ec7 100644 |
15 | --- a/arch/arm/configs/multi_v7_defconfig |
16 | +++ b/arch/arm/configs/multi_v7_defconfig |
17 | @@ -46,6 +46,7 @@ CONFIG_ARCH_ZYNQ=y |
18 | CONFIG_SMP=y |
19 | CONFIG_HIGHPTE=y |
20 | CONFIG_ARM_APPENDED_DTB=y |
21 | +CONFIG_ARM_ATAG_DTB_COMPAT=y |
22 | CONFIG_NET=y |
23 | CONFIG_UNIX=y |
24 | CONFIG_INET=y |
25 | diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c |
26 | index b7840e7..cafdd3f 100644 |
27 | --- a/arch/arm/kvm/reset.c |
28 | +++ b/arch/arm/kvm/reset.c |
29 | @@ -58,14 +58,14 @@ static const struct kvm_irq_level a15_vtimer_irq = { |
30 | */ |
31 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu) |
32 | { |
33 | - struct kvm_regs *cpu_reset; |
34 | + struct kvm_regs *reset_regs; |
35 | const struct kvm_irq_level *cpu_vtimer_irq; |
36 | |
37 | switch (vcpu->arch.target) { |
38 | case KVM_ARM_TARGET_CORTEX_A15: |
39 | if (vcpu->vcpu_id > a15_max_cpu_idx) |
40 | return -EINVAL; |
41 | - cpu_reset = &a15_regs_reset; |
42 | + reset_regs = &a15_regs_reset; |
43 | vcpu->arch.midr = read_cpuid_id(); |
44 | cpu_vtimer_irq = &a15_vtimer_irq; |
45 | break; |
46 | @@ -74,7 +74,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) |
47 | } |
48 | |
49 | /* Reset core registers */ |
50 | - memcpy(&vcpu->arch.regs, cpu_reset, sizeof(vcpu->arch.regs)); |
51 | + memcpy(&vcpu->arch.regs, reset_regs, sizeof(vcpu->arch.regs)); |
52 | |
53 | /* Reset CP15 registers */ |
54 | kvm_reset_coprocs(vcpu); |
55 | diff --git a/arch/arm/mach-integrator/pci_v3.h b/arch/arm/mach-integrator/pci_v3.h |
56 | index 755fd29..06a9e2e 100644 |
57 | --- a/arch/arm/mach-integrator/pci_v3.h |
58 | +++ b/arch/arm/mach-integrator/pci_v3.h |
59 | @@ -1,2 +1,9 @@ |
60 | /* Simple oneliner include to the PCIv3 early init */ |
61 | +#ifdef CONFIG_PCI |
62 | extern int pci_v3_early_init(void); |
63 | +#else |
64 | +static inline int pci_v3_early_init(void) |
65 | +{ |
66 | + return 0; |
67 | +} |
68 | +#endif |
69 | diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig |
70 | index ef3a8da..2f1bcc6 100644 |
71 | --- a/arch/arm/mach-tegra/Kconfig |
72 | +++ b/arch/arm/mach-tegra/Kconfig |
73 | @@ -2,18 +2,24 @@ config ARCH_TEGRA |
74 | bool "NVIDIA Tegra" if ARCH_MULTI_V7 |
75 | select ARCH_HAS_CPUFREQ |
76 | select ARCH_REQUIRE_GPIOLIB |
77 | + select ARM_GIC |
78 | select CLKDEV_LOOKUP |
79 | select CLKSRC_MMIO |
80 | select CLKSRC_OF |
81 | select COMMON_CLK |
82 | + select CPU_V7 |
83 | select GENERIC_CLOCKEVENTS |
84 | select HAVE_ARM_SCU if SMP |
85 | select HAVE_ARM_TWD if LOCAL_TIMERS |
86 | select HAVE_CLK |
87 | select HAVE_SMP |
88 | select MIGHT_HAVE_CACHE_L2X0 |
89 | + select PINCTRL |
90 | select SOC_BUS |
91 | select SPARSE_IRQ |
92 | + select USB_ARCH_HAS_EHCI if USB_SUPPORT |
93 | + select USB_ULPI if USB_PHY |
94 | + select USB_ULPI_VIEWPORT if USB_PHY |
95 | select USE_OF |
96 | help |
97 | This enables support for NVIDIA Tegra based systems. |
98 | @@ -27,15 +33,9 @@ config ARCH_TEGRA_2x_SOC |
99 | select ARM_ERRATA_720789 |
100 | select ARM_ERRATA_754327 if SMP |
101 | select ARM_ERRATA_764369 if SMP |
102 | - select ARM_GIC |
103 | - select CPU_V7 |
104 | - select PINCTRL |
105 | select PINCTRL_TEGRA20 |
106 | select PL310_ERRATA_727915 if CACHE_L2X0 |
107 | select PL310_ERRATA_769419 if CACHE_L2X0 |
108 | - select USB_ARCH_HAS_EHCI if USB_SUPPORT |
109 | - select USB_ULPI if USB_PHY |
110 | - select USB_ULPI_VIEWPORT if USB_PHY |
111 | help |
112 | Support for NVIDIA Tegra AP20 and T20 processors, based on the |
113 | ARM CortexA9MP CPU and the ARM PL310 L2 cache controller |
114 | @@ -44,14 +44,8 @@ config ARCH_TEGRA_3x_SOC |
115 | bool "Enable support for Tegra30 family" |
116 | select ARM_ERRATA_754322 |
117 | select ARM_ERRATA_764369 if SMP |
118 | - select ARM_GIC |
119 | - select CPU_V7 |
120 | - select PINCTRL |
121 | select PINCTRL_TEGRA30 |
122 | select PL310_ERRATA_769419 if CACHE_L2X0 |
123 | - select USB_ARCH_HAS_EHCI if USB_SUPPORT |
124 | - select USB_ULPI if USB_PHY |
125 | - select USB_ULPI_VIEWPORT if USB_PHY |
126 | help |
127 | Support for NVIDIA Tegra T30 processor family, based on the |
128 | ARM CortexA9MP CPU and the ARM PL310 L2 cache controller |
129 | @@ -59,10 +53,7 @@ config ARCH_TEGRA_3x_SOC |
130 | config ARCH_TEGRA_114_SOC |
131 | bool "Enable support for Tegra114 family" |
132 | select HAVE_ARM_ARCH_TIMER |
133 | - select ARM_GIC |
134 | select ARM_L1_CACHE_SHIFT_6 |
135 | - select CPU_V7 |
136 | - select PINCTRL |
137 | select PINCTRL_TEGRA114 |
138 | help |
139 | Support for NVIDIA Tegra T114 processor family, based on the |
140 | diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c |
141 | index 869a1c6..12f828a 100644 |
142 | --- a/arch/avr32/kernel/time.c |
143 | +++ b/arch/avr32/kernel/time.c |
144 | @@ -98,7 +98,14 @@ static void comparator_mode(enum clock_event_mode mode, |
145 | case CLOCK_EVT_MODE_SHUTDOWN: |
146 | sysreg_write(COMPARE, 0); |
147 | pr_debug("%s: stop\n", evdev->name); |
148 | - cpu_idle_poll_ctrl(false); |
149 | + if (evdev->mode == CLOCK_EVT_MODE_ONESHOT || |
150 | + evdev->mode == CLOCK_EVT_MODE_RESUME) { |
151 | + /* |
152 | + * Only disable idle poll if we have forced that |
153 | + * in a previous call. |
154 | + */ |
155 | + cpu_idle_poll_ctrl(false); |
156 | + } |
157 | break; |
158 | default: |
159 | BUG(); |
160 | diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c |
161 | index b20ff17..ef9f409 100644 |
162 | --- a/arch/powerpc/kernel/iommu.c |
163 | +++ b/arch/powerpc/kernel/iommu.c |
164 | @@ -661,7 +661,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid) |
165 | /* number of bytes needed for the bitmap */ |
166 | sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); |
167 | |
168 | - page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz)); |
169 | + page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz)); |
170 | if (!page) |
171 | panic("iommu_init_table: Can't allocate %ld bytes\n", sz); |
172 | tbl->it_map = page_address(page); |
173 | diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c |
174 | index 27a90b9..b4e6676 100644 |
175 | --- a/arch/powerpc/kernel/sysfs.c |
176 | +++ b/arch/powerpc/kernel/sysfs.c |
177 | @@ -17,6 +17,7 @@ |
178 | #include <asm/machdep.h> |
179 | #include <asm/smp.h> |
180 | #include <asm/pmc.h> |
181 | +#include <asm/firmware.h> |
182 | |
183 | #include "cacheinfo.h" |
184 | |
185 | @@ -179,15 +180,25 @@ SYSFS_PMCSETUP(spurr, SPRN_SPURR); |
186 | SYSFS_PMCSETUP(dscr, SPRN_DSCR); |
187 | SYSFS_PMCSETUP(pir, SPRN_PIR); |
188 | |
189 | +/* |
190 | + Lets only enable read for phyp resources and |
191 | + enable write when needed with a separate function. |
192 | + Lets be conservative and default to pseries. |
193 | +*/ |
194 | static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra); |
195 | static DEVICE_ATTR(spurr, 0400, show_spurr, NULL); |
196 | static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr); |
197 | -static DEVICE_ATTR(purr, 0600, show_purr, store_purr); |
198 | +static DEVICE_ATTR(purr, 0400, show_purr, store_purr); |
199 | static DEVICE_ATTR(pir, 0400, show_pir, NULL); |
200 | |
201 | unsigned long dscr_default = 0; |
202 | EXPORT_SYMBOL(dscr_default); |
203 | |
204 | +static void add_write_permission_dev_attr(struct device_attribute *attr) |
205 | +{ |
206 | + attr->attr.mode |= 0200; |
207 | +} |
208 | + |
209 | static ssize_t show_dscr_default(struct device *dev, |
210 | struct device_attribute *attr, char *buf) |
211 | { |
212 | @@ -394,8 +405,11 @@ static void register_cpu_online(unsigned int cpu) |
213 | if (cpu_has_feature(CPU_FTR_MMCRA)) |
214 | device_create_file(s, &dev_attr_mmcra); |
215 | |
216 | - if (cpu_has_feature(CPU_FTR_PURR)) |
217 | + if (cpu_has_feature(CPU_FTR_PURR)) { |
218 | + if (!firmware_has_feature(FW_FEATURE_LPAR)) |
219 | + add_write_permission_dev_attr(&dev_attr_purr); |
220 | device_create_file(s, &dev_attr_purr); |
221 | + } |
222 | |
223 | if (cpu_has_feature(CPU_FTR_SPURR)) |
224 | device_create_file(s, &dev_attr_spurr); |
225 | diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S |
226 | index 0554d1f..e3f8da3 100644 |
227 | --- a/arch/powerpc/kernel/tm.S |
228 | +++ b/arch/powerpc/kernel/tm.S |
229 | @@ -79,6 +79,11 @@ _GLOBAL(tm_abort) |
230 | TABORT(R3) |
231 | blr |
232 | |
233 | + .section ".toc","aw" |
234 | +DSCR_DEFAULT: |
235 | + .tc dscr_default[TC],dscr_default |
236 | + |
237 | + .section ".text" |
238 | |
239 | /* void tm_reclaim(struct thread_struct *thread, |
240 | * unsigned long orig_msr, |
241 | @@ -123,6 +128,7 @@ _GLOBAL(tm_reclaim) |
242 | mr r15, r14 |
243 | ori r15, r15, MSR_FP |
244 | li r16, MSR_RI |
245 | + ori r16, r16, MSR_EE /* IRQs hard off */ |
246 | andc r15, r15, r16 |
247 | oris r15, r15, MSR_VEC@h |
248 | #ifdef CONFIG_VSX |
249 | @@ -187,11 +193,18 @@ dont_backup_fp: |
250 | std r1, PACATMSCRATCH(r13) |
251 | ld r1, PACAR1(r13) |
252 | |
253 | + /* Store the PPR in r11 and reset to decent value */ |
254 | + std r11, GPR11(r1) /* Temporary stash */ |
255 | + mfspr r11, SPRN_PPR |
256 | + HMT_MEDIUM |
257 | + |
258 | /* Now get some more GPRS free */ |
259 | std r7, GPR7(r1) /* Temporary stash */ |
260 | std r12, GPR12(r1) /* '' '' '' */ |
261 | ld r12, STACK_PARAM(0)(r1) /* Param 0, thread_struct * */ |
262 | |
263 | + std r11, THREAD_TM_PPR(r12) /* Store PPR and free r11 */ |
264 | + |
265 | addi r7, r12, PT_CKPT_REGS /* Thread's ckpt_regs */ |
266 | |
267 | /* Make r7 look like an exception frame so that we |
268 | @@ -203,15 +216,19 @@ dont_backup_fp: |
269 | SAVE_GPR(0, r7) /* user r0 */ |
270 | SAVE_GPR(2, r7) /* user r2 */ |
271 | SAVE_4GPRS(3, r7) /* user r3-r6 */ |
272 | - SAVE_4GPRS(8, r7) /* user r8-r11 */ |
273 | + SAVE_GPR(8, r7) /* user r8 */ |
274 | + SAVE_GPR(9, r7) /* user r9 */ |
275 | + SAVE_GPR(10, r7) /* user r10 */ |
276 | ld r3, PACATMSCRATCH(r13) /* user r1 */ |
277 | ld r4, GPR7(r1) /* user r7 */ |
278 | - ld r5, GPR12(r1) /* user r12 */ |
279 | - GET_SCRATCH0(6) /* user r13 */ |
280 | + ld r5, GPR11(r1) /* user r11 */ |
281 | + ld r6, GPR12(r1) /* user r12 */ |
282 | + GET_SCRATCH0(8) /* user r13 */ |
283 | std r3, GPR1(r7) |
284 | std r4, GPR7(r7) |
285 | - std r5, GPR12(r7) |
286 | - std r6, GPR13(r7) |
287 | + std r5, GPR11(r7) |
288 | + std r6, GPR12(r7) |
289 | + std r8, GPR13(r7) |
290 | |
291 | SAVE_NVGPRS(r7) /* user r14-r31 */ |
292 | |
293 | @@ -234,14 +251,12 @@ dont_backup_fp: |
294 | std r6, _XER(r7) |
295 | |
296 | |
297 | - /* ******************** TAR, PPR, DSCR ********** */ |
298 | + /* ******************** TAR, DSCR ********** */ |
299 | mfspr r3, SPRN_TAR |
300 | - mfspr r4, SPRN_PPR |
301 | - mfspr r5, SPRN_DSCR |
302 | + mfspr r4, SPRN_DSCR |
303 | |
304 | std r3, THREAD_TM_TAR(r12) |
305 | - std r4, THREAD_TM_PPR(r12) |
306 | - std r5, THREAD_TM_DSCR(r12) |
307 | + std r4, THREAD_TM_DSCR(r12) |
308 | |
309 | /* MSR and flags: We don't change CRs, and we don't need to alter |
310 | * MSR. |
311 | @@ -258,7 +273,7 @@ dont_backup_fp: |
312 | std r3, THREAD_TM_TFHAR(r12) |
313 | std r4, THREAD_TM_TFIAR(r12) |
314 | |
315 | - /* AMR and PPR are checkpointed too, but are unsupported by Linux. */ |
316 | + /* AMR is checkpointed too, but is unsupported by Linux. */ |
317 | |
318 | /* Restore original MSR/IRQ state & clear TM mode */ |
319 | ld r14, TM_FRAME_L0(r1) /* Orig MSR */ |
320 | @@ -274,6 +289,12 @@ dont_backup_fp: |
321 | mtcr r4 |
322 | mtlr r0 |
323 | ld r2, 40(r1) |
324 | + |
325 | + /* Load system default DSCR */ |
326 | + ld r4, DSCR_DEFAULT@toc(r2) |
327 | + ld r0, 0(r4) |
328 | + mtspr SPRN_DSCR, r0 |
329 | + |
330 | blr |
331 | |
332 | |
333 | @@ -358,25 +379,24 @@ dont_restore_fp: |
334 | |
335 | restore_gprs: |
336 | |
337 | - /* ******************** TAR, PPR, DSCR ********** */ |
338 | - ld r4, THREAD_TM_TAR(r3) |
339 | - ld r5, THREAD_TM_PPR(r3) |
340 | - ld r6, THREAD_TM_DSCR(r3) |
341 | + /* ******************** CR,LR,CCR,MSR ********** */ |
342 | + ld r4, _CTR(r7) |
343 | + ld r5, _LINK(r7) |
344 | + ld r6, _CCR(r7) |
345 | + ld r8, _XER(r7) |
346 | |
347 | - mtspr SPRN_TAR, r4 |
348 | - mtspr SPRN_PPR, r5 |
349 | - mtspr SPRN_DSCR, r6 |
350 | + mtctr r4 |
351 | + mtlr r5 |
352 | + mtcr r6 |
353 | + mtxer r8 |
354 | |
355 | - /* ******************** CR,LR,CCR,MSR ********** */ |
356 | - ld r3, _CTR(r7) |
357 | - ld r4, _LINK(r7) |
358 | - ld r5, _CCR(r7) |
359 | - ld r6, _XER(r7) |
360 | + /* ******************** TAR ******************** */ |
361 | + ld r4, THREAD_TM_TAR(r3) |
362 | + mtspr SPRN_TAR, r4 |
363 | |
364 | - mtctr r3 |
365 | - mtlr r4 |
366 | - mtcr r5 |
367 | - mtxer r6 |
368 | + /* Load up the PPR and DSCR in GPRs only at this stage */ |
369 | + ld r5, THREAD_TM_DSCR(r3) |
370 | + ld r6, THREAD_TM_PPR(r3) |
371 | |
372 | /* Clear the MSR RI since we are about to change R1. EE is already off |
373 | */ |
374 | @@ -384,19 +404,26 @@ restore_gprs: |
375 | mtmsrd r4, 1 |
376 | |
377 | REST_4GPRS(0, r7) /* GPR0-3 */ |
378 | - REST_GPR(4, r7) /* GPR4-6 */ |
379 | - REST_GPR(5, r7) |
380 | - REST_GPR(6, r7) |
381 | + REST_GPR(4, r7) /* GPR4 */ |
382 | REST_4GPRS(8, r7) /* GPR8-11 */ |
383 | REST_2GPRS(12, r7) /* GPR12-13 */ |
384 | |
385 | REST_NVGPRS(r7) /* GPR14-31 */ |
386 | |
387 | - ld r7, GPR7(r7) /* GPR7 */ |
388 | + /* Load up PPR and DSCR here so we don't run with user values for long |
389 | + */ |
390 | + mtspr SPRN_DSCR, r5 |
391 | + mtspr SPRN_PPR, r6 |
392 | + |
393 | + REST_GPR(5, r7) /* GPR5-7 */ |
394 | + REST_GPR(6, r7) |
395 | + ld r7, GPR7(r7) |
396 | |
397 | /* Commit register state as checkpointed state: */ |
398 | TRECHKPT |
399 | |
400 | + HMT_MEDIUM |
401 | + |
402 | /* Our transactional state has now changed. |
403 | * |
404 | * Now just get out of here. Transactional (current) state will be |
405 | @@ -419,6 +446,12 @@ restore_gprs: |
406 | mtcr r4 |
407 | mtlr r0 |
408 | ld r2, 40(r1) |
409 | + |
410 | + /* Load system default DSCR */ |
411 | + ld r4, DSCR_DEFAULT@toc(r2) |
412 | + ld r0, 0(r4) |
413 | + mtspr SPRN_DSCR, r0 |
414 | + |
415 | blr |
416 | |
417 | /* ****************************************************************** */ |
418 | diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c |
419 | index 536016d..2d845d8 100644 |
420 | --- a/arch/powerpc/kernel/vio.c |
421 | +++ b/arch/powerpc/kernel/vio.c |
422 | @@ -1529,11 +1529,15 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, |
423 | const char *cp; |
424 | |
425 | dn = dev->of_node; |
426 | - if (!dn) |
427 | - return -ENODEV; |
428 | + if (!dn) { |
429 | + strcat(buf, "\n"); |
430 | + return strlen(buf); |
431 | + } |
432 | cp = of_get_property(dn, "compatible", NULL); |
433 | - if (!cp) |
434 | - return -ENODEV; |
435 | + if (!cp) { |
436 | + strcat(buf, "\n"); |
437 | + return strlen(buf); |
438 | + } |
439 | |
440 | return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp); |
441 | } |
442 | diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S |
443 | index 167f725..57a0720 100644 |
444 | --- a/arch/powerpc/lib/checksum_64.S |
445 | +++ b/arch/powerpc/lib/checksum_64.S |
446 | @@ -226,19 +226,35 @@ _GLOBAL(csum_partial) |
447 | blr |
448 | |
449 | |
450 | - .macro source |
451 | + .macro srcnr |
452 | 100: |
453 | .section __ex_table,"a" |
454 | .align 3 |
455 | - .llong 100b,.Lsrc_error |
456 | + .llong 100b,.Lsrc_error_nr |
457 | .previous |
458 | .endm |
459 | |
460 | - .macro dest |
461 | + .macro source |
462 | +150: |
463 | + .section __ex_table,"a" |
464 | + .align 3 |
465 | + .llong 150b,.Lsrc_error |
466 | + .previous |
467 | + .endm |
468 | + |
469 | + .macro dstnr |
470 | 200: |
471 | .section __ex_table,"a" |
472 | .align 3 |
473 | - .llong 200b,.Ldest_error |
474 | + .llong 200b,.Ldest_error_nr |
475 | + .previous |
476 | + .endm |
477 | + |
478 | + .macro dest |
479 | +250: |
480 | + .section __ex_table,"a" |
481 | + .align 3 |
482 | + .llong 250b,.Ldest_error |
483 | .previous |
484 | .endm |
485 | |
486 | @@ -269,16 +285,16 @@ _GLOBAL(csum_partial_copy_generic) |
487 | rldicl. r6,r3,64-1,64-2 /* r6 = (r3 & 0x3) >> 1 */ |
488 | beq .Lcopy_aligned |
489 | |
490 | - li r7,4 |
491 | - sub r6,r7,r6 |
492 | + li r9,4 |
493 | + sub r6,r9,r6 |
494 | mtctr r6 |
495 | |
496 | 1: |
497 | -source; lhz r6,0(r3) /* align to doubleword */ |
498 | +srcnr; lhz r6,0(r3) /* align to doubleword */ |
499 | subi r5,r5,2 |
500 | addi r3,r3,2 |
501 | adde r0,r0,r6 |
502 | -dest; sth r6,0(r4) |
503 | +dstnr; sth r6,0(r4) |
504 | addi r4,r4,2 |
505 | bdnz 1b |
506 | |
507 | @@ -392,10 +408,10 @@ dest; std r16,56(r4) |
508 | |
509 | mtctr r6 |
510 | 3: |
511 | -source; ld r6,0(r3) |
512 | +srcnr; ld r6,0(r3) |
513 | addi r3,r3,8 |
514 | adde r0,r0,r6 |
515 | -dest; std r6,0(r4) |
516 | +dstnr; std r6,0(r4) |
517 | addi r4,r4,8 |
518 | bdnz 3b |
519 | |
520 | @@ -405,10 +421,10 @@ dest; std r6,0(r4) |
521 | srdi. r6,r5,2 |
522 | beq .Lcopy_tail_halfword |
523 | |
524 | -source; lwz r6,0(r3) |
525 | +srcnr; lwz r6,0(r3) |
526 | addi r3,r3,4 |
527 | adde r0,r0,r6 |
528 | -dest; stw r6,0(r4) |
529 | +dstnr; stw r6,0(r4) |
530 | addi r4,r4,4 |
531 | subi r5,r5,4 |
532 | |
533 | @@ -416,10 +432,10 @@ dest; stw r6,0(r4) |
534 | srdi. r6,r5,1 |
535 | beq .Lcopy_tail_byte |
536 | |
537 | -source; lhz r6,0(r3) |
538 | +srcnr; lhz r6,0(r3) |
539 | addi r3,r3,2 |
540 | adde r0,r0,r6 |
541 | -dest; sth r6,0(r4) |
542 | +dstnr; sth r6,0(r4) |
543 | addi r4,r4,2 |
544 | subi r5,r5,2 |
545 | |
546 | @@ -427,10 +443,10 @@ dest; sth r6,0(r4) |
547 | andi. r6,r5,1 |
548 | beq .Lcopy_finish |
549 | |
550 | -source; lbz r6,0(r3) |
551 | +srcnr; lbz r6,0(r3) |
552 | sldi r9,r6,8 /* Pad the byte out to 16 bits */ |
553 | adde r0,r0,r9 |
554 | -dest; stb r6,0(r4) |
555 | +dstnr; stb r6,0(r4) |
556 | |
557 | .Lcopy_finish: |
558 | addze r0,r0 /* add in final carry */ |
559 | @@ -440,6 +456,11 @@ dest; stb r6,0(r4) |
560 | blr |
561 | |
562 | .Lsrc_error: |
563 | + ld r14,STK_REG(R14)(r1) |
564 | + ld r15,STK_REG(R15)(r1) |
565 | + ld r16,STK_REG(R16)(r1) |
566 | + addi r1,r1,STACKFRAMESIZE |
567 | +.Lsrc_error_nr: |
568 | cmpdi 0,r7,0 |
569 | beqlr |
570 | li r6,-EFAULT |
571 | @@ -447,6 +468,11 @@ dest; stb r6,0(r4) |
572 | blr |
573 | |
574 | .Ldest_error: |
575 | + ld r14,STK_REG(R14)(r1) |
576 | + ld r15,STK_REG(R15)(r1) |
577 | + ld r16,STK_REG(R16)(r1) |
578 | + addi r1,r1,STACKFRAMESIZE |
579 | +.Ldest_error_nr: |
580 | cmpdi 0,r8,0 |
581 | beqlr |
582 | li r6,-EFAULT |
583 | diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c |
584 | index d0cd9e4..8ed035d 100644 |
585 | --- a/arch/powerpc/mm/init_64.c |
586 | +++ b/arch/powerpc/mm/init_64.c |
587 | @@ -300,5 +300,9 @@ void vmemmap_free(unsigned long start, unsigned long end) |
588 | { |
589 | } |
590 | |
591 | +void register_page_bootmem_memmap(unsigned long section_nr, |
592 | + struct page *start_page, unsigned long size) |
593 | +{ |
594 | +} |
595 | #endif /* CONFIG_SPARSEMEM_VMEMMAP */ |
596 | |
597 | diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c |
598 | index 7f4bea1..25b051b 100644 |
599 | --- a/arch/powerpc/mm/mem.c |
600 | +++ b/arch/powerpc/mm/mem.c |
601 | @@ -297,12 +297,21 @@ void __init paging_init(void) |
602 | } |
603 | #endif /* ! CONFIG_NEED_MULTIPLE_NODES */ |
604 | |
605 | +static void __init register_page_bootmem_info(void) |
606 | +{ |
607 | + int i; |
608 | + |
609 | + for_each_online_node(i) |
610 | + register_page_bootmem_info_node(NODE_DATA(i)); |
611 | +} |
612 | + |
613 | void __init mem_init(void) |
614 | { |
615 | #ifdef CONFIG_SWIOTLB |
616 | swiotlb_init(0); |
617 | #endif |
618 | |
619 | + register_page_bootmem_info(); |
620 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); |
621 | set_max_mapnr(max_pfn); |
622 | free_all_bootmem(); |
623 | diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c |
624 | index 2ee4a70..a3f7abd 100644 |
625 | --- a/arch/powerpc/perf/power8-pmu.c |
626 | +++ b/arch/powerpc/perf/power8-pmu.c |
627 | @@ -199,6 +199,7 @@ |
628 | #define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1))) |
629 | #define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1)) |
630 | #define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8) |
631 | +#define MMCR1_FAB_SHIFT 36 |
632 | #define MMCR1_DC_QUAL_SHIFT 47 |
633 | #define MMCR1_IC_QUAL_SHIFT 46 |
634 | |
635 | @@ -388,8 +389,8 @@ static int power8_compute_mmcr(u64 event[], int n_ev, |
636 | * the threshold bits are used for the match value. |
637 | */ |
638 | if (event_is_fab_match(event[i])) { |
639 | - mmcr1 |= (event[i] >> EVENT_THR_CTL_SHIFT) & |
640 | - EVENT_THR_CTL_MASK; |
641 | + mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) & |
642 | + EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT; |
643 | } else { |
644 | val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK; |
645 | mmcra |= val << MMCRA_THR_CTL_SHIFT; |
646 | diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S |
647 | index be7a408..0cdf499 100644 |
648 | --- a/arch/s390/kernel/entry.S |
649 | +++ b/arch/s390/kernel/entry.S |
650 | @@ -265,6 +265,7 @@ sysc_sigpending: |
651 | tm __TI_flags+3(%r12),_TIF_SYSCALL |
652 | jno sysc_return |
653 | lm %r2,%r7,__PT_R2(%r11) # load svc arguments |
654 | + l %r10,__TI_sysc_table(%r12) # 31 bit system call table |
655 | xr %r8,%r8 # svc 0 returns -ENOSYS |
656 | clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2) |
657 | jnl sysc_nr_ok # invalid svc number -> do svc 0 |
658 | diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S |
659 | index 1c039d0..4bdb420 100644 |
660 | --- a/arch/s390/kernel/entry64.S |
661 | +++ b/arch/s390/kernel/entry64.S |
662 | @@ -296,6 +296,7 @@ sysc_sigpending: |
663 | tm __TI_flags+7(%r12),_TIF_SYSCALL |
664 | jno sysc_return |
665 | lmg %r2,%r7,__PT_R2(%r11) # load svc arguments |
666 | + lg %r10,__TI_sysc_table(%r12) # address of system call table |
667 | lghi %r8,0 # svc 0 returns -ENOSYS |
668 | llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number |
669 | cghi %r1,NR_syscalls |
670 | diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c |
671 | index 62d6b15..dff60ab 100644 |
672 | --- a/arch/sparc/kernel/ds.c |
673 | +++ b/arch/sparc/kernel/ds.c |
674 | @@ -849,9 +849,8 @@ void ldom_reboot(const char *boot_command) |
675 | if (boot_command && strlen(boot_command)) { |
676 | unsigned long len; |
677 | |
678 | - strcpy(full_boot_str, "boot "); |
679 | - strlcpy(full_boot_str + strlen("boot "), boot_command, |
680 | - sizeof(full_boot_str + strlen("boot "))); |
681 | + snprintf(full_boot_str, sizeof(full_boot_str), "boot %s", |
682 | + boot_command); |
683 | len = strlen(full_boot_str); |
684 | |
685 | if (reboot_data_supported) { |
686 | diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S |
687 | index e2a0300..33c02b1 100644 |
688 | --- a/arch/sparc/kernel/entry.S |
689 | +++ b/arch/sparc/kernel/entry.S |
690 | @@ -839,7 +839,7 @@ sys_sigreturn: |
691 | nop |
692 | |
693 | call syscall_trace |
694 | - nop |
695 | + mov 1, %o1 |
696 | |
697 | 1: |
698 | /* We don't want to muck with user registers like a |
699 | diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S |
700 | index 0746e5e..fde5a41 100644 |
701 | --- a/arch/sparc/kernel/ktlb.S |
702 | +++ b/arch/sparc/kernel/ktlb.S |
703 | @@ -25,11 +25,10 @@ kvmap_itlb: |
704 | */ |
705 | kvmap_itlb_4v: |
706 | |
707 | -kvmap_itlb_nonlinear: |
708 | /* Catch kernel NULL pointer calls. */ |
709 | sethi %hi(PAGE_SIZE), %g5 |
710 | cmp %g4, %g5 |
711 | - bleu,pn %xcc, kvmap_dtlb_longpath |
712 | + blu,pn %xcc, kvmap_itlb_longpath |
713 | nop |
714 | |
715 | KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load) |
716 | diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S |
717 | index 22a1098..73ec8a7 100644 |
718 | --- a/arch/sparc/kernel/syscalls.S |
719 | +++ b/arch/sparc/kernel/syscalls.S |
720 | @@ -152,7 +152,7 @@ linux_syscall_trace32: |
721 | srl %i4, 0, %o4 |
722 | srl %i1, 0, %o1 |
723 | srl %i2, 0, %o2 |
724 | - ba,pt %xcc, 2f |
725 | + ba,pt %xcc, 5f |
726 | srl %i3, 0, %o3 |
727 | |
728 | linux_syscall_trace: |
729 | @@ -182,13 +182,13 @@ linux_sparc_syscall32: |
730 | srl %i1, 0, %o1 ! IEU0 Group |
731 | ldx [%g6 + TI_FLAGS], %l0 ! Load |
732 | |
733 | - srl %i5, 0, %o5 ! IEU1 |
734 | + srl %i3, 0, %o3 ! IEU0 |
735 | srl %i2, 0, %o2 ! IEU0 Group |
736 | andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 |
737 | bne,pn %icc, linux_syscall_trace32 ! CTI |
738 | mov %i0, %l5 ! IEU1 |
739 | - call %l7 ! CTI Group brk forced |
740 | - srl %i3, 0, %o3 ! IEU0 |
741 | +5: call %l7 ! CTI Group brk forced |
742 | + srl %i5, 0, %o5 ! IEU1 |
743 | ba,a,pt %xcc, 3f |
744 | |
745 | /* Linux native system calls enter here... */ |
746 | diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S |
747 | index e0b1e13..ad4bde3 100644 |
748 | --- a/arch/sparc/kernel/trampoline_64.S |
749 | +++ b/arch/sparc/kernel/trampoline_64.S |
750 | @@ -129,7 +129,6 @@ startup_continue: |
751 | clr %l5 |
752 | sethi %hi(num_kernel_image_mappings), %l6 |
753 | lduw [%l6 + %lo(num_kernel_image_mappings)], %l6 |
754 | - add %l6, 1, %l6 |
755 | |
756 | mov 15, %l7 |
757 | BRANCH_IF_ANY_CHEETAH(g1,g5,2f) |
758 | @@ -222,7 +221,6 @@ niagara_lock_tlb: |
759 | clr %l5 |
760 | sethi %hi(num_kernel_image_mappings), %l6 |
761 | lduw [%l6 + %lo(num_kernel_image_mappings)], %l6 |
762 | - add %l6, 1, %l6 |
763 | |
764 | 1: |
765 | mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 |
766 | diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c |
767 | index 0c4e35e..323335b 100644 |
768 | --- a/arch/sparc/lib/ksyms.c |
769 | +++ b/arch/sparc/lib/ksyms.c |
770 | @@ -98,15 +98,6 @@ EXPORT_SYMBOL(___copy_from_user); |
771 | EXPORT_SYMBOL(___copy_in_user); |
772 | EXPORT_SYMBOL(__clear_user); |
773 | |
774 | -/* RW semaphores */ |
775 | -EXPORT_SYMBOL(__down_read); |
776 | -EXPORT_SYMBOL(__down_read_trylock); |
777 | -EXPORT_SYMBOL(__down_write); |
778 | -EXPORT_SYMBOL(__down_write_trylock); |
779 | -EXPORT_SYMBOL(__up_read); |
780 | -EXPORT_SYMBOL(__up_write); |
781 | -EXPORT_SYMBOL(__downgrade_write); |
782 | - |
783 | /* Atomic counter implementation. */ |
784 | EXPORT_SYMBOL(atomic_add); |
785 | EXPORT_SYMBOL(atomic_add_ret); |
786 | diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h |
787 | index 63294f5..4f7ae39 100644 |
788 | --- a/arch/tile/include/asm/percpu.h |
789 | +++ b/arch/tile/include/asm/percpu.h |
790 | @@ -15,9 +15,37 @@ |
791 | #ifndef _ASM_TILE_PERCPU_H |
792 | #define _ASM_TILE_PERCPU_H |
793 | |
794 | -register unsigned long __my_cpu_offset __asm__("tp"); |
795 | -#define __my_cpu_offset __my_cpu_offset |
796 | -#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp)) |
797 | +register unsigned long my_cpu_offset_reg asm("tp"); |
798 | + |
799 | +#ifdef CONFIG_PREEMPT |
800 | +/* |
801 | + * For full preemption, we can't just use the register variable |
802 | + * directly, since we need barrier() to hazard against it, causing the |
803 | + * compiler to reload anything computed from a previous "tp" value. |
804 | + * But we also don't want to use volatile asm, since we'd like the |
805 | + * compiler to be able to cache the value across multiple percpu reads. |
806 | + * So we use a fake stack read as a hazard against barrier(). |
807 | + * The 'U' constraint is like 'm' but disallows postincrement. |
808 | + */ |
809 | +static inline unsigned long __my_cpu_offset(void) |
810 | +{ |
811 | + unsigned long tp; |
812 | + register unsigned long *sp asm("sp"); |
813 | + asm("move %0, tp" : "=r" (tp) : "U" (*sp)); |
814 | + return tp; |
815 | +} |
816 | +#define __my_cpu_offset __my_cpu_offset() |
817 | +#else |
818 | +/* |
819 | + * We don't need to hazard against barrier() since "tp" doesn't ever |
820 | + * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only |
821 | + * changes at function call points, at which we are already re-reading |
822 | + * the value of "tp" due to "my_cpu_offset_reg" being a global variable. |
823 | + */ |
824 | +#define __my_cpu_offset my_cpu_offset_reg |
825 | +#endif |
826 | + |
827 | +#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp)) |
828 | |
829 | #include <asm-generic/percpu.h> |
830 | |
831 | diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c |
832 | index f40acef..a6977e1 100644 |
833 | --- a/drivers/acpi/acpi_ipmi.c |
834 | +++ b/drivers/acpi/acpi_ipmi.c |
835 | @@ -39,6 +39,7 @@ |
836 | #include <linux/ipmi.h> |
837 | #include <linux/device.h> |
838 | #include <linux/pnp.h> |
839 | +#include <linux/spinlock.h> |
840 | |
841 | MODULE_AUTHOR("Zhao Yakui"); |
842 | MODULE_DESCRIPTION("ACPI IPMI Opregion driver"); |
843 | @@ -57,7 +58,7 @@ struct acpi_ipmi_device { |
844 | struct list_head head; |
845 | /* the IPMI request message list */ |
846 | struct list_head tx_msg_list; |
847 | - struct mutex tx_msg_lock; |
848 | + spinlock_t tx_msg_lock; |
849 | acpi_handle handle; |
850 | struct pnp_dev *pnp_dev; |
851 | ipmi_user_t user_interface; |
852 | @@ -147,6 +148,7 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg, |
853 | struct kernel_ipmi_msg *msg; |
854 | struct acpi_ipmi_buffer *buffer; |
855 | struct acpi_ipmi_device *device; |
856 | + unsigned long flags; |
857 | |
858 | msg = &tx_msg->tx_message; |
859 | /* |
860 | @@ -177,10 +179,10 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg, |
861 | |
862 | /* Get the msgid */ |
863 | device = tx_msg->device; |
864 | - mutex_lock(&device->tx_msg_lock); |
865 | + spin_lock_irqsave(&device->tx_msg_lock, flags); |
866 | device->curr_msgid++; |
867 | tx_msg->tx_msgid = device->curr_msgid; |
868 | - mutex_unlock(&device->tx_msg_lock); |
869 | + spin_unlock_irqrestore(&device->tx_msg_lock, flags); |
870 | } |
871 | |
872 | static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg, |
873 | @@ -242,6 +244,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) |
874 | int msg_found = 0; |
875 | struct acpi_ipmi_msg *tx_msg; |
876 | struct pnp_dev *pnp_dev = ipmi_device->pnp_dev; |
877 | + unsigned long flags; |
878 | |
879 | if (msg->user != ipmi_device->user_interface) { |
880 | dev_warn(&pnp_dev->dev, "Unexpected response is returned. " |
881 | @@ -250,7 +253,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) |
882 | ipmi_free_recv_msg(msg); |
883 | return; |
884 | } |
885 | - mutex_lock(&ipmi_device->tx_msg_lock); |
886 | + spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); |
887 | list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) { |
888 | if (msg->msgid == tx_msg->tx_msgid) { |
889 | msg_found = 1; |
890 | @@ -258,7 +261,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) |
891 | } |
892 | } |
893 | |
894 | - mutex_unlock(&ipmi_device->tx_msg_lock); |
895 | + spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); |
896 | if (!msg_found) { |
897 | dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is " |
898 | "returned.\n", msg->msgid); |
899 | @@ -378,6 +381,7 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address, |
900 | struct acpi_ipmi_device *ipmi_device = handler_context; |
901 | int err, rem_time; |
902 | acpi_status status; |
903 | + unsigned long flags; |
904 | /* |
905 | * IPMI opregion message. |
906 | * IPMI message is firstly written to the BMC and system software |
907 | @@ -395,9 +399,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address, |
908 | return AE_NO_MEMORY; |
909 | |
910 | acpi_format_ipmi_msg(tx_msg, address, value); |
911 | - mutex_lock(&ipmi_device->tx_msg_lock); |
912 | + spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); |
913 | list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list); |
914 | - mutex_unlock(&ipmi_device->tx_msg_lock); |
915 | + spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); |
916 | err = ipmi_request_settime(ipmi_device->user_interface, |
917 | &tx_msg->addr, |
918 | tx_msg->tx_msgid, |
919 | @@ -413,9 +417,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address, |
920 | status = AE_OK; |
921 | |
922 | end_label: |
923 | - mutex_lock(&ipmi_device->tx_msg_lock); |
924 | + spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); |
925 | list_del(&tx_msg->head); |
926 | - mutex_unlock(&ipmi_device->tx_msg_lock); |
927 | + spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); |
928 | kfree(tx_msg); |
929 | return status; |
930 | } |
931 | @@ -457,7 +461,7 @@ static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device) |
932 | |
933 | INIT_LIST_HEAD(&ipmi_device->head); |
934 | |
935 | - mutex_init(&ipmi_device->tx_msg_lock); |
936 | + spin_lock_init(&ipmi_device->tx_msg_lock); |
937 | INIT_LIST_HEAD(&ipmi_device->tx_msg_list); |
938 | ipmi_install_space_handler(ipmi_device); |
939 | |
940 | diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c |
941 | index 62b6c2c..90a4e6b 100644 |
942 | --- a/drivers/block/cciss.c |
943 | +++ b/drivers/block/cciss.c |
944 | @@ -1189,6 +1189,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, |
945 | int err; |
946 | u32 cp; |
947 | |
948 | + memset(&arg64, 0, sizeof(arg64)); |
949 | err = 0; |
950 | err |= |
951 | copy_from_user(&arg64.LUN_info, &arg32->LUN_info, |
952 | diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c |
953 | index 639d26b..2b94403 100644 |
954 | --- a/drivers/block/cpqarray.c |
955 | +++ b/drivers/block/cpqarray.c |
956 | @@ -1193,6 +1193,7 @@ out_passthru: |
957 | ida_pci_info_struct pciinfo; |
958 | |
959 | if (!arg) return -EINVAL; |
960 | + memset(&pciinfo, 0, sizeof(pciinfo)); |
961 | pciinfo.bus = host->pci_dev->bus->number; |
962 | pciinfo.dev_fn = host->pci_dev->devfn; |
963 | pciinfo.board_id = host->board_id; |
964 | diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c |
965 | index a12b923..0a327f4 100644 |
966 | --- a/drivers/bluetooth/ath3k.c |
967 | +++ b/drivers/bluetooth/ath3k.c |
968 | @@ -85,6 +85,7 @@ static struct usb_device_id ath3k_table[] = { |
969 | { USB_DEVICE(0x04CA, 0x3008) }, |
970 | { USB_DEVICE(0x13d3, 0x3362) }, |
971 | { USB_DEVICE(0x0CF3, 0xE004) }, |
972 | + { USB_DEVICE(0x0CF3, 0xE005) }, |
973 | { USB_DEVICE(0x0930, 0x0219) }, |
974 | { USB_DEVICE(0x0489, 0xe057) }, |
975 | { USB_DEVICE(0x13d3, 0x3393) }, |
976 | @@ -126,6 +127,7 @@ static struct usb_device_id ath3k_blist_tbl[] = { |
977 | { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, |
978 | { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, |
979 | { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, |
980 | + { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, |
981 | { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, |
982 | { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, |
983 | { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, |
984 | diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c |
985 | index 8e16f0a..3221a55 100644 |
986 | --- a/drivers/bluetooth/btusb.c |
987 | +++ b/drivers/bluetooth/btusb.c |
988 | @@ -102,6 +102,7 @@ static struct usb_device_id btusb_table[] = { |
989 | |
990 | /* Broadcom BCM20702A0 */ |
991 | { USB_DEVICE(0x0b05, 0x17b5) }, |
992 | + { USB_DEVICE(0x0b05, 0x17cb) }, |
993 | { USB_DEVICE(0x04ca, 0x2003) }, |
994 | { USB_DEVICE(0x0489, 0xe042) }, |
995 | { USB_DEVICE(0x413c, 0x8197) }, |
996 | @@ -148,6 +149,7 @@ static struct usb_device_id blacklist_table[] = { |
997 | { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, |
998 | { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, |
999 | { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, |
1000 | + { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, |
1001 | { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, |
1002 | { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, |
1003 | { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, |
1004 | diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c |
1005 | index ff2aab9..25787d06 100644 |
1006 | --- a/drivers/dma/imx-dma.c |
1007 | +++ b/drivers/dma/imx-dma.c |
1008 | @@ -437,17 +437,18 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) |
1009 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
1010 | int chno = imxdmac->channel; |
1011 | struct imxdma_desc *desc; |
1012 | + unsigned long flags; |
1013 | |
1014 | - spin_lock(&imxdma->lock); |
1015 | + spin_lock_irqsave(&imxdma->lock, flags); |
1016 | if (list_empty(&imxdmac->ld_active)) { |
1017 | - spin_unlock(&imxdma->lock); |
1018 | + spin_unlock_irqrestore(&imxdma->lock, flags); |
1019 | goto out; |
1020 | } |
1021 | |
1022 | desc = list_first_entry(&imxdmac->ld_active, |
1023 | struct imxdma_desc, |
1024 | node); |
1025 | - spin_unlock(&imxdma->lock); |
1026 | + spin_unlock_irqrestore(&imxdma->lock, flags); |
1027 | |
1028 | if (desc->sg) { |
1029 | u32 tmp; |
1030 | @@ -519,7 +520,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) |
1031 | { |
1032 | struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); |
1033 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
1034 | - unsigned long flags; |
1035 | int slot = -1; |
1036 | int i; |
1037 | |
1038 | @@ -527,7 +527,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) |
1039 | switch (d->type) { |
1040 | case IMXDMA_DESC_INTERLEAVED: |
1041 | /* Try to get a free 2D slot */ |
1042 | - spin_lock_irqsave(&imxdma->lock, flags); |
1043 | for (i = 0; i < IMX_DMA_2D_SLOTS; i++) { |
1044 | if ((imxdma->slots_2d[i].count > 0) && |
1045 | ((imxdma->slots_2d[i].xsr != d->x) || |
1046 | @@ -537,10 +536,8 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) |
1047 | slot = i; |
1048 | break; |
1049 | } |
1050 | - if (slot < 0) { |
1051 | - spin_unlock_irqrestore(&imxdma->lock, flags); |
1052 | + if (slot < 0) |
1053 | return -EBUSY; |
1054 | - } |
1055 | |
1056 | imxdma->slots_2d[slot].xsr = d->x; |
1057 | imxdma->slots_2d[slot].ysr = d->y; |
1058 | @@ -549,7 +546,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) |
1059 | |
1060 | imxdmac->slot_2d = slot; |
1061 | imxdmac->enabled_2d = true; |
1062 | - spin_unlock_irqrestore(&imxdma->lock, flags); |
1063 | |
1064 | if (slot == IMX_DMA_2D_SLOT_A) { |
1065 | d->config_mem &= ~CCR_MSEL_B; |
1066 | @@ -625,18 +621,17 @@ static void imxdma_tasklet(unsigned long data) |
1067 | struct imxdma_channel *imxdmac = (void *)data; |
1068 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
1069 | struct imxdma_desc *desc; |
1070 | + unsigned long flags; |
1071 | |
1072 | - spin_lock(&imxdma->lock); |
1073 | + spin_lock_irqsave(&imxdma->lock, flags); |
1074 | |
1075 | if (list_empty(&imxdmac->ld_active)) { |
1076 | /* Someone might have called terminate all */ |
1077 | - goto out; |
1078 | + spin_unlock_irqrestore(&imxdma->lock, flags); |
1079 | + return; |
1080 | } |
1081 | desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node); |
1082 | |
1083 | - if (desc->desc.callback) |
1084 | - desc->desc.callback(desc->desc.callback_param); |
1085 | - |
1086 | /* If we are dealing with a cyclic descriptor, keep it on ld_active |
1087 | * and dont mark the descriptor as complete. |
1088 | * Only in non-cyclic cases it would be marked as complete |
1089 | @@ -663,7 +658,11 @@ static void imxdma_tasklet(unsigned long data) |
1090 | __func__, imxdmac->channel); |
1091 | } |
1092 | out: |
1093 | - spin_unlock(&imxdma->lock); |
1094 | + spin_unlock_irqrestore(&imxdma->lock, flags); |
1095 | + |
1096 | + if (desc->desc.callback) |
1097 | + desc->desc.callback(desc->desc.callback_param); |
1098 | + |
1099 | } |
1100 | |
1101 | static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
1102 | @@ -885,7 +884,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( |
1103 | kfree(imxdmac->sg_list); |
1104 | |
1105 | imxdmac->sg_list = kcalloc(periods + 1, |
1106 | - sizeof(struct scatterlist), GFP_KERNEL); |
1107 | + sizeof(struct scatterlist), GFP_ATOMIC); |
1108 | if (!imxdmac->sg_list) |
1109 | return NULL; |
1110 | |
1111 | diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c |
1112 | index dfeb3a3..6c235c5 100644 |
1113 | --- a/drivers/gpio/gpio-omap.c |
1114 | +++ b/drivers/gpio/gpio-omap.c |
1115 | @@ -63,6 +63,7 @@ struct gpio_bank { |
1116 | struct gpio_chip chip; |
1117 | struct clk *dbck; |
1118 | u32 mod_usage; |
1119 | + u32 irq_usage; |
1120 | u32 dbck_enable_mask; |
1121 | bool dbck_enabled; |
1122 | struct device *dev; |
1123 | @@ -86,6 +87,9 @@ struct gpio_bank { |
1124 | #define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio)) |
1125 | #define GPIO_MOD_CTRL_BIT BIT(0) |
1126 | |
1127 | +#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage) |
1128 | +#define LINE_USED(line, offset) (line & (1 << offset)) |
1129 | + |
1130 | static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq) |
1131 | { |
1132 | return bank->chip.base + gpio_irq; |
1133 | @@ -420,15 +424,69 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, |
1134 | return 0; |
1135 | } |
1136 | |
1137 | +static void _enable_gpio_module(struct gpio_bank *bank, unsigned offset) |
1138 | +{ |
1139 | + if (bank->regs->pinctrl) { |
1140 | + void __iomem *reg = bank->base + bank->regs->pinctrl; |
1141 | + |
1142 | + /* Claim the pin for MPU */ |
1143 | + __raw_writel(__raw_readl(reg) | (1 << offset), reg); |
1144 | + } |
1145 | + |
1146 | + if (bank->regs->ctrl && !BANK_USED(bank)) { |
1147 | + void __iomem *reg = bank->base + bank->regs->ctrl; |
1148 | + u32 ctrl; |
1149 | + |
1150 | + ctrl = __raw_readl(reg); |
1151 | + /* Module is enabled, clocks are not gated */ |
1152 | + ctrl &= ~GPIO_MOD_CTRL_BIT; |
1153 | + __raw_writel(ctrl, reg); |
1154 | + bank->context.ctrl = ctrl; |
1155 | + } |
1156 | +} |
1157 | + |
1158 | +static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset) |
1159 | +{ |
1160 | + void __iomem *base = bank->base; |
1161 | + |
1162 | + if (bank->regs->wkup_en && |
1163 | + !LINE_USED(bank->mod_usage, offset) && |
1164 | + !LINE_USED(bank->irq_usage, offset)) { |
1165 | + /* Disable wake-up during idle for dynamic tick */ |
1166 | + _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0); |
1167 | + bank->context.wake_en = |
1168 | + __raw_readl(bank->base + bank->regs->wkup_en); |
1169 | + } |
1170 | + |
1171 | + if (bank->regs->ctrl && !BANK_USED(bank)) { |
1172 | + void __iomem *reg = bank->base + bank->regs->ctrl; |
1173 | + u32 ctrl; |
1174 | + |
1175 | + ctrl = __raw_readl(reg); |
1176 | + /* Module is disabled, clocks are gated */ |
1177 | + ctrl |= GPIO_MOD_CTRL_BIT; |
1178 | + __raw_writel(ctrl, reg); |
1179 | + bank->context.ctrl = ctrl; |
1180 | + } |
1181 | +} |
1182 | + |
1183 | +static int gpio_is_input(struct gpio_bank *bank, int mask) |
1184 | +{ |
1185 | + void __iomem *reg = bank->base + bank->regs->direction; |
1186 | + |
1187 | + return __raw_readl(reg) & mask; |
1188 | +} |
1189 | + |
1190 | static int gpio_irq_type(struct irq_data *d, unsigned type) |
1191 | { |
1192 | struct gpio_bank *bank = irq_data_get_irq_chip_data(d); |
1193 | unsigned gpio = 0; |
1194 | int retval; |
1195 | unsigned long flags; |
1196 | + unsigned offset; |
1197 | |
1198 | - if (WARN_ON(!bank->mod_usage)) |
1199 | - return -EINVAL; |
1200 | + if (!BANK_USED(bank)) |
1201 | + pm_runtime_get_sync(bank->dev); |
1202 | |
1203 | #ifdef CONFIG_ARCH_OMAP1 |
1204 | if (d->irq > IH_MPUIO_BASE) |
1205 | @@ -446,7 +504,17 @@ static int gpio_irq_type(struct irq_data *d, unsigned type) |
1206 | return -EINVAL; |
1207 | |
1208 | spin_lock_irqsave(&bank->lock, flags); |
1209 | - retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type); |
1210 | + offset = GPIO_INDEX(bank, gpio); |
1211 | + retval = _set_gpio_triggering(bank, offset, type); |
1212 | + if (!LINE_USED(bank->mod_usage, offset)) { |
1213 | + _enable_gpio_module(bank, offset); |
1214 | + _set_gpio_direction(bank, offset, 1); |
1215 | + } else if (!gpio_is_input(bank, 1 << offset)) { |
1216 | + spin_unlock_irqrestore(&bank->lock, flags); |
1217 | + return -EINVAL; |
1218 | + } |
1219 | + |
1220 | + bank->irq_usage |= 1 << GPIO_INDEX(bank, gpio); |
1221 | spin_unlock_irqrestore(&bank->lock, flags); |
1222 | |
1223 | if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) |
1224 | @@ -603,35 +671,19 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset) |
1225 | * If this is the first gpio_request for the bank, |
1226 | * enable the bank module. |
1227 | */ |
1228 | - if (!bank->mod_usage) |
1229 | + if (!BANK_USED(bank)) |
1230 | pm_runtime_get_sync(bank->dev); |
1231 | |
1232 | spin_lock_irqsave(&bank->lock, flags); |
1233 | /* Set trigger to none. You need to enable the desired trigger with |
1234 | - * request_irq() or set_irq_type(). |
1235 | + * request_irq() or set_irq_type(). Only do this if the IRQ line has |
1236 | + * not already been requested. |
1237 | */ |
1238 | - _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); |
1239 | - |
1240 | - if (bank->regs->pinctrl) { |
1241 | - void __iomem *reg = bank->base + bank->regs->pinctrl; |
1242 | - |
1243 | - /* Claim the pin for MPU */ |
1244 | - __raw_writel(__raw_readl(reg) | (1 << offset), reg); |
1245 | - } |
1246 | - |
1247 | - if (bank->regs->ctrl && !bank->mod_usage) { |
1248 | - void __iomem *reg = bank->base + bank->regs->ctrl; |
1249 | - u32 ctrl; |
1250 | - |
1251 | - ctrl = __raw_readl(reg); |
1252 | - /* Module is enabled, clocks are not gated */ |
1253 | - ctrl &= ~GPIO_MOD_CTRL_BIT; |
1254 | - __raw_writel(ctrl, reg); |
1255 | - bank->context.ctrl = ctrl; |
1256 | + if (!LINE_USED(bank->irq_usage, offset)) { |
1257 | + _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); |
1258 | + _enable_gpio_module(bank, offset); |
1259 | } |
1260 | - |
1261 | bank->mod_usage |= 1 << offset; |
1262 | - |
1263 | spin_unlock_irqrestore(&bank->lock, flags); |
1264 | |
1265 | return 0; |
1266 | @@ -640,31 +692,11 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset) |
1267 | static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) |
1268 | { |
1269 | struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip); |
1270 | - void __iomem *base = bank->base; |
1271 | unsigned long flags; |
1272 | |
1273 | spin_lock_irqsave(&bank->lock, flags); |
1274 | - |
1275 | - if (bank->regs->wkup_en) { |
1276 | - /* Disable wake-up during idle for dynamic tick */ |
1277 | - _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0); |
1278 | - bank->context.wake_en = |
1279 | - __raw_readl(bank->base + bank->regs->wkup_en); |
1280 | - } |
1281 | - |
1282 | bank->mod_usage &= ~(1 << offset); |
1283 | - |
1284 | - if (bank->regs->ctrl && !bank->mod_usage) { |
1285 | - void __iomem *reg = bank->base + bank->regs->ctrl; |
1286 | - u32 ctrl; |
1287 | - |
1288 | - ctrl = __raw_readl(reg); |
1289 | - /* Module is disabled, clocks are gated */ |
1290 | - ctrl |= GPIO_MOD_CTRL_BIT; |
1291 | - __raw_writel(ctrl, reg); |
1292 | - bank->context.ctrl = ctrl; |
1293 | - } |
1294 | - |
1295 | + _disable_gpio_module(bank, offset); |
1296 | _reset_gpio(bank, bank->chip.base + offset); |
1297 | spin_unlock_irqrestore(&bank->lock, flags); |
1298 | |
1299 | @@ -672,7 +704,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) |
1300 | * If this is the last gpio to be freed in the bank, |
1301 | * disable the bank module. |
1302 | */ |
1303 | - if (!bank->mod_usage) |
1304 | + if (!BANK_USED(bank)) |
1305 | pm_runtime_put(bank->dev); |
1306 | } |
1307 | |
1308 | @@ -762,10 +794,20 @@ static void gpio_irq_shutdown(struct irq_data *d) |
1309 | struct gpio_bank *bank = irq_data_get_irq_chip_data(d); |
1310 | unsigned int gpio = irq_to_gpio(bank, d->hwirq); |
1311 | unsigned long flags; |
1312 | + unsigned offset = GPIO_INDEX(bank, gpio); |
1313 | |
1314 | spin_lock_irqsave(&bank->lock, flags); |
1315 | + bank->irq_usage &= ~(1 << offset); |
1316 | + _disable_gpio_module(bank, offset); |
1317 | _reset_gpio(bank, gpio); |
1318 | spin_unlock_irqrestore(&bank->lock, flags); |
1319 | + |
1320 | + /* |
1321 | + * If this is the last IRQ to be freed in the bank, |
1322 | + * disable the bank module. |
1323 | + */ |
1324 | + if (!BANK_USED(bank)) |
1325 | + pm_runtime_put(bank->dev); |
1326 | } |
1327 | |
1328 | static void gpio_ack_irq(struct irq_data *d) |
1329 | @@ -897,13 +939,6 @@ static int gpio_input(struct gpio_chip *chip, unsigned offset) |
1330 | return 0; |
1331 | } |
1332 | |
1333 | -static int gpio_is_input(struct gpio_bank *bank, int mask) |
1334 | -{ |
1335 | - void __iomem *reg = bank->base + bank->regs->direction; |
1336 | - |
1337 | - return __raw_readl(reg) & mask; |
1338 | -} |
1339 | - |
1340 | static int gpio_get(struct gpio_chip *chip, unsigned offset) |
1341 | { |
1342 | struct gpio_bank *bank; |
1343 | @@ -922,13 +957,22 @@ static int gpio_output(struct gpio_chip *chip, unsigned offset, int value) |
1344 | { |
1345 | struct gpio_bank *bank; |
1346 | unsigned long flags; |
1347 | + int retval = 0; |
1348 | |
1349 | bank = container_of(chip, struct gpio_bank, chip); |
1350 | spin_lock_irqsave(&bank->lock, flags); |
1351 | + |
1352 | + if (LINE_USED(bank->irq_usage, offset)) { |
1353 | + retval = -EINVAL; |
1354 | + goto exit; |
1355 | + } |
1356 | + |
1357 | bank->set_dataout(bank, offset, value); |
1358 | _set_gpio_direction(bank, offset, 0); |
1359 | + |
1360 | +exit: |
1361 | spin_unlock_irqrestore(&bank->lock, flags); |
1362 | - return 0; |
1363 | + return retval; |
1364 | } |
1365 | |
1366 | static int gpio_debounce(struct gpio_chip *chip, unsigned offset, |
1367 | @@ -1400,7 +1444,7 @@ void omap2_gpio_prepare_for_idle(int pwr_mode) |
1368 | struct gpio_bank *bank; |
1369 | |
1370 | list_for_each_entry(bank, &omap_gpio_list, node) { |
1371 | - if (!bank->mod_usage || !bank->loses_context) |
1372 | + if (!BANK_USED(bank) || !bank->loses_context) |
1373 | continue; |
1374 | |
1375 | bank->power_mode = pwr_mode; |
1376 | @@ -1414,7 +1458,7 @@ void omap2_gpio_resume_after_idle(void) |
1377 | struct gpio_bank *bank; |
1378 | |
1379 | list_for_each_entry(bank, &omap_gpio_list, node) { |
1380 | - if (!bank->mod_usage || !bank->loses_context) |
1381 | + if (!BANK_USED(bank) || !bank->loses_context) |
1382 | continue; |
1383 | |
1384 | pm_runtime_get_sync(bank->dev); |
1385 | diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c |
1386 | index 0687e64..8f06cca 100644 |
1387 | --- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c |
1388 | +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c |
1389 | @@ -579,8 +579,22 @@ static void |
1390 | init_reserved(struct nvbios_init *init) |
1391 | { |
1392 | u8 opcode = nv_ro08(init->bios, init->offset); |
1393 | - trace("RESERVED\t0x%02x\n", opcode); |
1394 | - init->offset += 1; |
1395 | + u8 length, i; |
1396 | + |
1397 | + switch (opcode) { |
1398 | + case 0xaa: |
1399 | + length = 4; |
1400 | + break; |
1401 | + default: |
1402 | + length = 1; |
1403 | + break; |
1404 | + } |
1405 | + |
1406 | + trace("RESERVED 0x%02x\t", opcode); |
1407 | + for (i = 1; i < length; i++) |
1408 | + cont(" 0x%02x", nv_ro08(init->bios, init->offset + i)); |
1409 | + cont("\n"); |
1410 | + init->offset += length; |
1411 | } |
1412 | |
1413 | /** |
1414 | @@ -2135,6 +2149,7 @@ static struct nvbios_init_opcode { |
1415 | [0x99] = { init_zm_auxch }, |
1416 | [0x9a] = { init_i2c_long_if }, |
1417 | [0xa9] = { init_gpio_ne }, |
1418 | + [0xaa] = { init_reserved }, |
1419 | }; |
1420 | |
1421 | #define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0])) |
1422 | diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c |
1423 | index 128279e..8e5438e 100644 |
1424 | --- a/drivers/gpu/drm/radeon/radeon_asic.c |
1425 | +++ b/drivers/gpu/drm/radeon/radeon_asic.c |
1426 | @@ -926,8 +926,6 @@ static struct radeon_asic r520_asic = { |
1427 | .wait_for_vblank = &avivo_wait_for_vblank, |
1428 | .set_backlight_level = &atombios_set_backlight_level, |
1429 | .get_backlight_level = &atombios_get_backlight_level, |
1430 | - .hdmi_enable = &r600_hdmi_enable, |
1431 | - .hdmi_setmode = &r600_hdmi_setmode, |
1432 | }, |
1433 | .copy = { |
1434 | .blit = &r100_copy_blit, |
1435 | @@ -1115,6 +1113,8 @@ static struct radeon_asic rv6xx_asic = { |
1436 | .wait_for_vblank = &avivo_wait_for_vblank, |
1437 | .set_backlight_level = &atombios_set_backlight_level, |
1438 | .get_backlight_level = &atombios_get_backlight_level, |
1439 | + .hdmi_enable = &r600_hdmi_enable, |
1440 | + .hdmi_setmode = &r600_hdmi_setmode, |
1441 | }, |
1442 | .copy = { |
1443 | .blit = &r600_copy_blit, |
1444 | diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig |
1445 | index 14ef6ab..750adde 100644 |
1446 | --- a/drivers/hid/Kconfig |
1447 | +++ b/drivers/hid/Kconfig |
1448 | @@ -241,6 +241,7 @@ config HID_HOLTEK |
1449 | - Sharkoon Drakonia / Perixx MX-2000 gaming mice |
1450 | - Tracer Sniper TRM-503 / NOVA Gaming Slider X200 / |
1451 | Zalman ZM-GM1 |
1452 | + - SHARKOON DarkGlider Gaming mouse |
1453 | |
1454 | config HOLTEK_FF |
1455 | bool "Holtek On Line Grip force feedback support" |
1456 | diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c |
1457 | index ee75486..9f60d63 100644 |
1458 | --- a/drivers/hid/hid-core.c |
1459 | +++ b/drivers/hid/hid-core.c |
1460 | @@ -1188,7 +1188,8 @@ static void hid_output_field(const struct hid_device *hid, |
1461 | } |
1462 | |
1463 | /* |
1464 | - * Create a report. |
1465 | + * Create a report. 'data' has to be allocated using |
1466 | + * hid_alloc_report_buf() so that it has proper size. |
1467 | */ |
1468 | |
1469 | void hid_output_report(struct hid_report *report, __u8 *data) |
1470 | @@ -1205,6 +1206,22 @@ void hid_output_report(struct hid_report *report, __u8 *data) |
1471 | EXPORT_SYMBOL_GPL(hid_output_report); |
1472 | |
1473 | /* |
1474 | + * Allocator for buffer that is going to be passed to hid_output_report() |
1475 | + */ |
1476 | +u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags) |
1477 | +{ |
1478 | + /* |
1479 | + * 7 extra bytes are necessary to achieve proper functionality |
1480 | + * of implement() working on 8 byte chunks |
1481 | + */ |
1482 | + |
1483 | + int len = ((report->size - 1) >> 3) + 1 + (report->id > 0) + 7; |
1484 | + |
1485 | + return kmalloc(len, flags); |
1486 | +} |
1487 | +EXPORT_SYMBOL_GPL(hid_alloc_report_buf); |
1488 | + |
1489 | +/* |
1490 | * Set a field value. The report this field belongs to has to be |
1491 | * created and transferred to the device, to set this value in the |
1492 | * device. |
1493 | @@ -1656,6 +1673,7 @@ static const struct hid_device_id hid_have_special_driver[] = { |
1494 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) }, |
1495 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, |
1496 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, |
1497 | + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, |
1498 | { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) }, |
1499 | { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, |
1500 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, |
1501 | @@ -1745,6 +1763,7 @@ static const struct hid_device_id hid_have_special_driver[] = { |
1502 | { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) }, |
1503 | { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) }, |
1504 | { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE) }, |
1505 | + { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL) }, |
1506 | { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) }, |
1507 | { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) }, |
1508 | { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) }, |
1509 | diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c |
1510 | index 7e6db3c..e696566 100644 |
1511 | --- a/drivers/hid/hid-holtek-mouse.c |
1512 | +++ b/drivers/hid/hid-holtek-mouse.c |
1513 | @@ -27,6 +27,7 @@ |
1514 | * - USB ID 04d9:a067, sold as Sharkoon Drakonia and Perixx MX-2000 |
1515 | * - USB ID 04d9:a04a, sold as Tracer Sniper TRM-503, NOVA Gaming Slider X200 |
1516 | * and Zalman ZM-GM1 |
1517 | + * - USB ID 04d9:a081, sold as SHARKOON DarkGlider Gaming mouse |
1518 | */ |
1519 | |
1520 | static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, |
1521 | @@ -46,6 +47,7 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, |
1522 | } |
1523 | break; |
1524 | case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A: |
1525 | + case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081: |
1526 | if (*rsize >= 113 && rdesc[106] == 0xff && rdesc[107] == 0x7f |
1527 | && rdesc[111] == 0xff && rdesc[112] == 0x7f) { |
1528 | hid_info(hdev, "Fixing up report descriptor\n"); |
1529 | @@ -63,6 +65,8 @@ static const struct hid_device_id holtek_mouse_devices[] = { |
1530 | USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, |
1531 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, |
1532 | USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, |
1533 | + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, |
1534 | + USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, |
1535 | { } |
1536 | }; |
1537 | MODULE_DEVICE_TABLE(hid, holtek_mouse_devices); |
1538 | diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h |
1539 | index 22134d4..339623c 100644 |
1540 | --- a/drivers/hid/hid-ids.h |
1541 | +++ b/drivers/hid/hid-ids.h |
1542 | @@ -450,6 +450,7 @@ |
1543 | #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055 |
1544 | #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067 0xa067 |
1545 | #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A 0xa04a |
1546 | +#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081 0xa081 |
1547 | |
1548 | #define USB_VENDOR_ID_IMATION 0x0718 |
1549 | #define USB_DEVICE_ID_DISC_STAKKA 0xd000 |
1550 | @@ -718,6 +719,7 @@ |
1551 | #define USB_DEVICE_ID_ROCCAT_KONE 0x2ced |
1552 | #define USB_DEVICE_ID_ROCCAT_KONEPLUS 0x2d51 |
1553 | #define USB_DEVICE_ID_ROCCAT_KONEPURE 0x2dbe |
1554 | +#define USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL 0x2db4 |
1555 | #define USB_DEVICE_ID_ROCCAT_KONEXTD 0x2e22 |
1556 | #define USB_DEVICE_ID_ROCCAT_KOVAPLUS 0x2d50 |
1557 | #define USB_DEVICE_ID_ROCCAT_LUA 0x2c2e |
1558 | diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c |
1559 | index a2469b5..1be9156 100644 |
1560 | --- a/drivers/hid/hid-logitech-dj.c |
1561 | +++ b/drivers/hid/hid-logitech-dj.c |
1562 | @@ -619,7 +619,7 @@ static int logi_dj_ll_input_event(struct input_dev *dev, unsigned int type, |
1563 | |
1564 | struct hid_field *field; |
1565 | struct hid_report *report; |
1566 | - unsigned char data[8]; |
1567 | + unsigned char *data; |
1568 | int offset; |
1569 | |
1570 | dbg_hid("%s: %s, type:%d | code:%d | value:%d\n", |
1571 | @@ -635,6 +635,13 @@ static int logi_dj_ll_input_event(struct input_dev *dev, unsigned int type, |
1572 | return -1; |
1573 | } |
1574 | hid_set_field(field, offset, value); |
1575 | + |
1576 | + data = hid_alloc_report_buf(field->report, GFP_KERNEL); |
1577 | + if (!data) { |
1578 | + dev_warn(&dev->dev, "failed to allocate report buf memory\n"); |
1579 | + return -1; |
1580 | + } |
1581 | + |
1582 | hid_output_report(field->report, &data[0]); |
1583 | |
1584 | output_report_enum = &dj_rcv_hiddev->report_enum[HID_OUTPUT_REPORT]; |
1585 | @@ -645,8 +652,9 @@ static int logi_dj_ll_input_event(struct input_dev *dev, unsigned int type, |
1586 | |
1587 | hid_hw_request(dj_rcv_hiddev, report, HID_REQ_SET_REPORT); |
1588 | |
1589 | - return 0; |
1590 | + kfree(data); |
1591 | |
1592 | + return 0; |
1593 | } |
1594 | |
1595 | static int logi_dj_ll_start(struct hid_device *hid) |
1596 | diff --git a/drivers/hid/hid-picolcd_debugfs.c b/drivers/hid/hid-picolcd_debugfs.c |
1597 | index 59ab8e1..024cdf3 100644 |
1598 | --- a/drivers/hid/hid-picolcd_debugfs.c |
1599 | +++ b/drivers/hid/hid-picolcd_debugfs.c |
1600 | @@ -394,7 +394,7 @@ static void dump_buff_as_hex(char *dst, size_t dst_sz, const u8 *data, |
1601 | void picolcd_debug_out_report(struct picolcd_data *data, |
1602 | struct hid_device *hdev, struct hid_report *report) |
1603 | { |
1604 | - u8 raw_data[70]; |
1605 | + u8 *raw_data; |
1606 | int raw_size = (report->size >> 3) + 1; |
1607 | char *buff; |
1608 | #define BUFF_SZ 256 |
1609 | @@ -407,20 +407,20 @@ void picolcd_debug_out_report(struct picolcd_data *data, |
1610 | if (!buff) |
1611 | return; |
1612 | |
1613 | - snprintf(buff, BUFF_SZ, "\nout report %d (size %d) = ", |
1614 | - report->id, raw_size); |
1615 | - hid_debug_event(hdev, buff); |
1616 | - if (raw_size + 5 > sizeof(raw_data)) { |
1617 | + raw_data = hid_alloc_report_buf(report, GFP_ATOMIC); |
1618 | + if (!raw_data) { |
1619 | kfree(buff); |
1620 | - hid_debug_event(hdev, " TOO BIG\n"); |
1621 | return; |
1622 | - } else { |
1623 | - raw_data[0] = report->id; |
1624 | - hid_output_report(report, raw_data); |
1625 | - dump_buff_as_hex(buff, BUFF_SZ, raw_data, raw_size); |
1626 | - hid_debug_event(hdev, buff); |
1627 | } |
1628 | |
1629 | + snprintf(buff, BUFF_SZ, "\nout report %d (size %d) = ", |
1630 | + report->id, raw_size); |
1631 | + hid_debug_event(hdev, buff); |
1632 | + raw_data[0] = report->id; |
1633 | + hid_output_report(report, raw_data); |
1634 | + dump_buff_as_hex(buff, BUFF_SZ, raw_data, raw_size); |
1635 | + hid_debug_event(hdev, buff); |
1636 | + |
1637 | switch (report->id) { |
1638 | case REPORT_LED_STATE: |
1639 | /* 1 data byte with GPO state */ |
1640 | @@ -644,6 +644,7 @@ void picolcd_debug_out_report(struct picolcd_data *data, |
1641 | break; |
1642 | } |
1643 | wake_up_interruptible(&hdev->debug_wait); |
1644 | + kfree(raw_data); |
1645 | kfree(buff); |
1646 | } |
1647 | |
1648 | diff --git a/drivers/hid/hid-roccat-konepure.c b/drivers/hid/hid-roccat-konepure.c |
1649 | index c79d0b0..5850959 100644 |
1650 | --- a/drivers/hid/hid-roccat-konepure.c |
1651 | +++ b/drivers/hid/hid-roccat-konepure.c |
1652 | @@ -262,6 +262,7 @@ static int konepure_raw_event(struct hid_device *hdev, |
1653 | |
1654 | static const struct hid_device_id konepure_devices[] = { |
1655 | { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE) }, |
1656 | + { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL) }, |
1657 | { } |
1658 | }; |
1659 | |
1660 | @@ -300,5 +301,5 @@ module_init(konepure_init); |
1661 | module_exit(konepure_exit); |
1662 | |
1663 | MODULE_AUTHOR("Stefan Achatz"); |
1664 | -MODULE_DESCRIPTION("USB Roccat KonePure driver"); |
1665 | +MODULE_DESCRIPTION("USB Roccat KonePure/Optical driver"); |
1666 | MODULE_LICENSE("GPL v2"); |
1667 | diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c |
1668 | index 2e7d644..71adf9e 100644 |
1669 | --- a/drivers/hid/hid-wiimote-modules.c |
1670 | +++ b/drivers/hid/hid-wiimote-modules.c |
1671 | @@ -119,12 +119,22 @@ static const struct wiimod_ops wiimod_keys = { |
1672 | * the rumble motor, this flag shouldn't be set. |
1673 | */ |
1674 | |
1675 | +/* used by wiimod_rumble and wiipro_rumble */ |
1676 | +static void wiimod_rumble_worker(struct work_struct *work) |
1677 | +{ |
1678 | + struct wiimote_data *wdata = container_of(work, struct wiimote_data, |
1679 | + rumble_worker); |
1680 | + |
1681 | + spin_lock_irq(&wdata->state.lock); |
1682 | + wiiproto_req_rumble(wdata, wdata->state.cache_rumble); |
1683 | + spin_unlock_irq(&wdata->state.lock); |
1684 | +} |
1685 | + |
1686 | static int wiimod_rumble_play(struct input_dev *dev, void *data, |
1687 | struct ff_effect *eff) |
1688 | { |
1689 | struct wiimote_data *wdata = input_get_drvdata(dev); |
1690 | __u8 value; |
1691 | - unsigned long flags; |
1692 | |
1693 | /* |
1694 | * The wiimote supports only a single rumble motor so if any magnitude |
1695 | @@ -137,9 +147,10 @@ static int wiimod_rumble_play(struct input_dev *dev, void *data, |
1696 | else |
1697 | value = 0; |
1698 | |
1699 | - spin_lock_irqsave(&wdata->state.lock, flags); |
1700 | - wiiproto_req_rumble(wdata, value); |
1701 | - spin_unlock_irqrestore(&wdata->state.lock, flags); |
1702 | + /* Locking state.lock here might deadlock with input_event() calls. |
1703 | + * schedule_work acts as barrier. Merging multiple changes is fine. */ |
1704 | + wdata->state.cache_rumble = value; |
1705 | + schedule_work(&wdata->rumble_worker); |
1706 | |
1707 | return 0; |
1708 | } |
1709 | @@ -147,6 +158,8 @@ static int wiimod_rumble_play(struct input_dev *dev, void *data, |
1710 | static int wiimod_rumble_probe(const struct wiimod_ops *ops, |
1711 | struct wiimote_data *wdata) |
1712 | { |
1713 | + INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker); |
1714 | + |
1715 | set_bit(FF_RUMBLE, wdata->input->ffbit); |
1716 | if (input_ff_create_memless(wdata->input, NULL, wiimod_rumble_play)) |
1717 | return -ENOMEM; |
1718 | @@ -159,6 +172,8 @@ static void wiimod_rumble_remove(const struct wiimod_ops *ops, |
1719 | { |
1720 | unsigned long flags; |
1721 | |
1722 | + cancel_work_sync(&wdata->rumble_worker); |
1723 | + |
1724 | spin_lock_irqsave(&wdata->state.lock, flags); |
1725 | wiiproto_req_rumble(wdata, 0); |
1726 | spin_unlock_irqrestore(&wdata->state.lock, flags); |
1727 | @@ -1731,7 +1746,6 @@ static int wiimod_pro_play(struct input_dev *dev, void *data, |
1728 | { |
1729 | struct wiimote_data *wdata = input_get_drvdata(dev); |
1730 | __u8 value; |
1731 | - unsigned long flags; |
1732 | |
1733 | /* |
1734 | * The wiimote supports only a single rumble motor so if any magnitude |
1735 | @@ -1744,9 +1758,10 @@ static int wiimod_pro_play(struct input_dev *dev, void *data, |
1736 | else |
1737 | value = 0; |
1738 | |
1739 | - spin_lock_irqsave(&wdata->state.lock, flags); |
1740 | - wiiproto_req_rumble(wdata, value); |
1741 | - spin_unlock_irqrestore(&wdata->state.lock, flags); |
1742 | + /* Locking state.lock here might deadlock with input_event() calls. |
1743 | + * schedule_work acts as barrier. Merging multiple changes is fine. */ |
1744 | + wdata->state.cache_rumble = value; |
1745 | + schedule_work(&wdata->rumble_worker); |
1746 | |
1747 | return 0; |
1748 | } |
1749 | @@ -1756,6 +1771,8 @@ static int wiimod_pro_probe(const struct wiimod_ops *ops, |
1750 | { |
1751 | int ret, i; |
1752 | |
1753 | + INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker); |
1754 | + |
1755 | wdata->extension.input = input_allocate_device(); |
1756 | if (!wdata->extension.input) |
1757 | return -ENOMEM; |
1758 | @@ -1817,12 +1834,13 @@ static void wiimod_pro_remove(const struct wiimod_ops *ops, |
1759 | if (!wdata->extension.input) |
1760 | return; |
1761 | |
1762 | + input_unregister_device(wdata->extension.input); |
1763 | + wdata->extension.input = NULL; |
1764 | + cancel_work_sync(&wdata->rumble_worker); |
1765 | + |
1766 | spin_lock_irqsave(&wdata->state.lock, flags); |
1767 | wiiproto_req_rumble(wdata, 0); |
1768 | spin_unlock_irqrestore(&wdata->state.lock, flags); |
1769 | - |
1770 | - input_unregister_device(wdata->extension.input); |
1771 | - wdata->extension.input = NULL; |
1772 | } |
1773 | |
1774 | static const struct wiimod_ops wiimod_pro = { |
1775 | diff --git a/drivers/hid/hid-wiimote.h b/drivers/hid/hid-wiimote.h |
1776 | index f1474f3..75db0c4 100644 |
1777 | --- a/drivers/hid/hid-wiimote.h |
1778 | +++ b/drivers/hid/hid-wiimote.h |
1779 | @@ -133,13 +133,15 @@ struct wiimote_state { |
1780 | __u8 *cmd_read_buf; |
1781 | __u8 cmd_read_size; |
1782 | |
1783 | - /* calibration data */ |
1784 | + /* calibration/cache data */ |
1785 | __u16 calib_bboard[4][3]; |
1786 | + __u8 cache_rumble; |
1787 | }; |
1788 | |
1789 | struct wiimote_data { |
1790 | struct hid_device *hdev; |
1791 | struct input_dev *input; |
1792 | + struct work_struct rumble_worker; |
1793 | struct led_classdev *leds[4]; |
1794 | struct input_dev *accel; |
1795 | struct input_dev *ir; |
1796 | diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c |
1797 | index fc307e0..145a4cb 100644 |
1798 | --- a/drivers/hid/uhid.c |
1799 | +++ b/drivers/hid/uhid.c |
1800 | @@ -640,7 +640,7 @@ static const struct file_operations uhid_fops = { |
1801 | |
1802 | static struct miscdevice uhid_misc = { |
1803 | .fops = &uhid_fops, |
1804 | - .minor = MISC_DYNAMIC_MINOR, |
1805 | + .minor = UHID_MINOR, |
1806 | .name = UHID_NAME, |
1807 | }; |
1808 | |
1809 | @@ -659,3 +659,5 @@ module_exit(uhid_exit); |
1810 | MODULE_LICENSE("GPL"); |
1811 | MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>"); |
1812 | MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem"); |
1813 | +MODULE_ALIAS_MISCDEV(UHID_MINOR); |
1814 | +MODULE_ALIAS("devname:" UHID_NAME); |
1815 | diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c |
1816 | index 9941828..ada164e 100644 |
1817 | --- a/drivers/hid/usbhid/hid-core.c |
1818 | +++ b/drivers/hid/usbhid/hid-core.c |
1819 | @@ -535,7 +535,6 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re |
1820 | { |
1821 | int head; |
1822 | struct usbhid_device *usbhid = hid->driver_data; |
1823 | - int len = ((report->size - 1) >> 3) + 1 + (report->id > 0); |
1824 | |
1825 | if ((hid->quirks & HID_QUIRK_NOGET) && dir == USB_DIR_IN) |
1826 | return; |
1827 | @@ -546,7 +545,7 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re |
1828 | return; |
1829 | } |
1830 | |
1831 | - usbhid->out[usbhid->outhead].raw_report = kmalloc(len, GFP_ATOMIC); |
1832 | + usbhid->out[usbhid->outhead].raw_report = hid_alloc_report_buf(report, GFP_ATOMIC); |
1833 | if (!usbhid->out[usbhid->outhead].raw_report) { |
1834 | hid_warn(hid, "output queueing failed\n"); |
1835 | return; |
1836 | @@ -595,7 +594,7 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re |
1837 | } |
1838 | |
1839 | if (dir == USB_DIR_OUT) { |
1840 | - usbhid->ctrl[usbhid->ctrlhead].raw_report = kmalloc(len, GFP_ATOMIC); |
1841 | + usbhid->ctrl[usbhid->ctrlhead].raw_report = hid_alloc_report_buf(report, GFP_ATOMIC); |
1842 | if (!usbhid->ctrl[usbhid->ctrlhead].raw_report) { |
1843 | hid_warn(hid, "control queueing failed\n"); |
1844 | return; |
1845 | diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c |
1846 | index 653ac6b..6c923c7 100644 |
1847 | --- a/drivers/infiniband/ulp/srpt/ib_srpt.c |
1848 | +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c |
1849 | @@ -1588,7 +1588,7 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch, |
1850 | int resp_data_len; |
1851 | int resp_len; |
1852 | |
1853 | - resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4; |
1854 | + resp_data_len = 4; |
1855 | resp_len = sizeof(*srp_rsp) + resp_data_len; |
1856 | |
1857 | srp_rsp = ioctx->ioctx.buf; |
1858 | @@ -1600,11 +1600,9 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch, |
1859 | + atomic_xchg(&ch->req_lim_delta, 0)); |
1860 | srp_rsp->tag = tag; |
1861 | |
1862 | - if (rsp_code != SRP_TSK_MGMT_SUCCESS) { |
1863 | - srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID; |
1864 | - srp_rsp->resp_data_len = cpu_to_be32(resp_data_len); |
1865 | - srp_rsp->data[3] = rsp_code; |
1866 | - } |
1867 | + srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID; |
1868 | + srp_rsp->resp_data_len = cpu_to_be32(resp_data_len); |
1869 | + srp_rsp->data[3] = rsp_code; |
1870 | |
1871 | return resp_len; |
1872 | } |
1873 | @@ -2358,6 +2356,8 @@ static void srpt_release_channel_work(struct work_struct *w) |
1874 | transport_deregister_session(se_sess); |
1875 | ch->sess = NULL; |
1876 | |
1877 | + ib_destroy_cm_id(ch->cm_id); |
1878 | + |
1879 | srpt_destroy_ch_ib(ch); |
1880 | |
1881 | srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, |
1882 | @@ -2368,8 +2368,6 @@ static void srpt_release_channel_work(struct work_struct *w) |
1883 | list_del(&ch->list); |
1884 | spin_unlock_irq(&sdev->spinlock); |
1885 | |
1886 | - ib_destroy_cm_id(ch->cm_id); |
1887 | - |
1888 | if (ch->release_done) |
1889 | complete(ch->release_done); |
1890 | |
1891 | diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c |
1892 | index ebd0a4c..44e5276 100644 |
1893 | --- a/drivers/iommu/arm-smmu.c |
1894 | +++ b/drivers/iommu/arm-smmu.c |
1895 | @@ -379,6 +379,7 @@ struct arm_smmu_cfg { |
1896 | u32 cbar; |
1897 | pgd_t *pgd; |
1898 | }; |
1899 | +#define INVALID_IRPTNDX 0xff |
1900 | |
1901 | struct arm_smmu_domain { |
1902 | /* |
1903 | @@ -830,7 +831,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, |
1904 | if (IS_ERR_VALUE(ret)) { |
1905 | dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", |
1906 | root_cfg->irptndx, irq); |
1907 | - root_cfg->irptndx = -1; |
1908 | + root_cfg->irptndx = INVALID_IRPTNDX; |
1909 | goto out_free_context; |
1910 | } |
1911 | |
1912 | @@ -855,7 +856,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) |
1913 | if (!smmu) |
1914 | return; |
1915 | |
1916 | - if (root_cfg->irptndx != -1) { |
1917 | + if (root_cfg->irptndx != INVALID_IRPTNDX) { |
1918 | irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx]; |
1919 | free_irq(irq, domain); |
1920 | } |
1921 | @@ -1838,8 +1839,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) |
1922 | goto out_put_parent; |
1923 | } |
1924 | |
1925 | - arm_smmu_device_reset(smmu); |
1926 | - |
1927 | for (i = 0; i < smmu->num_global_irqs; ++i) { |
1928 | err = request_irq(smmu->irqs[i], |
1929 | arm_smmu_global_fault, |
1930 | @@ -1857,6 +1856,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) |
1931 | spin_lock(&arm_smmu_devices_lock); |
1932 | list_add(&smmu->list, &arm_smmu_devices); |
1933 | spin_unlock(&arm_smmu_devices_lock); |
1934 | + |
1935 | + arm_smmu_device_reset(smmu); |
1936 | return 0; |
1937 | |
1938 | out_free_irqs: |
1939 | @@ -1947,10 +1948,10 @@ static int __init arm_smmu_init(void) |
1940 | return ret; |
1941 | |
1942 | /* Oh, for a proper bus abstraction */ |
1943 | - if (!iommu_present(&platform_bus_type)); |
1944 | + if (!iommu_present(&platform_bus_type)) |
1945 | bus_set_iommu(&platform_bus_type, &arm_smmu_ops); |
1946 | |
1947 | - if (!iommu_present(&amba_bustype)); |
1948 | + if (!iommu_present(&amba_bustype)) |
1949 | bus_set_iommu(&amba_bustype, &arm_smmu_ops); |
1950 | |
1951 | return 0; |
1952 | diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c |
1953 | index 71eb233..b6a74bc 100644 |
1954 | --- a/drivers/md/bcache/request.c |
1955 | +++ b/drivers/md/bcache/request.c |
1956 | @@ -996,6 +996,7 @@ static void request_write(struct cached_dev *dc, struct search *s) |
1957 | closure_bio_submit(bio, cl, s->d); |
1958 | } else { |
1959 | bch_writeback_add(dc); |
1960 | + s->op.cache_bio = bio; |
1961 | |
1962 | if (bio->bi_rw & REQ_FLUSH) { |
1963 | /* Also need to send a flush to the backing device */ |
1964 | @@ -1008,8 +1009,6 @@ static void request_write(struct cached_dev *dc, struct search *s) |
1965 | flush->bi_private = cl; |
1966 | |
1967 | closure_bio_submit(flush, cl, s->d); |
1968 | - } else { |
1969 | - s->op.cache_bio = bio; |
1970 | } |
1971 | } |
1972 | out: |
1973 | diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c |
1974 | index cd0b7f4..f4a0bea 100644 |
1975 | --- a/drivers/mmc/card/block.c |
1976 | +++ b/drivers/mmc/card/block.c |
1977 | @@ -2191,10 +2191,10 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md) |
1978 | * is freeing the queue that stops new requests |
1979 | * from being accepted. |
1980 | */ |
1981 | + card = md->queue.card; |
1982 | mmc_cleanup_queue(&md->queue); |
1983 | if (md->flags & MMC_BLK_PACKED_CMD) |
1984 | mmc_packed_clean(&md->queue); |
1985 | - card = md->queue.card; |
1986 | if (md->disk->flags & GENHD_FL_UP) { |
1987 | device_remove_file(disk_to_dev(md->disk), &md->force_ro); |
1988 | if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && |
1989 | diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c |
1990 | index e48cb33..5e31046 100644 |
1991 | --- a/drivers/net/bonding/bond_main.c |
1992 | +++ b/drivers/net/bonding/bond_main.c |
1993 | @@ -1947,6 +1947,7 @@ static int __bond_release_one(struct net_device *bond_dev, |
1994 | struct bonding *bond = netdev_priv(bond_dev); |
1995 | struct slave *slave, *oldcurrent; |
1996 | struct sockaddr addr; |
1997 | + int old_flags = bond_dev->flags; |
1998 | netdev_features_t old_features = bond_dev->features; |
1999 | |
2000 | /* slave is not a slave or master is not master of this slave */ |
2001 | @@ -2077,12 +2078,18 @@ static int __bond_release_one(struct net_device *bond_dev, |
2002 | * bond_change_active_slave(..., NULL) |
2003 | */ |
2004 | if (!USES_PRIMARY(bond->params.mode)) { |
2005 | - /* unset promiscuity level from slave */ |
2006 | - if (bond_dev->flags & IFF_PROMISC) |
2007 | + /* unset promiscuity level from slave |
2008 | + * NOTE: The NETDEV_CHANGEADDR call above may change the value |
2009 | + * of the IFF_PROMISC flag in the bond_dev, but we need the |
2010 | + * value of that flag before that change, as that was the value |
2011 | + * when this slave was attached, so we cache at the start of the |
2012 | + * function and use it here. Same goes for ALLMULTI below |
2013 | + */ |
2014 | + if (old_flags & IFF_PROMISC) |
2015 | dev_set_promiscuity(slave_dev, -1); |
2016 | |
2017 | /* unset allmulti level from slave */ |
2018 | - if (bond_dev->flags & IFF_ALLMULTI) |
2019 | + if (old_flags & IFF_ALLMULTI) |
2020 | dev_set_allmulti(slave_dev, -1); |
2021 | |
2022 | bond_hw_addr_flush(bond_dev, slave_dev); |
2023 | diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c |
2024 | index 55d79cb..9e16014 100644 |
2025 | --- a/drivers/net/ethernet/arc/emac_main.c |
2026 | +++ b/drivers/net/ethernet/arc/emac_main.c |
2027 | @@ -149,8 +149,6 @@ static void arc_emac_tx_clean(struct net_device *ndev) |
2028 | struct sk_buff *skb = tx_buff->skb; |
2029 | unsigned int info = le32_to_cpu(txbd->info); |
2030 | |
2031 | - *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; |
2032 | - |
2033 | if ((info & FOR_EMAC) || !txbd->data) |
2034 | break; |
2035 | |
2036 | @@ -180,6 +178,8 @@ static void arc_emac_tx_clean(struct net_device *ndev) |
2037 | txbd->data = 0; |
2038 | txbd->info = 0; |
2039 | |
2040 | + *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; |
2041 | + |
2042 | if (netif_queue_stopped(ndev)) |
2043 | netif_wake_queue(ndev); |
2044 | } |
2045 | diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c |
2046 | index 1a9c4f6..ecc7f7b 100644 |
2047 | --- a/drivers/net/ethernet/marvell/skge.c |
2048 | +++ b/drivers/net/ethernet/marvell/skge.c |
2049 | @@ -3086,13 +3086,16 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, |
2050 | PCI_DMA_FROMDEVICE); |
2051 | skge_rx_reuse(e, skge->rx_buf_size); |
2052 | } else { |
2053 | + struct skge_element ee; |
2054 | struct sk_buff *nskb; |
2055 | |
2056 | nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size); |
2057 | if (!nskb) |
2058 | goto resubmit; |
2059 | |
2060 | - skb = e->skb; |
2061 | + ee = *e; |
2062 | + |
2063 | + skb = ee.skb; |
2064 | prefetch(skb->data); |
2065 | |
2066 | if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { |
2067 | @@ -3101,8 +3104,8 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, |
2068 | } |
2069 | |
2070 | pci_unmap_single(skge->hw->pdev, |
2071 | - dma_unmap_addr(e, mapaddr), |
2072 | - dma_unmap_len(e, maplen), |
2073 | + dma_unmap_addr(&ee, mapaddr), |
2074 | + dma_unmap_len(&ee, maplen), |
2075 | PCI_DMA_FROMDEVICE); |
2076 | } |
2077 | |
2078 | diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c |
2079 | index 85e5c97..7ba68e0 100644 |
2080 | --- a/drivers/net/ethernet/realtek/r8169.c |
2081 | +++ b/drivers/net/ethernet/realtek/r8169.c |
2082 | @@ -4230,6 +4230,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) |
2083 | case RTL_GIGA_MAC_VER_23: |
2084 | case RTL_GIGA_MAC_VER_24: |
2085 | case RTL_GIGA_MAC_VER_34: |
2086 | + case RTL_GIGA_MAC_VER_35: |
2087 | RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); |
2088 | break; |
2089 | case RTL_GIGA_MAC_VER_40: |
2090 | diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c |
2091 | index a753928..2312677 100644 |
2092 | --- a/drivers/net/ethernet/renesas/sh_eth.c |
2093 | +++ b/drivers/net/ethernet/renesas/sh_eth.c |
2094 | @@ -1857,11 +1857,13 @@ static int sh_eth_open(struct net_device *ndev) |
2095 | |
2096 | pm_runtime_get_sync(&mdp->pdev->dev); |
2097 | |
2098 | + napi_enable(&mdp->napi); |
2099 | + |
2100 | ret = request_irq(ndev->irq, sh_eth_interrupt, |
2101 | mdp->cd->irq_flags, ndev->name, ndev); |
2102 | if (ret) { |
2103 | dev_err(&ndev->dev, "Can not assign IRQ number\n"); |
2104 | - return ret; |
2105 | + goto out_napi_off; |
2106 | } |
2107 | |
2108 | /* Descriptor set */ |
2109 | @@ -1879,12 +1881,12 @@ static int sh_eth_open(struct net_device *ndev) |
2110 | if (ret) |
2111 | goto out_free_irq; |
2112 | |
2113 | - napi_enable(&mdp->napi); |
2114 | - |
2115 | return ret; |
2116 | |
2117 | out_free_irq: |
2118 | free_irq(ndev->irq, ndev); |
2119 | +out_napi_off: |
2120 | + napi_disable(&mdp->napi); |
2121 | pm_runtime_put_sync(&mdp->pdev->dev); |
2122 | return ret; |
2123 | } |
2124 | @@ -1976,8 +1978,6 @@ static int sh_eth_close(struct net_device *ndev) |
2125 | { |
2126 | struct sh_eth_private *mdp = netdev_priv(ndev); |
2127 | |
2128 | - napi_disable(&mdp->napi); |
2129 | - |
2130 | netif_stop_queue(ndev); |
2131 | |
2132 | /* Disable interrupts by clearing the interrupt mask. */ |
2133 | @@ -1995,6 +1995,8 @@ static int sh_eth_close(struct net_device *ndev) |
2134 | |
2135 | free_irq(ndev->irq, ndev); |
2136 | |
2137 | + napi_disable(&mdp->napi); |
2138 | + |
2139 | /* Free all the skbuffs in the Rx queue. */ |
2140 | sh_eth_ring_free(ndev); |
2141 | |
2142 | diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c |
2143 | index b75eb9e..685d8e2 100644 |
2144 | --- a/drivers/net/ethernet/via/via-rhine.c |
2145 | +++ b/drivers/net/ethernet/via/via-rhine.c |
2146 | @@ -32,7 +32,7 @@ |
2147 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
2148 | |
2149 | #define DRV_NAME "via-rhine" |
2150 | -#define DRV_VERSION "1.5.0" |
2151 | +#define DRV_VERSION "1.5.1" |
2152 | #define DRV_RELDATE "2010-10-09" |
2153 | |
2154 | #include <linux/types.h> |
2155 | @@ -1704,7 +1704,12 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb, |
2156 | cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); |
2157 | |
2158 | if (unlikely(vlan_tx_tag_present(skb))) { |
2159 | - rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16); |
2160 | + u16 vid_pcp = vlan_tx_tag_get(skb); |
2161 | + |
2162 | + /* drop CFI/DEI bit, register needs VID and PCP */ |
2163 | + vid_pcp = (vid_pcp & VLAN_VID_MASK) | |
2164 | + ((vid_pcp & VLAN_PRIO_MASK) >> 1); |
2165 | + rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16); |
2166 | /* request tagging */ |
2167 | rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000); |
2168 | } |
2169 | diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c |
2170 | index 58eb448..96cb897 100644 |
2171 | --- a/drivers/net/ethernet/xilinx/ll_temac_main.c |
2172 | +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c |
2173 | @@ -297,6 +297,12 @@ static int temac_dma_bd_init(struct net_device *ndev) |
2174 | lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); |
2175 | lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); |
2176 | |
2177 | + /* Init descriptor indexes */ |
2178 | + lp->tx_bd_ci = 0; |
2179 | + lp->tx_bd_next = 0; |
2180 | + lp->tx_bd_tail = 0; |
2181 | + lp->rx_bd_ci = 0; |
2182 | + |
2183 | return 0; |
2184 | |
2185 | out: |
2186 | diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c |
2187 | index 162464f..7f10588 100644 |
2188 | --- a/drivers/net/ppp/pptp.c |
2189 | +++ b/drivers/net/ppp/pptp.c |
2190 | @@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) |
2191 | nf_reset(skb); |
2192 | |
2193 | skb->ip_summed = CHECKSUM_NONE; |
2194 | - ip_select_ident(iph, &rt->dst, NULL); |
2195 | + ip_select_ident(skb, &rt->dst, NULL); |
2196 | ip_send_check(iph); |
2197 | |
2198 | ip_local_out(skb); |
2199 | diff --git a/drivers/net/tun.c b/drivers/net/tun.c |
2200 | index 71af122..68b9aa3 100644 |
2201 | --- a/drivers/net/tun.c |
2202 | +++ b/drivers/net/tun.c |
2203 | @@ -1691,11 +1691,11 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) |
2204 | INIT_LIST_HEAD(&tun->disabled); |
2205 | err = tun_attach(tun, file); |
2206 | if (err < 0) |
2207 | - goto err_free_dev; |
2208 | + goto err_free_flow; |
2209 | |
2210 | err = register_netdevice(tun->dev); |
2211 | if (err < 0) |
2212 | - goto err_free_dev; |
2213 | + goto err_detach; |
2214 | |
2215 | if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) || |
2216 | device_create_file(&tun->dev->dev, &dev_attr_owner) || |
2217 | @@ -1739,7 +1739,12 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) |
2218 | strcpy(ifr->ifr_name, tun->dev->name); |
2219 | return 0; |
2220 | |
2221 | - err_free_dev: |
2222 | +err_detach: |
2223 | + tun_detach_all(dev); |
2224 | +err_free_flow: |
2225 | + tun_flow_uninit(tun); |
2226 | + security_tun_dev_free_security(tun->security); |
2227 | +err_free_dev: |
2228 | free_netdev(dev); |
2229 | return err; |
2230 | } |
2231 | diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c |
2232 | index 2dbb946..c6867f9 100644 |
2233 | --- a/drivers/net/usb/dm9601.c |
2234 | +++ b/drivers/net/usb/dm9601.c |
2235 | @@ -303,7 +303,7 @@ static void dm9601_set_multicast(struct net_device *net) |
2236 | rx_ctl |= 0x02; |
2237 | } else if (net->flags & IFF_ALLMULTI || |
2238 | netdev_mc_count(net) > DM_MAX_MCAST) { |
2239 | - rx_ctl |= 0x04; |
2240 | + rx_ctl |= 0x08; |
2241 | } else if (!netdev_mc_empty(net)) { |
2242 | struct netdev_hw_addr *ha; |
2243 | |
2244 | diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c |
2245 | index 606eba2..eff8ede 100644 |
2246 | --- a/drivers/net/usb/qmi_wwan.c |
2247 | +++ b/drivers/net/usb/qmi_wwan.c |
2248 | @@ -518,6 +518,135 @@ static const struct usb_device_id products[] = { |
2249 | |
2250 | /* 3. Combined interface devices matching on interface number */ |
2251 | {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ |
2252 | + {QMI_FIXED_INTF(0x05c6, 0x7000, 0)}, |
2253 | + {QMI_FIXED_INTF(0x05c6, 0x7001, 1)}, |
2254 | + {QMI_FIXED_INTF(0x05c6, 0x7002, 1)}, |
2255 | + {QMI_FIXED_INTF(0x05c6, 0x7101, 1)}, |
2256 | + {QMI_FIXED_INTF(0x05c6, 0x7101, 2)}, |
2257 | + {QMI_FIXED_INTF(0x05c6, 0x7101, 3)}, |
2258 | + {QMI_FIXED_INTF(0x05c6, 0x7102, 1)}, |
2259 | + {QMI_FIXED_INTF(0x05c6, 0x7102, 2)}, |
2260 | + {QMI_FIXED_INTF(0x05c6, 0x7102, 3)}, |
2261 | + {QMI_FIXED_INTF(0x05c6, 0x8000, 7)}, |
2262 | + {QMI_FIXED_INTF(0x05c6, 0x8001, 6)}, |
2263 | + {QMI_FIXED_INTF(0x05c6, 0x9000, 4)}, |
2264 | + {QMI_FIXED_INTF(0x05c6, 0x9003, 4)}, |
2265 | + {QMI_FIXED_INTF(0x05c6, 0x9005, 2)}, |
2266 | + {QMI_FIXED_INTF(0x05c6, 0x900a, 4)}, |
2267 | + {QMI_FIXED_INTF(0x05c6, 0x900b, 2)}, |
2268 | + {QMI_FIXED_INTF(0x05c6, 0x900c, 4)}, |
2269 | + {QMI_FIXED_INTF(0x05c6, 0x900c, 5)}, |
2270 | + {QMI_FIXED_INTF(0x05c6, 0x900c, 6)}, |
2271 | + {QMI_FIXED_INTF(0x05c6, 0x900d, 5)}, |
2272 | + {QMI_FIXED_INTF(0x05c6, 0x900f, 3)}, |
2273 | + {QMI_FIXED_INTF(0x05c6, 0x900f, 4)}, |
2274 | + {QMI_FIXED_INTF(0x05c6, 0x900f, 5)}, |
2275 | + {QMI_FIXED_INTF(0x05c6, 0x9010, 4)}, |
2276 | + {QMI_FIXED_INTF(0x05c6, 0x9010, 5)}, |
2277 | + {QMI_FIXED_INTF(0x05c6, 0x9011, 3)}, |
2278 | + {QMI_FIXED_INTF(0x05c6, 0x9011, 4)}, |
2279 | + {QMI_FIXED_INTF(0x05c6, 0x9021, 1)}, |
2280 | + {QMI_FIXED_INTF(0x05c6, 0x9022, 2)}, |
2281 | + {QMI_FIXED_INTF(0x05c6, 0x9025, 4)}, /* Alcatel-sbell ASB TL131 TDD LTE (China Mobile) */ |
2282 | + {QMI_FIXED_INTF(0x05c6, 0x9026, 3)}, |
2283 | + {QMI_FIXED_INTF(0x05c6, 0x902e, 5)}, |
2284 | + {QMI_FIXED_INTF(0x05c6, 0x9031, 5)}, |
2285 | + {QMI_FIXED_INTF(0x05c6, 0x9032, 4)}, |
2286 | + {QMI_FIXED_INTF(0x05c6, 0x9033, 3)}, |
2287 | + {QMI_FIXED_INTF(0x05c6, 0x9033, 4)}, |
2288 | + {QMI_FIXED_INTF(0x05c6, 0x9033, 5)}, |
2289 | + {QMI_FIXED_INTF(0x05c6, 0x9033, 6)}, |
2290 | + {QMI_FIXED_INTF(0x05c6, 0x9034, 3)}, |
2291 | + {QMI_FIXED_INTF(0x05c6, 0x9034, 4)}, |
2292 | + {QMI_FIXED_INTF(0x05c6, 0x9034, 5)}, |
2293 | + {QMI_FIXED_INTF(0x05c6, 0x9034, 6)}, |
2294 | + {QMI_FIXED_INTF(0x05c6, 0x9034, 7)}, |
2295 | + {QMI_FIXED_INTF(0x05c6, 0x9035, 4)}, |
2296 | + {QMI_FIXED_INTF(0x05c6, 0x9036, 3)}, |
2297 | + {QMI_FIXED_INTF(0x05c6, 0x9037, 5)}, |
2298 | + {QMI_FIXED_INTF(0x05c6, 0x9038, 4)}, |
2299 | + {QMI_FIXED_INTF(0x05c6, 0x903b, 7)}, |
2300 | + {QMI_FIXED_INTF(0x05c6, 0x903c, 6)}, |
2301 | + {QMI_FIXED_INTF(0x05c6, 0x903d, 6)}, |
2302 | + {QMI_FIXED_INTF(0x05c6, 0x903e, 5)}, |
2303 | + {QMI_FIXED_INTF(0x05c6, 0x9043, 3)}, |
2304 | + {QMI_FIXED_INTF(0x05c6, 0x9046, 3)}, |
2305 | + {QMI_FIXED_INTF(0x05c6, 0x9046, 4)}, |
2306 | + {QMI_FIXED_INTF(0x05c6, 0x9046, 5)}, |
2307 | + {QMI_FIXED_INTF(0x05c6, 0x9047, 2)}, |
2308 | + {QMI_FIXED_INTF(0x05c6, 0x9047, 3)}, |
2309 | + {QMI_FIXED_INTF(0x05c6, 0x9047, 4)}, |
2310 | + {QMI_FIXED_INTF(0x05c6, 0x9048, 4)}, |
2311 | + {QMI_FIXED_INTF(0x05c6, 0x9048, 5)}, |
2312 | + {QMI_FIXED_INTF(0x05c6, 0x9048, 6)}, |
2313 | + {QMI_FIXED_INTF(0x05c6, 0x9048, 7)}, |
2314 | + {QMI_FIXED_INTF(0x05c6, 0x9048, 8)}, |
2315 | + {QMI_FIXED_INTF(0x05c6, 0x904c, 5)}, |
2316 | + {QMI_FIXED_INTF(0x05c6, 0x904c, 6)}, |
2317 | + {QMI_FIXED_INTF(0x05c6, 0x904c, 7)}, |
2318 | + {QMI_FIXED_INTF(0x05c6, 0x904c, 8)}, |
2319 | + {QMI_FIXED_INTF(0x05c6, 0x9050, 3)}, |
2320 | + {QMI_FIXED_INTF(0x05c6, 0x9052, 4)}, |
2321 | + {QMI_FIXED_INTF(0x05c6, 0x9053, 6)}, |
2322 | + {QMI_FIXED_INTF(0x05c6, 0x9053, 7)}, |
2323 | + {QMI_FIXED_INTF(0x05c6, 0x9054, 5)}, |
2324 | + {QMI_FIXED_INTF(0x05c6, 0x9054, 6)}, |
2325 | + {QMI_FIXED_INTF(0x05c6, 0x9055, 3)}, |
2326 | + {QMI_FIXED_INTF(0x05c6, 0x9055, 4)}, |
2327 | + {QMI_FIXED_INTF(0x05c6, 0x9055, 5)}, |
2328 | + {QMI_FIXED_INTF(0x05c6, 0x9055, 6)}, |
2329 | + {QMI_FIXED_INTF(0x05c6, 0x9055, 7)}, |
2330 | + {QMI_FIXED_INTF(0x05c6, 0x9056, 3)}, |
2331 | + {QMI_FIXED_INTF(0x05c6, 0x9062, 2)}, |
2332 | + {QMI_FIXED_INTF(0x05c6, 0x9062, 3)}, |
2333 | + {QMI_FIXED_INTF(0x05c6, 0x9062, 4)}, |
2334 | + {QMI_FIXED_INTF(0x05c6, 0x9062, 5)}, |
2335 | + {QMI_FIXED_INTF(0x05c6, 0x9062, 6)}, |
2336 | + {QMI_FIXED_INTF(0x05c6, 0x9062, 7)}, |
2337 | + {QMI_FIXED_INTF(0x05c6, 0x9062, 8)}, |
2338 | + {QMI_FIXED_INTF(0x05c6, 0x9062, 9)}, |
2339 | + {QMI_FIXED_INTF(0x05c6, 0x9064, 3)}, |
2340 | + {QMI_FIXED_INTF(0x05c6, 0x9065, 6)}, |
2341 | + {QMI_FIXED_INTF(0x05c6, 0x9065, 7)}, |
2342 | + {QMI_FIXED_INTF(0x05c6, 0x9066, 5)}, |
2343 | + {QMI_FIXED_INTF(0x05c6, 0x9066, 6)}, |
2344 | + {QMI_FIXED_INTF(0x05c6, 0x9067, 1)}, |
2345 | + {QMI_FIXED_INTF(0x05c6, 0x9068, 2)}, |
2346 | + {QMI_FIXED_INTF(0x05c6, 0x9068, 3)}, |
2347 | + {QMI_FIXED_INTF(0x05c6, 0x9068, 4)}, |
2348 | + {QMI_FIXED_INTF(0x05c6, 0x9068, 5)}, |
2349 | + {QMI_FIXED_INTF(0x05c6, 0x9068, 6)}, |
2350 | + {QMI_FIXED_INTF(0x05c6, 0x9068, 7)}, |
2351 | + {QMI_FIXED_INTF(0x05c6, 0x9069, 5)}, |
2352 | + {QMI_FIXED_INTF(0x05c6, 0x9069, 6)}, |
2353 | + {QMI_FIXED_INTF(0x05c6, 0x9069, 7)}, |
2354 | + {QMI_FIXED_INTF(0x05c6, 0x9069, 8)}, |
2355 | + {QMI_FIXED_INTF(0x05c6, 0x9070, 4)}, |
2356 | + {QMI_FIXED_INTF(0x05c6, 0x9070, 5)}, |
2357 | + {QMI_FIXED_INTF(0x05c6, 0x9075, 5)}, |
2358 | + {QMI_FIXED_INTF(0x05c6, 0x9076, 4)}, |
2359 | + {QMI_FIXED_INTF(0x05c6, 0x9076, 5)}, |
2360 | + {QMI_FIXED_INTF(0x05c6, 0x9076, 6)}, |
2361 | + {QMI_FIXED_INTF(0x05c6, 0x9076, 7)}, |
2362 | + {QMI_FIXED_INTF(0x05c6, 0x9076, 8)}, |
2363 | + {QMI_FIXED_INTF(0x05c6, 0x9077, 3)}, |
2364 | + {QMI_FIXED_INTF(0x05c6, 0x9077, 4)}, |
2365 | + {QMI_FIXED_INTF(0x05c6, 0x9077, 5)}, |
2366 | + {QMI_FIXED_INTF(0x05c6, 0x9077, 6)}, |
2367 | + {QMI_FIXED_INTF(0x05c6, 0x9078, 3)}, |
2368 | + {QMI_FIXED_INTF(0x05c6, 0x9079, 4)}, |
2369 | + {QMI_FIXED_INTF(0x05c6, 0x9079, 5)}, |
2370 | + {QMI_FIXED_INTF(0x05c6, 0x9079, 6)}, |
2371 | + {QMI_FIXED_INTF(0x05c6, 0x9079, 7)}, |
2372 | + {QMI_FIXED_INTF(0x05c6, 0x9079, 8)}, |
2373 | + {QMI_FIXED_INTF(0x05c6, 0x9080, 5)}, |
2374 | + {QMI_FIXED_INTF(0x05c6, 0x9080, 6)}, |
2375 | + {QMI_FIXED_INTF(0x05c6, 0x9080, 7)}, |
2376 | + {QMI_FIXED_INTF(0x05c6, 0x9080, 8)}, |
2377 | + {QMI_FIXED_INTF(0x05c6, 0x9083, 3)}, |
2378 | + {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, |
2379 | + {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, |
2380 | + {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, |
2381 | {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ |
2382 | {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ |
2383 | {QMI_FIXED_INTF(0x19d2, 0x0002, 1)}, |
2384 | @@ -612,7 +741,6 @@ static const struct usb_device_id products[] = { |
2385 | {QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ |
2386 | {QMI_GOBI_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */ |
2387 | {QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */ |
2388 | - {QMI_GOBI_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */ |
2389 | {QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */ |
2390 | {QMI_GOBI_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */ |
2391 | {QMI_GOBI_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */ |
2392 | diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c |
2393 | index 767f7af..8a05d77 100644 |
2394 | --- a/drivers/net/vxlan.c |
2395 | +++ b/drivers/net/vxlan.c |
2396 | @@ -1767,15 +1767,17 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, |
2397 | |
2398 | SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops); |
2399 | |
2400 | - /* create an fdb entry for default destination */ |
2401 | - err = vxlan_fdb_create(vxlan, all_zeros_mac, |
2402 | - vxlan->default_dst.remote_ip, |
2403 | - NUD_REACHABLE|NUD_PERMANENT, |
2404 | - NLM_F_EXCL|NLM_F_CREATE, |
2405 | - vxlan->dst_port, vxlan->default_dst.remote_vni, |
2406 | - vxlan->default_dst.remote_ifindex, NTF_SELF); |
2407 | - if (err) |
2408 | - return err; |
2409 | + /* create an fdb entry for a valid default destination */ |
2410 | + if (vxlan->default_dst.remote_ip != htonl(INADDR_ANY)) { |
2411 | + err = vxlan_fdb_create(vxlan, all_zeros_mac, |
2412 | + vxlan->default_dst.remote_ip, |
2413 | + NUD_REACHABLE|NUD_PERMANENT, |
2414 | + NLM_F_EXCL|NLM_F_CREATE, |
2415 | + vxlan->dst_port, vxlan->default_dst.remote_vni, |
2416 | + vxlan->default_dst.remote_ifindex, NTF_SELF); |
2417 | + if (err) |
2418 | + return err; |
2419 | + } |
2420 | |
2421 | err = register_netdevice(dev); |
2422 | if (err) { |
2423 | diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c |
2424 | index 289e386..6e1ad1c 100644 |
2425 | --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c |
2426 | +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c |
2427 | @@ -465,8 +465,6 @@ static struct sdio_driver brcmf_sdmmc_driver = { |
2428 | |
2429 | static int brcmf_sdio_pd_probe(struct platform_device *pdev) |
2430 | { |
2431 | - int ret; |
2432 | - |
2433 | brcmf_dbg(SDIO, "Enter\n"); |
2434 | |
2435 | brcmfmac_sdio_pdata = pdev->dev.platform_data; |
2436 | @@ -474,11 +472,7 @@ static int brcmf_sdio_pd_probe(struct platform_device *pdev) |
2437 | if (brcmfmac_sdio_pdata->power_on) |
2438 | brcmfmac_sdio_pdata->power_on(); |
2439 | |
2440 | - ret = sdio_register_driver(&brcmf_sdmmc_driver); |
2441 | - if (ret) |
2442 | - brcmf_err("sdio_register_driver failed: %d\n", ret); |
2443 | - |
2444 | - return ret; |
2445 | + return 0; |
2446 | } |
2447 | |
2448 | static int brcmf_sdio_pd_remove(struct platform_device *pdev) |
2449 | @@ -501,6 +495,15 @@ static struct platform_driver brcmf_sdio_pd = { |
2450 | } |
2451 | }; |
2452 | |
2453 | +void brcmf_sdio_register(void) |
2454 | +{ |
2455 | + int ret; |
2456 | + |
2457 | + ret = sdio_register_driver(&brcmf_sdmmc_driver); |
2458 | + if (ret) |
2459 | + brcmf_err("sdio_register_driver failed: %d\n", ret); |
2460 | +} |
2461 | + |
2462 | void brcmf_sdio_exit(void) |
2463 | { |
2464 | brcmf_dbg(SDIO, "Enter\n"); |
2465 | @@ -511,18 +514,13 @@ void brcmf_sdio_exit(void) |
2466 | sdio_unregister_driver(&brcmf_sdmmc_driver); |
2467 | } |
2468 | |
2469 | -void brcmf_sdio_init(void) |
2470 | +void __init brcmf_sdio_init(void) |
2471 | { |
2472 | int ret; |
2473 | |
2474 | brcmf_dbg(SDIO, "Enter\n"); |
2475 | |
2476 | ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe); |
2477 | - if (ret == -ENODEV) { |
2478 | - brcmf_dbg(SDIO, "No platform data available, registering without.\n"); |
2479 | - ret = sdio_register_driver(&brcmf_sdmmc_driver); |
2480 | - } |
2481 | - |
2482 | - if (ret) |
2483 | - brcmf_err("driver registration failed: %d\n", ret); |
2484 | + if (ret == -ENODEV) |
2485 | + brcmf_dbg(SDIO, "No platform data available.\n"); |
2486 | } |
2487 | diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h |
2488 | index 080395f..e715d33 100644 |
2489 | --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h |
2490 | +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h |
2491 | @@ -154,10 +154,11 @@ extern int brcmf_bus_start(struct device *dev); |
2492 | #ifdef CONFIG_BRCMFMAC_SDIO |
2493 | extern void brcmf_sdio_exit(void); |
2494 | extern void brcmf_sdio_init(void); |
2495 | +extern void brcmf_sdio_register(void); |
2496 | #endif |
2497 | #ifdef CONFIG_BRCMFMAC_USB |
2498 | extern void brcmf_usb_exit(void); |
2499 | -extern void brcmf_usb_init(void); |
2500 | +extern void brcmf_usb_register(void); |
2501 | #endif |
2502 | |
2503 | #endif /* _BRCMF_BUS_H_ */ |
2504 | diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c |
2505 | index 8009901..65b2a49 100644 |
2506 | --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c |
2507 | +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c |
2508 | @@ -1020,21 +1020,23 @@ u32 brcmf_get_chip_info(struct brcmf_if *ifp) |
2509 | return bus->chip << 4 | bus->chiprev; |
2510 | } |
2511 | |
2512 | -static void brcmf_driver_init(struct work_struct *work) |
2513 | +static void brcmf_driver_register(struct work_struct *work) |
2514 | { |
2515 | - brcmf_debugfs_init(); |
2516 | - |
2517 | #ifdef CONFIG_BRCMFMAC_SDIO |
2518 | - brcmf_sdio_init(); |
2519 | + brcmf_sdio_register(); |
2520 | #endif |
2521 | #ifdef CONFIG_BRCMFMAC_USB |
2522 | - brcmf_usb_init(); |
2523 | + brcmf_usb_register(); |
2524 | #endif |
2525 | } |
2526 | -static DECLARE_WORK(brcmf_driver_work, brcmf_driver_init); |
2527 | +static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register); |
2528 | |
2529 | static int __init brcmfmac_module_init(void) |
2530 | { |
2531 | + brcmf_debugfs_init(); |
2532 | +#ifdef CONFIG_BRCMFMAC_SDIO |
2533 | + brcmf_sdio_init(); |
2534 | +#endif |
2535 | if (!schedule_work(&brcmf_driver_work)) |
2536 | return -EBUSY; |
2537 | |
2538 | diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c |
2539 | index 322cadc..2904ec4 100644 |
2540 | --- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c |
2541 | +++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c |
2542 | @@ -1540,7 +1540,7 @@ void brcmf_usb_exit(void) |
2543 | brcmf_release_fw(&fw_image_list); |
2544 | } |
2545 | |
2546 | -void brcmf_usb_init(void) |
2547 | +void brcmf_usb_register(void) |
2548 | { |
2549 | brcmf_dbg(USB, "Enter\n"); |
2550 | INIT_LIST_HEAD(&fw_image_list); |
2551 | diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c |
2552 | index a78e065..d69d024 100644 |
2553 | --- a/drivers/net/wireless/mwifiex/11n_aggr.c |
2554 | +++ b/drivers/net/wireless/mwifiex/11n_aggr.c |
2555 | @@ -149,7 +149,7 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv, |
2556 | */ |
2557 | int |
2558 | mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, |
2559 | - struct mwifiex_ra_list_tbl *pra_list, int headroom, |
2560 | + struct mwifiex_ra_list_tbl *pra_list, |
2561 | int ptrindex, unsigned long ra_list_flags) |
2562 | __releases(&priv->wmm.ra_list_spinlock) |
2563 | { |
2564 | @@ -159,6 +159,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, |
2565 | int pad = 0, ret; |
2566 | struct mwifiex_tx_param tx_param; |
2567 | struct txpd *ptx_pd = NULL; |
2568 | + int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN; |
2569 | |
2570 | skb_src = skb_peek(&pra_list->skb_head); |
2571 | if (!skb_src) { |
2572 | diff --git a/drivers/net/wireless/mwifiex/11n_aggr.h b/drivers/net/wireless/mwifiex/11n_aggr.h |
2573 | index 900e1c6..892098d 100644 |
2574 | --- a/drivers/net/wireless/mwifiex/11n_aggr.h |
2575 | +++ b/drivers/net/wireless/mwifiex/11n_aggr.h |
2576 | @@ -26,7 +26,7 @@ |
2577 | int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv, |
2578 | struct sk_buff *skb); |
2579 | int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, |
2580 | - struct mwifiex_ra_list_tbl *ptr, int headroom, |
2581 | + struct mwifiex_ra_list_tbl *ptr, |
2582 | int ptr_index, unsigned long flags) |
2583 | __releases(&priv->wmm.ra_list_spinlock); |
2584 | |
2585 | diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c |
2586 | index 2d76147..a6c46f3 100644 |
2587 | --- a/drivers/net/wireless/mwifiex/cmdevt.c |
2588 | +++ b/drivers/net/wireless/mwifiex/cmdevt.c |
2589 | @@ -1155,7 +1155,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv, |
2590 | uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions); |
2591 | |
2592 | if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) && |
2593 | - adapter->iface_type == MWIFIEX_SDIO) { |
2594 | + adapter->iface_type != MWIFIEX_USB) { |
2595 | mwifiex_hs_activated_event(priv, true); |
2596 | return 0; |
2597 | } else { |
2598 | @@ -1167,8 +1167,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv, |
2599 | } |
2600 | if (conditions != HS_CFG_CANCEL) { |
2601 | adapter->is_hs_configured = true; |
2602 | - if (adapter->iface_type == MWIFIEX_USB || |
2603 | - adapter->iface_type == MWIFIEX_PCIE) |
2604 | + if (adapter->iface_type == MWIFIEX_USB) |
2605 | mwifiex_hs_activated_event(priv, true); |
2606 | } else { |
2607 | adapter->is_hs_configured = false; |
2608 | diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c |
2609 | index f90fe21..b7adf3d 100644 |
2610 | --- a/drivers/net/wireless/mwifiex/usb.c |
2611 | +++ b/drivers/net/wireless/mwifiex/usb.c |
2612 | @@ -446,9 +446,6 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message) |
2613 | */ |
2614 | adapter->is_suspended = true; |
2615 | |
2616 | - for (i = 0; i < adapter->priv_num; i++) |
2617 | - netif_carrier_off(adapter->priv[i]->netdev); |
2618 | - |
2619 | if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb) |
2620 | usb_kill_urb(card->rx_cmd.urb); |
2621 | |
2622 | @@ -508,10 +505,6 @@ static int mwifiex_usb_resume(struct usb_interface *intf) |
2623 | MWIFIEX_RX_CMD_BUF_SIZE); |
2624 | } |
2625 | |
2626 | - for (i = 0; i < adapter->priv_num; i++) |
2627 | - if (adapter->priv[i]->media_connected) |
2628 | - netif_carrier_on(adapter->priv[i]->netdev); |
2629 | - |
2630 | /* Disable Host Sleep */ |
2631 | if (adapter->hs_activated) |
2632 | mwifiex_cancel_hs(mwifiex_get_priv(adapter, |
2633 | diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c |
2634 | index 944e884..fbf0915 100644 |
2635 | --- a/drivers/net/wireless/mwifiex/wmm.c |
2636 | +++ b/drivers/net/wireless/mwifiex/wmm.c |
2637 | @@ -1239,8 +1239,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter) |
2638 | if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) && |
2639 | mwifiex_is_11n_aggragation_possible(priv, ptr, |
2640 | adapter->tx_buf_size)) |
2641 | - mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN, |
2642 | - ptr_index, flags); |
2643 | + mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags); |
2644 | /* ra_list_spinlock has been freed in |
2645 | mwifiex_11n_aggregate_pkt() */ |
2646 | else |
2647 | diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c |
2648 | index b9deef6..f42dc3c 100644 |
2649 | --- a/drivers/net/wireless/p54/p54usb.c |
2650 | +++ b/drivers/net/wireless/p54/p54usb.c |
2651 | @@ -83,6 +83,7 @@ static struct usb_device_id p54u_table[] = { |
2652 | {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */ |
2653 | {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ |
2654 | {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ |
2655 | + {USB_DEVICE(0x07aa, 0x0020)}, /* Corega WLUSB2GTST USB */ |
2656 | {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */ |
2657 | {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ |
2658 | {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */ |
2659 | diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h |
2660 | index cc03e7c..7032587 100644 |
2661 | --- a/drivers/net/wireless/rtlwifi/wifi.h |
2662 | +++ b/drivers/net/wireless/rtlwifi/wifi.h |
2663 | @@ -2057,7 +2057,7 @@ struct rtl_priv { |
2664 | that it points to the data allocated |
2665 | beyond this structure like: |
2666 | rtl_pci_priv or rtl_usb_priv */ |
2667 | - u8 priv[0]; |
2668 | + u8 priv[0] __aligned(sizeof(void *)); |
2669 | }; |
2670 | |
2671 | #define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv)) |
2672 | diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c |
2673 | index 64828de..fc7867c 100644 |
2674 | --- a/drivers/net/xen-netback/netback.c |
2675 | +++ b/drivers/net/xen-netback/netback.c |
2676 | @@ -361,6 +361,49 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head) |
2677 | return false; |
2678 | } |
2679 | |
2680 | +struct xenvif_count_slot_state { |
2681 | + unsigned long copy_off; |
2682 | + bool head; |
2683 | +}; |
2684 | + |
2685 | +unsigned int xenvif_count_frag_slots(struct xenvif *vif, |
2686 | + unsigned long offset, unsigned long size, |
2687 | + struct xenvif_count_slot_state *state) |
2688 | +{ |
2689 | + unsigned count = 0; |
2690 | + |
2691 | + offset &= ~PAGE_MASK; |
2692 | + |
2693 | + while (size > 0) { |
2694 | + unsigned long bytes; |
2695 | + |
2696 | + bytes = PAGE_SIZE - offset; |
2697 | + |
2698 | + if (bytes > size) |
2699 | + bytes = size; |
2700 | + |
2701 | + if (start_new_rx_buffer(state->copy_off, bytes, state->head)) { |
2702 | + count++; |
2703 | + state->copy_off = 0; |
2704 | + } |
2705 | + |
2706 | + if (state->copy_off + bytes > MAX_BUFFER_OFFSET) |
2707 | + bytes = MAX_BUFFER_OFFSET - state->copy_off; |
2708 | + |
2709 | + state->copy_off += bytes; |
2710 | + |
2711 | + offset += bytes; |
2712 | + size -= bytes; |
2713 | + |
2714 | + if (offset == PAGE_SIZE) |
2715 | + offset = 0; |
2716 | + |
2717 | + state->head = false; |
2718 | + } |
2719 | + |
2720 | + return count; |
2721 | +} |
2722 | + |
2723 | /* |
2724 | * Figure out how many ring slots we're going to need to send @skb to |
2725 | * the guest. This function is essentially a dry run of |
2726 | @@ -368,48 +411,39 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head) |
2727 | */ |
2728 | unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb) |
2729 | { |
2730 | + struct xenvif_count_slot_state state; |
2731 | unsigned int count; |
2732 | - int i, copy_off; |
2733 | + unsigned char *data; |
2734 | + unsigned i; |
2735 | |
2736 | - count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE); |
2737 | + state.head = true; |
2738 | + state.copy_off = 0; |
2739 | |
2740 | - copy_off = skb_headlen(skb) % PAGE_SIZE; |
2741 | + /* Slot for the first (partial) page of data. */ |
2742 | + count = 1; |
2743 | |
2744 | + /* Need a slot for the GSO prefix for GSO extra data? */ |
2745 | if (skb_shinfo(skb)->gso_size) |
2746 | count++; |
2747 | |
2748 | - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
2749 | - unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
2750 | - unsigned long offset = skb_shinfo(skb)->frags[i].page_offset; |
2751 | - unsigned long bytes; |
2752 | - |
2753 | - offset &= ~PAGE_MASK; |
2754 | - |
2755 | - while (size > 0) { |
2756 | - BUG_ON(offset >= PAGE_SIZE); |
2757 | - BUG_ON(copy_off > MAX_BUFFER_OFFSET); |
2758 | - |
2759 | - bytes = PAGE_SIZE - offset; |
2760 | - |
2761 | - if (bytes > size) |
2762 | - bytes = size; |
2763 | + data = skb->data; |
2764 | + while (data < skb_tail_pointer(skb)) { |
2765 | + unsigned long offset = offset_in_page(data); |
2766 | + unsigned long size = PAGE_SIZE - offset; |
2767 | |
2768 | - if (start_new_rx_buffer(copy_off, bytes, 0)) { |
2769 | - count++; |
2770 | - copy_off = 0; |
2771 | - } |
2772 | + if (data + size > skb_tail_pointer(skb)) |
2773 | + size = skb_tail_pointer(skb) - data; |
2774 | |
2775 | - if (copy_off + bytes > MAX_BUFFER_OFFSET) |
2776 | - bytes = MAX_BUFFER_OFFSET - copy_off; |
2777 | + count += xenvif_count_frag_slots(vif, offset, size, &state); |
2778 | |
2779 | - copy_off += bytes; |
2780 | + data += size; |
2781 | + } |
2782 | |
2783 | - offset += bytes; |
2784 | - size -= bytes; |
2785 | + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
2786 | + unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
2787 | + unsigned long offset = skb_shinfo(skb)->frags[i].page_offset; |
2788 | |
2789 | - if (offset == PAGE_SIZE) |
2790 | - offset = 0; |
2791 | - } |
2792 | + count += xenvif_count_frag_slots(vif, offset, size, &state); |
2793 | } |
2794 | return count; |
2795 | } |
2796 | diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c |
2797 | index 3753ed0..bec4ff9 100644 |
2798 | --- a/drivers/regulator/ti-abb-regulator.c |
2799 | +++ b/drivers/regulator/ti-abb-regulator.c |
2800 | @@ -279,8 +279,12 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb, |
2801 | ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, regs->control_reg, |
2802 | abb->base); |
2803 | |
2804 | - /* program LDO VBB vset override if needed */ |
2805 | - if (abb->ldo_base) |
2806 | + /* |
2807 | + * program LDO VBB vset override if needed for !bypass mode |
2808 | + * XXX: Do not switch sequence - for !bypass, LDO override reset *must* |
2809 | + * be performed *before* switch to bias mode else VBB glitches. |
2810 | + */ |
2811 | + if (abb->ldo_base && info->opp_sel != TI_ABB_NOMINAL_OPP) |
2812 | ti_abb_program_ldovbb(dev, abb, info); |
2813 | |
2814 | /* Initiate ABB ldo change */ |
2815 | @@ -295,6 +299,14 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb, |
2816 | if (ret) |
2817 | goto out; |
2818 | |
2819 | + /* |
2820 | + * Reset LDO VBB vset override bypass mode |
2821 | + * XXX: Do not switch sequence - for bypass, LDO override reset *must* |
2822 | + * be performed *after* switch to bypass else VBB glitches. |
2823 | + */ |
2824 | + if (abb->ldo_base && info->opp_sel == TI_ABB_NOMINAL_OPP) |
2825 | + ti_abb_program_ldovbb(dev, abb, info); |
2826 | + |
2827 | out: |
2828 | return ret; |
2829 | } |
2830 | diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c |
2831 | index 34552bf..55548dc 100644 |
2832 | --- a/drivers/scsi/esp_scsi.c |
2833 | +++ b/drivers/scsi/esp_scsi.c |
2834 | @@ -530,7 +530,7 @@ static int esp_need_to_nego_sync(struct esp_target_data *tp) |
2835 | static int esp_alloc_lun_tag(struct esp_cmd_entry *ent, |
2836 | struct esp_lun_data *lp) |
2837 | { |
2838 | - if (!ent->tag[0]) { |
2839 | + if (!ent->orig_tag[0]) { |
2840 | /* Non-tagged, slot already taken? */ |
2841 | if (lp->non_tagged_cmd) |
2842 | return -EBUSY; |
2843 | @@ -564,9 +564,9 @@ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent, |
2844 | return -EBUSY; |
2845 | } |
2846 | |
2847 | - BUG_ON(lp->tagged_cmds[ent->tag[1]]); |
2848 | + BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]); |
2849 | |
2850 | - lp->tagged_cmds[ent->tag[1]] = ent; |
2851 | + lp->tagged_cmds[ent->orig_tag[1]] = ent; |
2852 | lp->num_tagged++; |
2853 | |
2854 | return 0; |
2855 | @@ -575,9 +575,9 @@ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent, |
2856 | static void esp_free_lun_tag(struct esp_cmd_entry *ent, |
2857 | struct esp_lun_data *lp) |
2858 | { |
2859 | - if (ent->tag[0]) { |
2860 | - BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent); |
2861 | - lp->tagged_cmds[ent->tag[1]] = NULL; |
2862 | + if (ent->orig_tag[0]) { |
2863 | + BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent); |
2864 | + lp->tagged_cmds[ent->orig_tag[1]] = NULL; |
2865 | lp->num_tagged--; |
2866 | } else { |
2867 | BUG_ON(lp->non_tagged_cmd != ent); |
2868 | @@ -667,6 +667,8 @@ static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp) |
2869 | ent->tag[0] = 0; |
2870 | ent->tag[1] = 0; |
2871 | } |
2872 | + ent->orig_tag[0] = ent->tag[0]; |
2873 | + ent->orig_tag[1] = ent->tag[1]; |
2874 | |
2875 | if (esp_alloc_lun_tag(ent, lp) < 0) |
2876 | continue; |
2877 | diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h |
2878 | index 28e22ac..cd68805 100644 |
2879 | --- a/drivers/scsi/esp_scsi.h |
2880 | +++ b/drivers/scsi/esp_scsi.h |
2881 | @@ -271,6 +271,7 @@ struct esp_cmd_entry { |
2882 | #define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */ |
2883 | |
2884 | u8 tag[2]; |
2885 | + u8 orig_tag[2]; |
2886 | |
2887 | u8 status; |
2888 | u8 message; |
2889 | diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c |
2890 | index 42a78de..9ac8f68 100644 |
2891 | --- a/drivers/staging/comedi/drivers/ni_65xx.c |
2892 | +++ b/drivers/staging/comedi/drivers/ni_65xx.c |
2893 | @@ -369,28 +369,23 @@ static int ni_65xx_dio_insn_bits(struct comedi_device *dev, |
2894 | { |
2895 | const struct ni_65xx_board *board = comedi_board(dev); |
2896 | struct ni_65xx_private *devpriv = dev->private; |
2897 | - unsigned base_bitfield_channel; |
2898 | - const unsigned max_ports_per_bitfield = 5; |
2899 | + int base_bitfield_channel; |
2900 | unsigned read_bits = 0; |
2901 | - unsigned j; |
2902 | + int last_port_offset = ni_65xx_port_by_channel(s->n_chan - 1); |
2903 | + int port_offset; |
2904 | |
2905 | base_bitfield_channel = CR_CHAN(insn->chanspec); |
2906 | - for (j = 0; j < max_ports_per_bitfield; ++j) { |
2907 | - const unsigned port_offset = |
2908 | - ni_65xx_port_by_channel(base_bitfield_channel) + j; |
2909 | - const unsigned port = |
2910 | - sprivate(s)->base_port + port_offset; |
2911 | - unsigned base_port_channel; |
2912 | + for (port_offset = ni_65xx_port_by_channel(base_bitfield_channel); |
2913 | + port_offset <= last_port_offset; port_offset++) { |
2914 | + unsigned port = sprivate(s)->base_port + port_offset; |
2915 | + int base_port_channel = port_offset * ni_65xx_channels_per_port; |
2916 | unsigned port_mask, port_data, port_read_bits; |
2917 | - int bitshift; |
2918 | - if (port >= ni_65xx_total_num_ports(board)) |
2919 | + int bitshift = base_port_channel - base_bitfield_channel; |
2920 | + |
2921 | + if (bitshift >= 32) |
2922 | break; |
2923 | - base_port_channel = port_offset * ni_65xx_channels_per_port; |
2924 | port_mask = data[0]; |
2925 | port_data = data[1]; |
2926 | - bitshift = base_port_channel - base_bitfield_channel; |
2927 | - if (bitshift >= 32 || bitshift <= -32) |
2928 | - break; |
2929 | if (bitshift > 0) { |
2930 | port_mask >>= bitshift; |
2931 | port_data >>= bitshift; |
2932 | diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c |
2933 | index 1df06d5..d93bc6b 100644 |
2934 | --- a/drivers/target/iscsi/iscsi_target_util.c |
2935 | +++ b/drivers/target/iscsi/iscsi_target_util.c |
2936 | @@ -735,7 +735,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) |
2937 | * Fallthrough |
2938 | */ |
2939 | case ISCSI_OP_SCSI_TMFUNC: |
2940 | - rc = transport_generic_free_cmd(&cmd->se_cmd, 1); |
2941 | + rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown); |
2942 | if (!rc && shutdown && se_cmd && se_cmd->se_sess) { |
2943 | __iscsit_free_cmd(cmd, true, shutdown); |
2944 | target_put_sess_cmd(se_cmd->se_sess, se_cmd); |
2945 | @@ -751,7 +751,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) |
2946 | se_cmd = &cmd->se_cmd; |
2947 | __iscsit_free_cmd(cmd, true, shutdown); |
2948 | |
2949 | - rc = transport_generic_free_cmd(&cmd->se_cmd, 1); |
2950 | + rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown); |
2951 | if (!rc && shutdown && se_cmd->se_sess) { |
2952 | __iscsit_free_cmd(cmd, true, shutdown); |
2953 | target_put_sess_cmd(se_cmd->se_sess, se_cmd); |
2954 | diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c |
2955 | index 682210d..4fc32c8 100644 |
2956 | --- a/drivers/tty/hvc/hvc_xen.c |
2957 | +++ b/drivers/tty/hvc/hvc_xen.c |
2958 | @@ -636,6 +636,7 @@ struct console xenboot_console = { |
2959 | .name = "xenboot", |
2960 | .write = xenboot_write_console, |
2961 | .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME, |
2962 | + .index = -1, |
2963 | }; |
2964 | #endif /* CONFIG_EARLY_PRINTK */ |
2965 | |
2966 | diff --git a/drivers/usb/chipidea/ci_hdrc_pci.c b/drivers/usb/chipidea/ci_hdrc_pci.c |
2967 | index 042320a..d514332 100644 |
2968 | --- a/drivers/usb/chipidea/ci_hdrc_pci.c |
2969 | +++ b/drivers/usb/chipidea/ci_hdrc_pci.c |
2970 | @@ -129,7 +129,12 @@ static DEFINE_PCI_DEVICE_TABLE(ci_hdrc_pci_id_table) = { |
2971 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0829), |
2972 | .driver_data = (kernel_ulong_t)&penwell_pci_platdata, |
2973 | }, |
2974 | - { 0, 0, 0, 0, 0, 0, 0 /* end: all zeroes */ } |
2975 | + { |
2976 | + /* Intel Clovertrail */ |
2977 | + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe006), |
2978 | + .driver_data = (kernel_ulong_t)&penwell_pci_platdata, |
2979 | + }, |
2980 | + { 0 } /* end: all zeroes */ |
2981 | }; |
2982 | MODULE_DEVICE_TABLE(pci, ci_hdrc_pci_id_table); |
2983 | |
2984 | diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
2985 | index 1cf6f12..80a7104 100644 |
2986 | --- a/drivers/usb/serial/option.c |
2987 | +++ b/drivers/usb/serial/option.c |
2988 | @@ -81,6 +81,7 @@ static void option_instat_callback(struct urb *urb); |
2989 | |
2990 | #define HUAWEI_VENDOR_ID 0x12D1 |
2991 | #define HUAWEI_PRODUCT_E173 0x140C |
2992 | +#define HUAWEI_PRODUCT_E1750 0x1406 |
2993 | #define HUAWEI_PRODUCT_K4505 0x1464 |
2994 | #define HUAWEI_PRODUCT_K3765 0x1465 |
2995 | #define HUAWEI_PRODUCT_K4605 0x14C6 |
2996 | @@ -567,6 +568,8 @@ static const struct usb_device_id option_ids[] = { |
2997 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, |
2998 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), |
2999 | .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, |
3000 | + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff), |
3001 | + .driver_info = (kernel_ulong_t) &net_intf2_blacklist }, |
3002 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) }, |
3003 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) }, |
3004 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), |
3005 | diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c |
3006 | index 100edcc..4c94a79 100644 |
3007 | --- a/fs/binfmt_elf.c |
3008 | +++ b/fs/binfmt_elf.c |
3009 | @@ -1413,7 +1413,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata, |
3010 | * long file_ofs |
3011 | * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL... |
3012 | */ |
3013 | -static void fill_files_note(struct memelfnote *note) |
3014 | +static int fill_files_note(struct memelfnote *note) |
3015 | { |
3016 | struct vm_area_struct *vma; |
3017 | unsigned count, size, names_ofs, remaining, n; |
3018 | @@ -1428,11 +1428,11 @@ static void fill_files_note(struct memelfnote *note) |
3019 | names_ofs = (2 + 3 * count) * sizeof(data[0]); |
3020 | alloc: |
3021 | if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */ |
3022 | - goto err; |
3023 | + return -EINVAL; |
3024 | size = round_up(size, PAGE_SIZE); |
3025 | data = vmalloc(size); |
3026 | if (!data) |
3027 | - goto err; |
3028 | + return -ENOMEM; |
3029 | |
3030 | start_end_ofs = data + 2; |
3031 | name_base = name_curpos = ((char *)data) + names_ofs; |
3032 | @@ -1485,7 +1485,7 @@ static void fill_files_note(struct memelfnote *note) |
3033 | |
3034 | size = name_curpos - (char *)data; |
3035 | fill_note(note, "CORE", NT_FILE, size, data); |
3036 | - err: ; |
3037 | + return 0; |
3038 | } |
3039 | |
3040 | #ifdef CORE_DUMP_USE_REGSET |
3041 | @@ -1686,8 +1686,8 @@ static int fill_note_info(struct elfhdr *elf, int phdrs, |
3042 | fill_auxv_note(&info->auxv, current->mm); |
3043 | info->size += notesize(&info->auxv); |
3044 | |
3045 | - fill_files_note(&info->files); |
3046 | - info->size += notesize(&info->files); |
3047 | + if (fill_files_note(&info->files) == 0) |
3048 | + info->size += notesize(&info->files); |
3049 | |
3050 | return 1; |
3051 | } |
3052 | @@ -1719,7 +1719,8 @@ static int write_note_info(struct elf_note_info *info, |
3053 | return 0; |
3054 | if (first && !writenote(&info->auxv, file, foffset)) |
3055 | return 0; |
3056 | - if (first && !writenote(&info->files, file, foffset)) |
3057 | + if (first && info->files.data && |
3058 | + !writenote(&info->files, file, foffset)) |
3059 | return 0; |
3060 | |
3061 | for (i = 1; i < info->thread_notes; ++i) |
3062 | @@ -1806,6 +1807,7 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t) |
3063 | |
3064 | struct elf_note_info { |
3065 | struct memelfnote *notes; |
3066 | + struct memelfnote *notes_files; |
3067 | struct elf_prstatus *prstatus; /* NT_PRSTATUS */ |
3068 | struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */ |
3069 | struct list_head thread_list; |
3070 | @@ -1896,9 +1898,12 @@ static int fill_note_info(struct elfhdr *elf, int phdrs, |
3071 | |
3072 | fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo); |
3073 | fill_auxv_note(info->notes + 3, current->mm); |
3074 | - fill_files_note(info->notes + 4); |
3075 | + info->numnote = 4; |
3076 | |
3077 | - info->numnote = 5; |
3078 | + if (fill_files_note(info->notes + info->numnote) == 0) { |
3079 | + info->notes_files = info->notes + info->numnote; |
3080 | + info->numnote++; |
3081 | + } |
3082 | |
3083 | /* Try to dump the FPU. */ |
3084 | info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, |
3085 | @@ -1960,8 +1965,9 @@ static void free_note_info(struct elf_note_info *info) |
3086 | kfree(list_entry(tmp, struct elf_thread_status, list)); |
3087 | } |
3088 | |
3089 | - /* Free data allocated by fill_files_note(): */ |
3090 | - vfree(info->notes[4].data); |
3091 | + /* Free data possibly allocated by fill_files_note(): */ |
3092 | + if (info->notes_files) |
3093 | + vfree(info->notes_files->data); |
3094 | |
3095 | kfree(info->prstatus); |
3096 | kfree(info->psinfo); |
3097 | @@ -2044,7 +2050,7 @@ static int elf_core_dump(struct coredump_params *cprm) |
3098 | struct vm_area_struct *vma, *gate_vma; |
3099 | struct elfhdr *elf = NULL; |
3100 | loff_t offset = 0, dataoff, foffset; |
3101 | - struct elf_note_info info; |
3102 | + struct elf_note_info info = { }; |
3103 | struct elf_phdr *phdr4note = NULL; |
3104 | struct elf_shdr *shdr4extnum = NULL; |
3105 | Elf_Half e_phnum; |
3106 | diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c |
3107 | index 1204c8e..3ae253e 100644 |
3108 | --- a/fs/btrfs/extent-tree.c |
3109 | +++ b/fs/btrfs/extent-tree.c |
3110 | @@ -2403,6 +2403,8 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans, |
3111 | default: |
3112 | WARN_ON(1); |
3113 | } |
3114 | + } else { |
3115 | + list_del_init(&locked_ref->cluster); |
3116 | } |
3117 | spin_unlock(&delayed_refs->lock); |
3118 | |
3119 | @@ -2425,7 +2427,6 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans, |
3120 | * list before we release it. |
3121 | */ |
3122 | if (btrfs_delayed_ref_is_head(ref)) { |
3123 | - list_del_init(&locked_ref->cluster); |
3124 | btrfs_delayed_ref_unlock(locked_ref); |
3125 | locked_ref = NULL; |
3126 | } |
3127 | diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c |
3128 | index 021694c..d3280b2 100644 |
3129 | --- a/fs/btrfs/inode.c |
3130 | +++ b/fs/btrfs/inode.c |
3131 | @@ -2132,6 +2132,7 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id, |
3132 | WARN_ON(1); |
3133 | return ret; |
3134 | } |
3135 | + ret = 0; |
3136 | |
3137 | while (1) { |
3138 | cond_resched(); |
3139 | @@ -2181,8 +2182,6 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id, |
3140 | old->len || extent_offset + num_bytes <= |
3141 | old->extent_offset + old->offset) |
3142 | continue; |
3143 | - |
3144 | - ret = 0; |
3145 | break; |
3146 | } |
3147 | |
3148 | diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c |
3149 | index 1209649..4576c03 100644 |
3150 | --- a/fs/btrfs/relocation.c |
3151 | +++ b/fs/btrfs/relocation.c |
3152 | @@ -691,6 +691,7 @@ struct backref_node *build_backref_tree(struct reloc_control *rc, |
3153 | int cowonly; |
3154 | int ret; |
3155 | int err = 0; |
3156 | + bool need_check = true; |
3157 | |
3158 | path1 = btrfs_alloc_path(); |
3159 | path2 = btrfs_alloc_path(); |
3160 | @@ -914,6 +915,7 @@ again: |
3161 | cur->bytenr); |
3162 | |
3163 | lower = cur; |
3164 | + need_check = true; |
3165 | for (; level < BTRFS_MAX_LEVEL; level++) { |
3166 | if (!path2->nodes[level]) { |
3167 | BUG_ON(btrfs_root_bytenr(&root->root_item) != |
3168 | @@ -957,14 +959,12 @@ again: |
3169 | |
3170 | /* |
3171 | * add the block to pending list if we |
3172 | - * need check its backrefs. only block |
3173 | - * at 'cur->level + 1' is added to the |
3174 | - * tail of pending list. this guarantees |
3175 | - * we check backrefs from lower level |
3176 | - * blocks to upper level blocks. |
3177 | + * need check its backrefs, we only do this once |
3178 | + * while walking up a tree as we will catch |
3179 | + * anything else later on. |
3180 | */ |
3181 | - if (!upper->checked && |
3182 | - level == cur->level + 1) { |
3183 | + if (!upper->checked && need_check) { |
3184 | + need_check = false; |
3185 | list_add_tail(&edge->list[UPPER], |
3186 | &list); |
3187 | } else |
3188 | diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c |
3189 | index d3f3b43..0c87c6b 100644 |
3190 | --- a/fs/btrfs/send.c |
3191 | +++ b/fs/btrfs/send.c |
3192 | @@ -2519,7 +2519,8 @@ static int did_create_dir(struct send_ctx *sctx, u64 dir) |
3193 | di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); |
3194 | btrfs_dir_item_key_to_cpu(eb, di, &di_key); |
3195 | |
3196 | - if (di_key.objectid < sctx->send_progress) { |
3197 | + if (di_key.type != BTRFS_ROOT_ITEM_KEY && |
3198 | + di_key.objectid < sctx->send_progress) { |
3199 | ret = 1; |
3200 | goto out; |
3201 | } |
3202 | diff --git a/fs/fuse/file.c b/fs/fuse/file.c |
3203 | index d409dea..4598345 100644 |
3204 | --- a/fs/fuse/file.c |
3205 | +++ b/fs/fuse/file.c |
3206 | @@ -2467,6 +2467,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, |
3207 | { |
3208 | struct fuse_file *ff = file->private_data; |
3209 | struct inode *inode = file->f_inode; |
3210 | + struct fuse_inode *fi = get_fuse_inode(inode); |
3211 | struct fuse_conn *fc = ff->fc; |
3212 | struct fuse_req *req; |
3213 | struct fuse_fallocate_in inarg = { |
3214 | @@ -2484,10 +2485,20 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, |
3215 | |
3216 | if (lock_inode) { |
3217 | mutex_lock(&inode->i_mutex); |
3218 | - if (mode & FALLOC_FL_PUNCH_HOLE) |
3219 | - fuse_set_nowrite(inode); |
3220 | + if (mode & FALLOC_FL_PUNCH_HOLE) { |
3221 | + loff_t endbyte = offset + length - 1; |
3222 | + err = filemap_write_and_wait_range(inode->i_mapping, |
3223 | + offset, endbyte); |
3224 | + if (err) |
3225 | + goto out; |
3226 | + |
3227 | + fuse_sync_writes(inode); |
3228 | + } |
3229 | } |
3230 | |
3231 | + if (!(mode & FALLOC_FL_KEEP_SIZE)) |
3232 | + set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); |
3233 | + |
3234 | req = fuse_get_req_nopages(fc); |
3235 | if (IS_ERR(req)) { |
3236 | err = PTR_ERR(req); |
3237 | @@ -2520,11 +2531,11 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, |
3238 | fuse_invalidate_attr(inode); |
3239 | |
3240 | out: |
3241 | - if (lock_inode) { |
3242 | - if (mode & FALLOC_FL_PUNCH_HOLE) |
3243 | - fuse_release_nowrite(inode); |
3244 | + if (!(mode & FALLOC_FL_KEEP_SIZE)) |
3245 | + clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); |
3246 | + |
3247 | + if (lock_inode) |
3248 | mutex_unlock(&inode->i_mutex); |
3249 | - } |
3250 | |
3251 | return err; |
3252 | } |
3253 | diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c |
3254 | index 95604f6..cd3aef5 100644 |
3255 | --- a/fs/nfs/nfs4filelayoutdev.c |
3256 | +++ b/fs/nfs/nfs4filelayoutdev.c |
3257 | @@ -801,34 +801,34 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx) |
3258 | struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr; |
3259 | struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx]; |
3260 | struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg); |
3261 | - |
3262 | - if (filelayout_test_devid_unavailable(devid)) |
3263 | - return NULL; |
3264 | + struct nfs4_pnfs_ds *ret = ds; |
3265 | |
3266 | if (ds == NULL) { |
3267 | printk(KERN_ERR "NFS: %s: No data server for offset index %d\n", |
3268 | __func__, ds_idx); |
3269 | filelayout_mark_devid_invalid(devid); |
3270 | - return NULL; |
3271 | + goto out; |
3272 | } |
3273 | if (ds->ds_clp) |
3274 | - return ds; |
3275 | + goto out_test_devid; |
3276 | |
3277 | if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) { |
3278 | struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode); |
3279 | int err; |
3280 | |
3281 | err = nfs4_ds_connect(s, ds); |
3282 | - if (err) { |
3283 | + if (err) |
3284 | nfs4_mark_deviceid_unavailable(devid); |
3285 | - ds = NULL; |
3286 | - } |
3287 | nfs4_clear_ds_conn_bit(ds); |
3288 | } else { |
3289 | /* Either ds is connected, or ds is NULL */ |
3290 | nfs4_wait_ds_connect(ds); |
3291 | } |
3292 | - return ds; |
3293 | +out_test_devid: |
3294 | + if (filelayout_test_devid_unavailable(devid)) |
3295 | + ret = NULL; |
3296 | +out: |
3297 | + return ret; |
3298 | } |
3299 | |
3300 | module_param(dataserver_retrans, uint, 0644); |
3301 | diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c |
3302 | index 43f4229..526e969 100644 |
3303 | --- a/fs/nfsd/nfs4state.c |
3304 | +++ b/fs/nfsd/nfs4state.c |
3305 | @@ -368,11 +368,8 @@ static struct nfs4_delegation * |
3306 | alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh) |
3307 | { |
3308 | struct nfs4_delegation *dp; |
3309 | - struct nfs4_file *fp = stp->st_file; |
3310 | |
3311 | dprintk("NFSD alloc_init_deleg\n"); |
3312 | - if (fp->fi_had_conflict) |
3313 | - return NULL; |
3314 | if (num_delegations > max_delegations) |
3315 | return NULL; |
3316 | dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab)); |
3317 | @@ -389,8 +386,7 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct sv |
3318 | INIT_LIST_HEAD(&dp->dl_perfile); |
3319 | INIT_LIST_HEAD(&dp->dl_perclnt); |
3320 | INIT_LIST_HEAD(&dp->dl_recall_lru); |
3321 | - get_nfs4_file(fp); |
3322 | - dp->dl_file = fp; |
3323 | + dp->dl_file = NULL; |
3324 | dp->dl_type = NFS4_OPEN_DELEGATE_READ; |
3325 | fh_copy_shallow(&dp->dl_fh, ¤t_fh->fh_handle); |
3326 | dp->dl_time = 0; |
3327 | @@ -3044,22 +3040,35 @@ static int nfs4_setlease(struct nfs4_delegation *dp) |
3328 | return 0; |
3329 | } |
3330 | |
3331 | -static int nfs4_set_delegation(struct nfs4_delegation *dp) |
3332 | +static int nfs4_set_delegation(struct nfs4_delegation *dp, struct nfs4_file *fp) |
3333 | { |
3334 | - struct nfs4_file *fp = dp->dl_file; |
3335 | + int status; |
3336 | |
3337 | - if (!fp->fi_lease) |
3338 | - return nfs4_setlease(dp); |
3339 | + if (fp->fi_had_conflict) |
3340 | + return -EAGAIN; |
3341 | + get_nfs4_file(fp); |
3342 | + dp->dl_file = fp; |
3343 | + if (!fp->fi_lease) { |
3344 | + status = nfs4_setlease(dp); |
3345 | + if (status) |
3346 | + goto out_free; |
3347 | + return 0; |
3348 | + } |
3349 | spin_lock(&recall_lock); |
3350 | if (fp->fi_had_conflict) { |
3351 | spin_unlock(&recall_lock); |
3352 | - return -EAGAIN; |
3353 | + status = -EAGAIN; |
3354 | + goto out_free; |
3355 | } |
3356 | atomic_inc(&fp->fi_delegees); |
3357 | list_add(&dp->dl_perfile, &fp->fi_delegations); |
3358 | spin_unlock(&recall_lock); |
3359 | list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations); |
3360 | return 0; |
3361 | +out_free: |
3362 | + put_nfs4_file(fp); |
3363 | + dp->dl_file = fp; |
3364 | + return status; |
3365 | } |
3366 | |
3367 | static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status) |
3368 | @@ -3134,7 +3143,7 @@ nfs4_open_delegation(struct net *net, struct svc_fh *fh, |
3369 | dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh); |
3370 | if (dp == NULL) |
3371 | goto out_no_deleg; |
3372 | - status = nfs4_set_delegation(dp); |
3373 | + status = nfs4_set_delegation(dp, stp->st_file); |
3374 | if (status) |
3375 | goto out_free; |
3376 | |
3377 | diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c |
3378 | index 0ba6798..da27664 100644 |
3379 | --- a/fs/nilfs2/page.c |
3380 | +++ b/fs/nilfs2/page.c |
3381 | @@ -94,6 +94,7 @@ void nilfs_forget_buffer(struct buffer_head *bh) |
3382 | clear_buffer_nilfs_volatile(bh); |
3383 | clear_buffer_nilfs_checked(bh); |
3384 | clear_buffer_nilfs_redirected(bh); |
3385 | + clear_buffer_async_write(bh); |
3386 | clear_buffer_dirty(bh); |
3387 | if (nilfs_page_buffers_clean(page)) |
3388 | __nilfs_clear_page_dirty(page); |
3389 | @@ -429,6 +430,7 @@ void nilfs_clear_dirty_page(struct page *page, bool silent) |
3390 | "discard block %llu, size %zu", |
3391 | (u64)bh->b_blocknr, bh->b_size); |
3392 | } |
3393 | + clear_buffer_async_write(bh); |
3394 | clear_buffer_dirty(bh); |
3395 | clear_buffer_nilfs_volatile(bh); |
3396 | clear_buffer_nilfs_checked(bh); |
3397 | diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c |
3398 | index bd88a74..9f6b486 100644 |
3399 | --- a/fs/nilfs2/segment.c |
3400 | +++ b/fs/nilfs2/segment.c |
3401 | @@ -665,7 +665,7 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode, |
3402 | |
3403 | bh = head = page_buffers(page); |
3404 | do { |
3405 | - if (!buffer_dirty(bh)) |
3406 | + if (!buffer_dirty(bh) || buffer_async_write(bh)) |
3407 | continue; |
3408 | get_bh(bh); |
3409 | list_add_tail(&bh->b_assoc_buffers, listp); |
3410 | @@ -699,7 +699,8 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode, |
3411 | for (i = 0; i < pagevec_count(&pvec); i++) { |
3412 | bh = head = page_buffers(pvec.pages[i]); |
3413 | do { |
3414 | - if (buffer_dirty(bh)) { |
3415 | + if (buffer_dirty(bh) && |
3416 | + !buffer_async_write(bh)) { |
3417 | get_bh(bh); |
3418 | list_add_tail(&bh->b_assoc_buffers, |
3419 | listp); |
3420 | @@ -1579,6 +1580,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci) |
3421 | |
3422 | list_for_each_entry(bh, &segbuf->sb_segsum_buffers, |
3423 | b_assoc_buffers) { |
3424 | + set_buffer_async_write(bh); |
3425 | if (bh->b_page != bd_page) { |
3426 | if (bd_page) { |
3427 | lock_page(bd_page); |
3428 | @@ -1592,6 +1594,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci) |
3429 | |
3430 | list_for_each_entry(bh, &segbuf->sb_payload_buffers, |
3431 | b_assoc_buffers) { |
3432 | + set_buffer_async_write(bh); |
3433 | if (bh == segbuf->sb_super_root) { |
3434 | if (bh->b_page != bd_page) { |
3435 | lock_page(bd_page); |
3436 | @@ -1677,6 +1680,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err) |
3437 | list_for_each_entry(segbuf, logs, sb_list) { |
3438 | list_for_each_entry(bh, &segbuf->sb_segsum_buffers, |
3439 | b_assoc_buffers) { |
3440 | + clear_buffer_async_write(bh); |
3441 | if (bh->b_page != bd_page) { |
3442 | if (bd_page) |
3443 | end_page_writeback(bd_page); |
3444 | @@ -1686,6 +1690,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err) |
3445 | |
3446 | list_for_each_entry(bh, &segbuf->sb_payload_buffers, |
3447 | b_assoc_buffers) { |
3448 | + clear_buffer_async_write(bh); |
3449 | if (bh == segbuf->sb_super_root) { |
3450 | if (bh->b_page != bd_page) { |
3451 | end_page_writeback(bd_page); |
3452 | @@ -1755,6 +1760,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci) |
3453 | b_assoc_buffers) { |
3454 | set_buffer_uptodate(bh); |
3455 | clear_buffer_dirty(bh); |
3456 | + clear_buffer_async_write(bh); |
3457 | if (bh->b_page != bd_page) { |
3458 | if (bd_page) |
3459 | end_page_writeback(bd_page); |
3460 | @@ -1776,6 +1782,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci) |
3461 | b_assoc_buffers) { |
3462 | set_buffer_uptodate(bh); |
3463 | clear_buffer_dirty(bh); |
3464 | + clear_buffer_async_write(bh); |
3465 | clear_buffer_delay(bh); |
3466 | clear_buffer_nilfs_volatile(bh); |
3467 | clear_buffer_nilfs_redirected(bh); |
3468 | diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c |
3469 | index 0b8b2a1..eca6f9d 100644 |
3470 | --- a/fs/xfs/xfs_da_btree.c |
3471 | +++ b/fs/xfs/xfs_da_btree.c |
3472 | @@ -1223,6 +1223,7 @@ xfs_da3_node_toosmall( |
3473 | /* start with smaller blk num */ |
3474 | forward = nodehdr.forw < nodehdr.back; |
3475 | for (i = 0; i < 2; forward = !forward, i++) { |
3476 | + struct xfs_da3_icnode_hdr thdr; |
3477 | if (forward) |
3478 | blkno = nodehdr.forw; |
3479 | else |
3480 | @@ -1235,10 +1236,10 @@ xfs_da3_node_toosmall( |
3481 | return(error); |
3482 | |
3483 | node = bp->b_addr; |
3484 | - xfs_da3_node_hdr_from_disk(&nodehdr, node); |
3485 | + xfs_da3_node_hdr_from_disk(&thdr, node); |
3486 | xfs_trans_brelse(state->args->trans, bp); |
3487 | |
3488 | - if (count - nodehdr.count >= 0) |
3489 | + if (count - thdr.count >= 0) |
3490 | break; /* fits with at least 25% to spare */ |
3491 | } |
3492 | if (i >= 2) { |
3493 | diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h |
3494 | index d06079c..99b490b 100644 |
3495 | --- a/include/asm-generic/hugetlb.h |
3496 | +++ b/include/asm-generic/hugetlb.h |
3497 | @@ -6,12 +6,12 @@ static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot) |
3498 | return mk_pte(page, pgprot); |
3499 | } |
3500 | |
3501 | -static inline int huge_pte_write(pte_t pte) |
3502 | +static inline unsigned long huge_pte_write(pte_t pte) |
3503 | { |
3504 | return pte_write(pte); |
3505 | } |
3506 | |
3507 | -static inline int huge_pte_dirty(pte_t pte) |
3508 | +static inline unsigned long huge_pte_dirty(pte_t pte) |
3509 | { |
3510 | return pte_dirty(pte); |
3511 | } |
3512 | diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h |
3513 | index f7f1d71..089743a 100644 |
3514 | --- a/include/linux/balloon_compaction.h |
3515 | +++ b/include/linux/balloon_compaction.h |
3516 | @@ -159,6 +159,26 @@ static inline bool balloon_page_movable(struct page *page) |
3517 | } |
3518 | |
3519 | /* |
3520 | + * isolated_balloon_page - identify an isolated balloon page on private |
3521 | + * compaction/migration page lists. |
3522 | + * |
3523 | + * After a compaction thread isolates a balloon page for migration, it raises |
3524 | + * the page refcount to prevent concurrent compaction threads from re-isolating |
3525 | + * the same page. For that reason putback_movable_pages(), or other routines |
3526 | + * that need to identify isolated balloon pages on private pagelists, cannot |
3527 | + * rely on balloon_page_movable() to accomplish the task. |
3528 | + */ |
3529 | +static inline bool isolated_balloon_page(struct page *page) |
3530 | +{ |
3531 | + /* Already isolated balloon pages, by default, have a raised refcount */ |
3532 | + if (page_flags_cleared(page) && !page_mapped(page) && |
3533 | + page_count(page) >= 2) |
3534 | + return __is_movable_balloon_page(page); |
3535 | + |
3536 | + return false; |
3537 | +} |
3538 | + |
3539 | +/* |
3540 | * balloon_page_insert - insert a page into the balloon's page list and make |
3541 | * the page->mapping assignment accordingly. |
3542 | * @page : page to be assigned as a 'balloon page' |
3543 | @@ -243,6 +263,11 @@ static inline bool balloon_page_movable(struct page *page) |
3544 | return false; |
3545 | } |
3546 | |
3547 | +static inline bool isolated_balloon_page(struct page *page) |
3548 | +{ |
3549 | + return false; |
3550 | +} |
3551 | + |
3552 | static inline bool balloon_page_isolate(struct page *page) |
3553 | { |
3554 | return false; |
3555 | diff --git a/include/linux/hid.h b/include/linux/hid.h |
3556 | index 6e18550..4f8aa47 100644 |
3557 | --- a/include/linux/hid.h |
3558 | +++ b/include/linux/hid.h |
3559 | @@ -746,6 +746,7 @@ struct hid_field *hidinput_get_led_field(struct hid_device *hid); |
3560 | unsigned int hidinput_count_leds(struct hid_device *hid); |
3561 | __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code); |
3562 | void hid_output_report(struct hid_report *report, __u8 *data); |
3563 | +u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags); |
3564 | struct hid_device *hid_allocate_device(void); |
3565 | struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id); |
3566 | int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); |
3567 | diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h |
3568 | index 09c2300..cb35835 100644 |
3569 | --- a/include/linux/miscdevice.h |
3570 | +++ b/include/linux/miscdevice.h |
3571 | @@ -45,6 +45,7 @@ |
3572 | #define MAPPER_CTRL_MINOR 236 |
3573 | #define LOOP_CTRL_MINOR 237 |
3574 | #define VHOST_NET_MINOR 238 |
3575 | +#define UHID_MINOR 239 |
3576 | #define MISC_DYNAMIC_MINOR 255 |
3577 | |
3578 | struct device; |
3579 | diff --git a/include/net/addrconf.h b/include/net/addrconf.h |
3580 | index c7b181c..2683730 100644 |
3581 | --- a/include/net/addrconf.h |
3582 | +++ b/include/net/addrconf.h |
3583 | @@ -73,6 +73,10 @@ extern int ipv6_chk_home_addr(struct net *net, |
3584 | const struct in6_addr *addr); |
3585 | #endif |
3586 | |
3587 | +bool ipv6_chk_custom_prefix(const struct in6_addr *addr, |
3588 | + const unsigned int prefix_len, |
3589 | + struct net_device *dev); |
3590 | + |
3591 | extern int ipv6_chk_prefix(const struct in6_addr *addr, |
3592 | struct net_device *dev); |
3593 | |
3594 | diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h |
3595 | index 3c592cf..205843a 100644 |
3596 | --- a/include/net/bluetooth/hci.h |
3597 | +++ b/include/net/bluetooth/hci.h |
3598 | @@ -104,6 +104,7 @@ enum { |
3599 | enum { |
3600 | HCI_SETUP, |
3601 | HCI_AUTO_OFF, |
3602 | + HCI_RFKILLED, |
3603 | HCI_MGMT, |
3604 | HCI_PAIRABLE, |
3605 | HCI_SERVICE_CACHE, |
3606 | diff --git a/include/net/ip.h b/include/net/ip.h |
3607 | index a68f838..edfa591 100644 |
3608 | --- a/include/net/ip.h |
3609 | +++ b/include/net/ip.h |
3610 | @@ -254,9 +254,11 @@ int ip_dont_fragment(struct sock *sk, struct dst_entry *dst) |
3611 | |
3612 | extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more); |
3613 | |
3614 | -static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, struct sock *sk) |
3615 | +static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk) |
3616 | { |
3617 | - if (iph->frag_off & htons(IP_DF)) { |
3618 | + struct iphdr *iph = ip_hdr(skb); |
3619 | + |
3620 | + if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) { |
3621 | /* This is only to work around buggy Windows95/2000 |
3622 | * VJ compression implementations. If the ID field |
3623 | * does not change, they drop every other packet in |
3624 | @@ -268,9 +270,11 @@ static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, str |
3625 | __ip_select_ident(iph, dst, 0); |
3626 | } |
3627 | |
3628 | -static inline void ip_select_ident_more(struct iphdr *iph, struct dst_entry *dst, struct sock *sk, int more) |
3629 | +static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk, int more) |
3630 | { |
3631 | - if (iph->frag_off & htons(IP_DF)) { |
3632 | + struct iphdr *iph = ip_hdr(skb); |
3633 | + |
3634 | + if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) { |
3635 | if (sk && inet_sk(sk)->inet_daddr) { |
3636 | iph->id = htons(inet_sk(sk)->inet_id); |
3637 | inet_sk(sk)->inet_id += 1 + more; |
3638 | diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h |
3639 | index 6ca975b..c2e542b 100644 |
3640 | --- a/include/net/secure_seq.h |
3641 | +++ b/include/net/secure_seq.h |
3642 | @@ -3,7 +3,6 @@ |
3643 | |
3644 | #include <linux/types.h> |
3645 | |
3646 | -extern void net_secret_init(void); |
3647 | extern __u32 secure_ip_id(__be32 daddr); |
3648 | extern __u32 secure_ipv6_id(const __be32 daddr[4]); |
3649 | extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); |
3650 | diff --git a/ipc/msg.c b/ipc/msg.c |
3651 | index b65fdf1..a877c16 100644 |
3652 | --- a/ipc/msg.c |
3653 | +++ b/ipc/msg.c |
3654 | @@ -167,6 +167,15 @@ static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s) |
3655 | ipc_rmid(&msg_ids(ns), &s->q_perm); |
3656 | } |
3657 | |
3658 | +static void msg_rcu_free(struct rcu_head *head) |
3659 | +{ |
3660 | + struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); |
3661 | + struct msg_queue *msq = ipc_rcu_to_struct(p); |
3662 | + |
3663 | + security_msg_queue_free(msq); |
3664 | + ipc_rcu_free(head); |
3665 | +} |
3666 | + |
3667 | /** |
3668 | * newque - Create a new msg queue |
3669 | * @ns: namespace |
3670 | @@ -191,15 +200,14 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) |
3671 | msq->q_perm.security = NULL; |
3672 | retval = security_msg_queue_alloc(msq); |
3673 | if (retval) { |
3674 | - ipc_rcu_putref(msq); |
3675 | + ipc_rcu_putref(msq, ipc_rcu_free); |
3676 | return retval; |
3677 | } |
3678 | |
3679 | /* ipc_addid() locks msq upon success. */ |
3680 | id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); |
3681 | if (id < 0) { |
3682 | - security_msg_queue_free(msq); |
3683 | - ipc_rcu_putref(msq); |
3684 | + ipc_rcu_putref(msq, msg_rcu_free); |
3685 | return id; |
3686 | } |
3687 | |
3688 | @@ -277,8 +285,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) |
3689 | free_msg(msg); |
3690 | } |
3691 | atomic_sub(msq->q_cbytes, &ns->msg_bytes); |
3692 | - security_msg_queue_free(msq); |
3693 | - ipc_rcu_putref(msq); |
3694 | + ipc_rcu_putref(msq, msg_rcu_free); |
3695 | } |
3696 | |
3697 | /* |
3698 | @@ -689,6 +696,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, |
3699 | if (ipcperms(ns, &msq->q_perm, S_IWUGO)) |
3700 | goto out_unlock0; |
3701 | |
3702 | + /* raced with RMID? */ |
3703 | + if (msq->q_perm.deleted) { |
3704 | + err = -EIDRM; |
3705 | + goto out_unlock0; |
3706 | + } |
3707 | + |
3708 | err = security_msg_queue_msgsnd(msq, msg, msgflg); |
3709 | if (err) |
3710 | goto out_unlock0; |
3711 | @@ -718,7 +731,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, |
3712 | rcu_read_lock(); |
3713 | ipc_lock_object(&msq->q_perm); |
3714 | |
3715 | - ipc_rcu_putref(msq); |
3716 | + ipc_rcu_putref(msq, ipc_rcu_free); |
3717 | if (msq->q_perm.deleted) { |
3718 | err = -EIDRM; |
3719 | goto out_unlock0; |
3720 | @@ -895,6 +908,13 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl |
3721 | goto out_unlock1; |
3722 | |
3723 | ipc_lock_object(&msq->q_perm); |
3724 | + |
3725 | + /* raced with RMID? */ |
3726 | + if (msq->q_perm.deleted) { |
3727 | + msg = ERR_PTR(-EIDRM); |
3728 | + goto out_unlock0; |
3729 | + } |
3730 | + |
3731 | msg = find_msg(msq, &msgtyp, mode); |
3732 | if (!IS_ERR(msg)) { |
3733 | /* |
3734 | diff --git a/ipc/sem.c b/ipc/sem.c |
3735 | index 4108889..87614511 100644 |
3736 | --- a/ipc/sem.c |
3737 | +++ b/ipc/sem.c |
3738 | @@ -244,70 +244,113 @@ static void merge_queues(struct sem_array *sma) |
3739 | } |
3740 | |
3741 | /* |
3742 | + * Wait until all currently ongoing simple ops have completed. |
3743 | + * Caller must own sem_perm.lock. |
3744 | + * New simple ops cannot start, because simple ops first check |
3745 | + * that sem_perm.lock is free. |
3746 | + */ |
3747 | +static void sem_wait_array(struct sem_array *sma) |
3748 | +{ |
3749 | + int i; |
3750 | + struct sem *sem; |
3751 | + |
3752 | + for (i = 0; i < sma->sem_nsems; i++) { |
3753 | + sem = sma->sem_base + i; |
3754 | + spin_unlock_wait(&sem->lock); |
3755 | + } |
3756 | +} |
3757 | + |
3758 | +static void sem_rcu_free(struct rcu_head *head) |
3759 | +{ |
3760 | + struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); |
3761 | + struct sem_array *sma = ipc_rcu_to_struct(p); |
3762 | + |
3763 | + security_sem_free(sma); |
3764 | + ipc_rcu_free(head); |
3765 | +} |
3766 | + |
3767 | +/* |
3768 | * If the request contains only one semaphore operation, and there are |
3769 | * no complex transactions pending, lock only the semaphore involved. |
3770 | * Otherwise, lock the entire semaphore array, since we either have |
3771 | * multiple semaphores in our own semops, or we need to look at |
3772 | * semaphores from other pending complex operations. |
3773 | - * |
3774 | - * Carefully guard against sma->complex_count changing between zero |
3775 | - * and non-zero while we are spinning for the lock. The value of |
3776 | - * sma->complex_count cannot change while we are holding the lock, |
3777 | - * so sem_unlock should be fine. |
3778 | - * |
3779 | - * The global lock path checks that all the local locks have been released, |
3780 | - * checking each local lock once. This means that the local lock paths |
3781 | - * cannot start their critical sections while the global lock is held. |
3782 | */ |
3783 | static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, |
3784 | int nsops) |
3785 | { |
3786 | - int locknum; |
3787 | - again: |
3788 | - if (nsops == 1 && !sma->complex_count) { |
3789 | - struct sem *sem = sma->sem_base + sops->sem_num; |
3790 | + struct sem *sem; |
3791 | |
3792 | - /* Lock just the semaphore we are interested in. */ |
3793 | - spin_lock(&sem->lock); |
3794 | + if (nsops != 1) { |
3795 | + /* Complex operation - acquire a full lock */ |
3796 | + ipc_lock_object(&sma->sem_perm); |
3797 | |
3798 | - /* |
3799 | - * If sma->complex_count was set while we were spinning, |
3800 | - * we may need to look at things we did not lock here. |
3801 | + /* And wait until all simple ops that are processed |
3802 | + * right now have dropped their locks. |
3803 | */ |
3804 | - if (unlikely(sma->complex_count)) { |
3805 | - spin_unlock(&sem->lock); |
3806 | - goto lock_array; |
3807 | - } |
3808 | + sem_wait_array(sma); |
3809 | + return -1; |
3810 | + } |
3811 | |
3812 | + /* |
3813 | + * Only one semaphore affected - try to optimize locking. |
3814 | + * The rules are: |
3815 | + * - optimized locking is possible if no complex operation |
3816 | + * is either enqueued or processed right now. |
3817 | + * - The test for enqueued complex ops is simple: |
3818 | + * sma->complex_count != 0 |
3819 | + * - Testing for complex ops that are processed right now is |
3820 | + * a bit more difficult. Complex ops acquire the full lock |
3821 | + * and first wait that the running simple ops have completed. |
3822 | + * (see above) |
3823 | + * Thus: If we own a simple lock and the global lock is free |
3824 | + * and complex_count is now 0, then it will stay 0 and |
3825 | + * thus just locking sem->lock is sufficient. |
3826 | + */ |
3827 | + sem = sma->sem_base + sops->sem_num; |
3828 | + |
3829 | + if (sma->complex_count == 0) { |
3830 | /* |
3831 | - * Another process is holding the global lock on the |
3832 | - * sem_array; we cannot enter our critical section, |
3833 | - * but have to wait for the global lock to be released. |
3834 | + * It appears that no complex operation is around. |
3835 | + * Acquire the per-semaphore lock. |
3836 | */ |
3837 | - if (unlikely(spin_is_locked(&sma->sem_perm.lock))) { |
3838 | - spin_unlock(&sem->lock); |
3839 | - spin_unlock_wait(&sma->sem_perm.lock); |
3840 | - goto again; |
3841 | + spin_lock(&sem->lock); |
3842 | + |
3843 | + /* Then check that the global lock is free */ |
3844 | + if (!spin_is_locked(&sma->sem_perm.lock)) { |
3845 | + /* spin_is_locked() is not a memory barrier */ |
3846 | + smp_mb(); |
3847 | + |
3848 | + /* Now repeat the test of complex_count: |
3849 | + * It can't change anymore until we drop sem->lock. |
3850 | + * Thus: if is now 0, then it will stay 0. |
3851 | + */ |
3852 | + if (sma->complex_count == 0) { |
3853 | + /* fast path successful! */ |
3854 | + return sops->sem_num; |
3855 | + } |
3856 | } |
3857 | + spin_unlock(&sem->lock); |
3858 | + } |
3859 | |
3860 | - locknum = sops->sem_num; |
3861 | + /* slow path: acquire the full lock */ |
3862 | + ipc_lock_object(&sma->sem_perm); |
3863 | + |
3864 | + if (sma->complex_count == 0) { |
3865 | + /* False alarm: |
3866 | + * There is no complex operation, thus we can switch |
3867 | + * back to the fast path. |
3868 | + */ |
3869 | + spin_lock(&sem->lock); |
3870 | + ipc_unlock_object(&sma->sem_perm); |
3871 | + return sops->sem_num; |
3872 | } else { |
3873 | - int i; |
3874 | - /* |
3875 | - * Lock the semaphore array, and wait for all of the |
3876 | - * individual semaphore locks to go away. The code |
3877 | - * above ensures no new single-lock holders will enter |
3878 | - * their critical section while the array lock is held. |
3879 | + /* Not a false alarm, thus complete the sequence for a |
3880 | + * full lock. |
3881 | */ |
3882 | - lock_array: |
3883 | - ipc_lock_object(&sma->sem_perm); |
3884 | - for (i = 0; i < sma->sem_nsems; i++) { |
3885 | - struct sem *sem = sma->sem_base + i; |
3886 | - spin_unlock_wait(&sem->lock); |
3887 | - } |
3888 | - locknum = -1; |
3889 | + sem_wait_array(sma); |
3890 | + return -1; |
3891 | } |
3892 | - return locknum; |
3893 | } |
3894 | |
3895 | static inline void sem_unlock(struct sem_array *sma, int locknum) |
3896 | @@ -374,12 +417,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns |
3897 | static inline void sem_lock_and_putref(struct sem_array *sma) |
3898 | { |
3899 | sem_lock(sma, NULL, -1); |
3900 | - ipc_rcu_putref(sma); |
3901 | -} |
3902 | - |
3903 | -static inline void sem_putref(struct sem_array *sma) |
3904 | -{ |
3905 | - ipc_rcu_putref(sma); |
3906 | + ipc_rcu_putref(sma, ipc_rcu_free); |
3907 | } |
3908 | |
3909 | static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) |
3910 | @@ -458,14 +496,13 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) |
3911 | sma->sem_perm.security = NULL; |
3912 | retval = security_sem_alloc(sma); |
3913 | if (retval) { |
3914 | - ipc_rcu_putref(sma); |
3915 | + ipc_rcu_putref(sma, ipc_rcu_free); |
3916 | return retval; |
3917 | } |
3918 | |
3919 | id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); |
3920 | if (id < 0) { |
3921 | - security_sem_free(sma); |
3922 | - ipc_rcu_putref(sma); |
3923 | + ipc_rcu_putref(sma, sem_rcu_free); |
3924 | return id; |
3925 | } |
3926 | ns->used_sems += nsems; |
3927 | @@ -1047,8 +1084,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) |
3928 | |
3929 | wake_up_sem_queue_do(&tasks); |
3930 | ns->used_sems -= sma->sem_nsems; |
3931 | - security_sem_free(sma); |
3932 | - ipc_rcu_putref(sma); |
3933 | + ipc_rcu_putref(sma, sem_rcu_free); |
3934 | } |
3935 | |
3936 | static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version) |
3937 | @@ -1292,7 +1328,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, |
3938 | rcu_read_unlock(); |
3939 | sem_io = ipc_alloc(sizeof(ushort)*nsems); |
3940 | if(sem_io == NULL) { |
3941 | - sem_putref(sma); |
3942 | + ipc_rcu_putref(sma, ipc_rcu_free); |
3943 | return -ENOMEM; |
3944 | } |
3945 | |
3946 | @@ -1328,20 +1364,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, |
3947 | if(nsems > SEMMSL_FAST) { |
3948 | sem_io = ipc_alloc(sizeof(ushort)*nsems); |
3949 | if(sem_io == NULL) { |
3950 | - sem_putref(sma); |
3951 | + ipc_rcu_putref(sma, ipc_rcu_free); |
3952 | return -ENOMEM; |
3953 | } |
3954 | } |
3955 | |
3956 | if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) { |
3957 | - sem_putref(sma); |
3958 | + ipc_rcu_putref(sma, ipc_rcu_free); |
3959 | err = -EFAULT; |
3960 | goto out_free; |
3961 | } |
3962 | |
3963 | for (i = 0; i < nsems; i++) { |
3964 | if (sem_io[i] > SEMVMX) { |
3965 | - sem_putref(sma); |
3966 | + ipc_rcu_putref(sma, ipc_rcu_free); |
3967 | err = -ERANGE; |
3968 | goto out_free; |
3969 | } |
3970 | @@ -1629,7 +1665,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) |
3971 | /* step 2: allocate new undo structure */ |
3972 | new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); |
3973 | if (!new) { |
3974 | - sem_putref(sma); |
3975 | + ipc_rcu_putref(sma, ipc_rcu_free); |
3976 | return ERR_PTR(-ENOMEM); |
3977 | } |
3978 | |
3979 | diff --git a/ipc/shm.c b/ipc/shm.c |
3980 | index c6b4ad5..2d6833d 100644 |
3981 | --- a/ipc/shm.c |
3982 | +++ b/ipc/shm.c |
3983 | @@ -155,6 +155,15 @@ static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns, |
3984 | return container_of(ipcp, struct shmid_kernel, shm_perm); |
3985 | } |
3986 | |
3987 | +static void shm_rcu_free(struct rcu_head *head) |
3988 | +{ |
3989 | + struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); |
3990 | + struct shmid_kernel *shp = ipc_rcu_to_struct(p); |
3991 | + |
3992 | + security_shm_free(shp); |
3993 | + ipc_rcu_free(head); |
3994 | +} |
3995 | + |
3996 | static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) |
3997 | { |
3998 | ipc_rmid(&shm_ids(ns), &s->shm_perm); |
3999 | @@ -196,8 +205,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) |
4000 | user_shm_unlock(file_inode(shp->shm_file)->i_size, |
4001 | shp->mlock_user); |
4002 | fput (shp->shm_file); |
4003 | - security_shm_free(shp); |
4004 | - ipc_rcu_putref(shp); |
4005 | + ipc_rcu_putref(shp, shm_rcu_free); |
4006 | } |
4007 | |
4008 | /* |
4009 | @@ -485,7 +493,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) |
4010 | shp->shm_perm.security = NULL; |
4011 | error = security_shm_alloc(shp); |
4012 | if (error) { |
4013 | - ipc_rcu_putref(shp); |
4014 | + ipc_rcu_putref(shp, ipc_rcu_free); |
4015 | return error; |
4016 | } |
4017 | |
4018 | @@ -554,8 +562,7 @@ no_id: |
4019 | user_shm_unlock(size, shp->mlock_user); |
4020 | fput(file); |
4021 | no_file: |
4022 | - security_shm_free(shp); |
4023 | - ipc_rcu_putref(shp); |
4024 | + ipc_rcu_putref(shp, shm_rcu_free); |
4025 | return error; |
4026 | } |
4027 | |
4028 | diff --git a/ipc/util.c b/ipc/util.c |
4029 | index 4704223..0c6566b 100644 |
4030 | --- a/ipc/util.c |
4031 | +++ b/ipc/util.c |
4032 | @@ -465,11 +465,6 @@ void ipc_free(void* ptr, int size) |
4033 | kfree(ptr); |
4034 | } |
4035 | |
4036 | -struct ipc_rcu { |
4037 | - struct rcu_head rcu; |
4038 | - atomic_t refcount; |
4039 | -} ____cacheline_aligned_in_smp; |
4040 | - |
4041 | /** |
4042 | * ipc_rcu_alloc - allocate ipc and rcu space |
4043 | * @size: size desired |
4044 | @@ -496,27 +491,24 @@ int ipc_rcu_getref(void *ptr) |
4045 | return atomic_inc_not_zero(&p->refcount); |
4046 | } |
4047 | |
4048 | -/** |
4049 | - * ipc_schedule_free - free ipc + rcu space |
4050 | - * @head: RCU callback structure for queued work |
4051 | - */ |
4052 | -static void ipc_schedule_free(struct rcu_head *head) |
4053 | -{ |
4054 | - vfree(container_of(head, struct ipc_rcu, rcu)); |
4055 | -} |
4056 | - |
4057 | -void ipc_rcu_putref(void *ptr) |
4058 | +void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head)) |
4059 | { |
4060 | struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1; |
4061 | |
4062 | if (!atomic_dec_and_test(&p->refcount)) |
4063 | return; |
4064 | |
4065 | - if (is_vmalloc_addr(ptr)) { |
4066 | - call_rcu(&p->rcu, ipc_schedule_free); |
4067 | - } else { |
4068 | - kfree_rcu(p, rcu); |
4069 | - } |
4070 | + call_rcu(&p->rcu, func); |
4071 | +} |
4072 | + |
4073 | +void ipc_rcu_free(struct rcu_head *head) |
4074 | +{ |
4075 | + struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); |
4076 | + |
4077 | + if (is_vmalloc_addr(p)) |
4078 | + vfree(p); |
4079 | + else |
4080 | + kfree(p); |
4081 | } |
4082 | |
4083 | /** |
4084 | diff --git a/ipc/util.h b/ipc/util.h |
4085 | index b6a6a88..25299e7 100644 |
4086 | --- a/ipc/util.h |
4087 | +++ b/ipc/util.h |
4088 | @@ -47,6 +47,13 @@ static inline void msg_exit_ns(struct ipc_namespace *ns) { } |
4089 | static inline void shm_exit_ns(struct ipc_namespace *ns) { } |
4090 | #endif |
4091 | |
4092 | +struct ipc_rcu { |
4093 | + struct rcu_head rcu; |
4094 | + atomic_t refcount; |
4095 | +} ____cacheline_aligned_in_smp; |
4096 | + |
4097 | +#define ipc_rcu_to_struct(p) ((void *)(p+1)) |
4098 | + |
4099 | /* |
4100 | * Structure that holds the parameters needed by the ipc operations |
4101 | * (see after) |
4102 | @@ -120,7 +127,8 @@ void ipc_free(void* ptr, int size); |
4103 | */ |
4104 | void* ipc_rcu_alloc(int size); |
4105 | int ipc_rcu_getref(void *ptr); |
4106 | -void ipc_rcu_putref(void *ptr); |
4107 | +void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head)); |
4108 | +void ipc_rcu_free(struct rcu_head *head); |
4109 | |
4110 | struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int); |
4111 | struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id); |
4112 | diff --git a/kernel/kmod.c b/kernel/kmod.c |
4113 | index fb32636..b086006 100644 |
4114 | --- a/kernel/kmod.c |
4115 | +++ b/kernel/kmod.c |
4116 | @@ -571,6 +571,10 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait) |
4117 | DECLARE_COMPLETION_ONSTACK(done); |
4118 | int retval = 0; |
4119 | |
4120 | + if (!sub_info->path) { |
4121 | + call_usermodehelper_freeinfo(sub_info); |
4122 | + return -EINVAL; |
4123 | + } |
4124 | helper_lock(); |
4125 | if (!khelper_wq || usermodehelper_disabled) { |
4126 | retval = -EBUSY; |
4127 | diff --git a/kernel/softirq.c b/kernel/softirq.c |
4128 | index be3d351..adf6c00 100644 |
4129 | --- a/kernel/softirq.c |
4130 | +++ b/kernel/softirq.c |
4131 | @@ -328,10 +328,19 @@ void irq_enter(void) |
4132 | |
4133 | static inline void invoke_softirq(void) |
4134 | { |
4135 | - if (!force_irqthreads) |
4136 | - __do_softirq(); |
4137 | - else |
4138 | + if (!force_irqthreads) { |
4139 | + /* |
4140 | + * We can safely execute softirq on the current stack if |
4141 | + * it is the irq stack, because it should be near empty |
4142 | + * at this stage. But we have no way to know if the arch |
4143 | + * calls irq_exit() on the irq stack. So call softirq |
4144 | + * in its own stack to prevent from any overrun on top |
4145 | + * of a potentially deep task stack. |
4146 | + */ |
4147 | + do_softirq(); |
4148 | + } else { |
4149 | wakeup_softirqd(); |
4150 | + } |
4151 | } |
4152 | |
4153 | static inline void tick_irq_exit(void) |
4154 | diff --git a/mm/Kconfig b/mm/Kconfig |
4155 | index 8028dcc..6509d27 100644 |
4156 | --- a/mm/Kconfig |
4157 | +++ b/mm/Kconfig |
4158 | @@ -183,7 +183,7 @@ config MEMORY_HOTPLUG_SPARSE |
4159 | config MEMORY_HOTREMOVE |
4160 | bool "Allow for memory hot remove" |
4161 | select MEMORY_ISOLATION |
4162 | - select HAVE_BOOTMEM_INFO_NODE if X86_64 |
4163 | + select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64) |
4164 | depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE |
4165 | depends on MIGRATION |
4166 | |
4167 | diff --git a/mm/bounce.c b/mm/bounce.c |
4168 | index c9f0a43..5a7d58f 100644 |
4169 | --- a/mm/bounce.c |
4170 | +++ b/mm/bounce.c |
4171 | @@ -204,6 +204,8 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, |
4172 | struct bio_vec *to, *from; |
4173 | unsigned i; |
4174 | |
4175 | + if (force) |
4176 | + goto bounce; |
4177 | bio_for_each_segment(from, *bio_orig, i) |
4178 | if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q)) |
4179 | goto bounce; |
4180 | diff --git a/mm/migrate.c b/mm/migrate.c |
4181 | index 6f0c244..25ca7ca 100644 |
4182 | --- a/mm/migrate.c |
4183 | +++ b/mm/migrate.c |
4184 | @@ -103,7 +103,7 @@ void putback_movable_pages(struct list_head *l) |
4185 | list_del(&page->lru); |
4186 | dec_zone_page_state(page, NR_ISOLATED_ANON + |
4187 | page_is_file_cache(page)); |
4188 | - if (unlikely(balloon_page_movable(page))) |
4189 | + if (unlikely(isolated_balloon_page(page))) |
4190 | balloon_page_putback(page); |
4191 | else |
4192 | putback_lru_page(page); |
4193 | diff --git a/mm/vmscan.c b/mm/vmscan.c |
4194 | index 2cff0d4..65cbae5 100644 |
4195 | --- a/mm/vmscan.c |
4196 | +++ b/mm/vmscan.c |
4197 | @@ -48,6 +48,7 @@ |
4198 | #include <asm/div64.h> |
4199 | |
4200 | #include <linux/swapops.h> |
4201 | +#include <linux/balloon_compaction.h> |
4202 | |
4203 | #include "internal.h" |
4204 | |
4205 | @@ -1060,7 +1061,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone, |
4206 | LIST_HEAD(clean_pages); |
4207 | |
4208 | list_for_each_entry_safe(page, next, page_list, lru) { |
4209 | - if (page_is_file_cache(page) && !PageDirty(page)) { |
4210 | + if (page_is_file_cache(page) && !PageDirty(page) && |
4211 | + !isolated_balloon_page(page)) { |
4212 | ClearPageActive(page); |
4213 | list_move(&page->lru, &clean_pages); |
4214 | } |
4215 | diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c |
4216 | index 0f04e1c..33b6144 100644 |
4217 | --- a/net/batman-adv/soft-interface.c |
4218 | +++ b/net/batman-adv/soft-interface.c |
4219 | @@ -168,6 +168,7 @@ static int batadv_interface_tx(struct sk_buff *skb, |
4220 | case ETH_P_8021Q: |
4221 | vhdr = (struct vlan_ethhdr *)skb->data; |
4222 | vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; |
4223 | + vid |= BATADV_VLAN_HAS_TAG; |
4224 | |
4225 | if (vhdr->h_vlan_encapsulated_proto != ethertype) |
4226 | break; |
4227 | @@ -329,6 +330,7 @@ void batadv_interface_rx(struct net_device *soft_iface, |
4228 | case ETH_P_8021Q: |
4229 | vhdr = (struct vlan_ethhdr *)skb->data; |
4230 | vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; |
4231 | + vid |= BATADV_VLAN_HAS_TAG; |
4232 | |
4233 | if (vhdr->h_vlan_encapsulated_proto != ethertype) |
4234 | break; |
4235 | diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c |
4236 | index cc27297..2bfd65b 100644 |
4237 | --- a/net/bluetooth/hci_core.c |
4238 | +++ b/net/bluetooth/hci_core.c |
4239 | @@ -1134,7 +1134,11 @@ int hci_dev_open(__u16 dev) |
4240 | goto done; |
4241 | } |
4242 | |
4243 | - if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) { |
4244 | + /* Check for rfkill but allow the HCI setup stage to proceed |
4245 | + * (which in itself doesn't cause any RF activity). |
4246 | + */ |
4247 | + if (test_bit(HCI_RFKILLED, &hdev->dev_flags) && |
4248 | + !test_bit(HCI_SETUP, &hdev->dev_flags)) { |
4249 | ret = -ERFKILL; |
4250 | goto done; |
4251 | } |
4252 | @@ -1554,10 +1558,13 @@ static int hci_rfkill_set_block(void *data, bool blocked) |
4253 | |
4254 | BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); |
4255 | |
4256 | - if (!blocked) |
4257 | - return 0; |
4258 | - |
4259 | - hci_dev_do_close(hdev); |
4260 | + if (blocked) { |
4261 | + set_bit(HCI_RFKILLED, &hdev->dev_flags); |
4262 | + if (!test_bit(HCI_SETUP, &hdev->dev_flags)) |
4263 | + hci_dev_do_close(hdev); |
4264 | + } else { |
4265 | + clear_bit(HCI_RFKILLED, &hdev->dev_flags); |
4266 | + } |
4267 | |
4268 | return 0; |
4269 | } |
4270 | @@ -1579,9 +1586,13 @@ static void hci_power_on(struct work_struct *work) |
4271 | return; |
4272 | } |
4273 | |
4274 | - if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) |
4275 | + if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) { |
4276 | + clear_bit(HCI_AUTO_OFF, &hdev->dev_flags); |
4277 | + hci_dev_do_close(hdev); |
4278 | + } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) { |
4279 | queue_delayed_work(hdev->req_workqueue, &hdev->power_off, |
4280 | HCI_AUTO_OFF_TIMEOUT); |
4281 | + } |
4282 | |
4283 | if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) |
4284 | mgmt_index_added(hdev); |
4285 | @@ -2197,6 +2208,9 @@ int hci_register_dev(struct hci_dev *hdev) |
4286 | } |
4287 | } |
4288 | |
4289 | + if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) |
4290 | + set_bit(HCI_RFKILLED, &hdev->dev_flags); |
4291 | + |
4292 | set_bit(HCI_SETUP, &hdev->dev_flags); |
4293 | |
4294 | if (hdev->dev_type != HCI_AMP) |
4295 | diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c |
4296 | index 0437200..ca24601 100644 |
4297 | --- a/net/bluetooth/hci_event.c |
4298 | +++ b/net/bluetooth/hci_event.c |
4299 | @@ -3552,7 +3552,11 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) |
4300 | cp.handle = cpu_to_le16(conn->handle); |
4301 | |
4302 | if (ltk->authenticated) |
4303 | - conn->sec_level = BT_SECURITY_HIGH; |
4304 | + conn->pending_sec_level = BT_SECURITY_HIGH; |
4305 | + else |
4306 | + conn->pending_sec_level = BT_SECURITY_MEDIUM; |
4307 | + |
4308 | + conn->enc_key_size = ltk->enc_size; |
4309 | |
4310 | hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); |
4311 | |
4312 | diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c |
4313 | index 0c699cd..d38ab15 100644 |
4314 | --- a/net/bluetooth/hidp/core.c |
4315 | +++ b/net/bluetooth/hidp/core.c |
4316 | @@ -225,17 +225,22 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb) |
4317 | |
4318 | static int hidp_send_report(struct hidp_session *session, struct hid_report *report) |
4319 | { |
4320 | - unsigned char buf[32], hdr; |
4321 | - int rsize; |
4322 | + unsigned char hdr; |
4323 | + u8 *buf; |
4324 | + int rsize, ret; |
4325 | |
4326 | - rsize = ((report->size - 1) >> 3) + 1 + (report->id > 0); |
4327 | - if (rsize > sizeof(buf)) |
4328 | + buf = hid_alloc_report_buf(report, GFP_ATOMIC); |
4329 | + if (!buf) |
4330 | return -EIO; |
4331 | |
4332 | hid_output_report(report, buf); |
4333 | hdr = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT; |
4334 | |
4335 | - return hidp_send_intr_message(session, hdr, buf, rsize); |
4336 | + rsize = ((report->size - 1) >> 3) + 1 + (report->id > 0); |
4337 | + ret = hidp_send_intr_message(session, hdr, buf, rsize); |
4338 | + |
4339 | + kfree(buf); |
4340 | + return ret; |
4341 | } |
4342 | |
4343 | static int hidp_get_raw_report(struct hid_device *hid, |
4344 | diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c |
4345 | index b9259ef..e74ddc1 100644 |
4346 | --- a/net/bridge/br_netlink.c |
4347 | +++ b/net/bridge/br_netlink.c |
4348 | @@ -207,7 +207,7 @@ int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
4349 | struct net_device *dev, u32 filter_mask) |
4350 | { |
4351 | int err = 0; |
4352 | - struct net_bridge_port *port = br_port_get_rcu(dev); |
4353 | + struct net_bridge_port *port = br_port_get_rtnl(dev); |
4354 | |
4355 | /* not a bridge port and */ |
4356 | if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN)) |
4357 | @@ -451,7 +451,7 @@ static size_t br_get_link_af_size(const struct net_device *dev) |
4358 | struct net_port_vlans *pv; |
4359 | |
4360 | if (br_port_exists(dev)) |
4361 | - pv = nbp_get_vlan_info(br_port_get_rcu(dev)); |
4362 | + pv = nbp_get_vlan_info(br_port_get_rtnl(dev)); |
4363 | else if (dev->priv_flags & IFF_EBRIDGE) |
4364 | pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev)); |
4365 | else |
4366 | diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h |
4367 | index 263ba90..cde1eb1 100644 |
4368 | --- a/net/bridge/br_private.h |
4369 | +++ b/net/bridge/br_private.h |
4370 | @@ -202,13 +202,10 @@ struct net_bridge_port |
4371 | |
4372 | static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev) |
4373 | { |
4374 | - struct net_bridge_port *port = |
4375 | - rcu_dereference_rtnl(dev->rx_handler_data); |
4376 | - |
4377 | - return br_port_exists(dev) ? port : NULL; |
4378 | + return rcu_dereference(dev->rx_handler_data); |
4379 | } |
4380 | |
4381 | -static inline struct net_bridge_port *br_port_get_rtnl(struct net_device *dev) |
4382 | +static inline struct net_bridge_port *br_port_get_rtnl(const struct net_device *dev) |
4383 | { |
4384 | return br_port_exists(dev) ? |
4385 | rtnl_dereference(dev->rx_handler_data) : NULL; |
4386 | @@ -766,6 +763,7 @@ extern struct net_bridge_port *br_get_port(struct net_bridge *br, |
4387 | extern void br_init_port(struct net_bridge_port *p); |
4388 | extern void br_become_designated_port(struct net_bridge_port *p); |
4389 | |
4390 | +extern void __br_set_forward_delay(struct net_bridge *br, unsigned long t); |
4391 | extern int br_set_forward_delay(struct net_bridge *br, unsigned long x); |
4392 | extern int br_set_hello_time(struct net_bridge *br, unsigned long x); |
4393 | extern int br_set_max_age(struct net_bridge *br, unsigned long x); |
4394 | diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c |
4395 | index 1c0a50f..3c86f05 100644 |
4396 | --- a/net/bridge/br_stp.c |
4397 | +++ b/net/bridge/br_stp.c |
4398 | @@ -209,7 +209,7 @@ static void br_record_config_information(struct net_bridge_port *p, |
4399 | p->designated_age = jiffies - bpdu->message_age; |
4400 | |
4401 | mod_timer(&p->message_age_timer, jiffies |
4402 | - + (p->br->max_age - bpdu->message_age)); |
4403 | + + (bpdu->max_age - bpdu->message_age)); |
4404 | } |
4405 | |
4406 | /* called under bridge lock */ |
4407 | @@ -544,18 +544,27 @@ int br_set_max_age(struct net_bridge *br, unsigned long val) |
4408 | |
4409 | } |
4410 | |
4411 | +void __br_set_forward_delay(struct net_bridge *br, unsigned long t) |
4412 | +{ |
4413 | + br->bridge_forward_delay = t; |
4414 | + if (br_is_root_bridge(br)) |
4415 | + br->forward_delay = br->bridge_forward_delay; |
4416 | +} |
4417 | + |
4418 | int br_set_forward_delay(struct net_bridge *br, unsigned long val) |
4419 | { |
4420 | unsigned long t = clock_t_to_jiffies(val); |
4421 | + int err = -ERANGE; |
4422 | |
4423 | + spin_lock_bh(&br->lock); |
4424 | if (br->stp_enabled != BR_NO_STP && |
4425 | (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY)) |
4426 | - return -ERANGE; |
4427 | + goto unlock; |
4428 | |
4429 | - spin_lock_bh(&br->lock); |
4430 | - br->bridge_forward_delay = t; |
4431 | - if (br_is_root_bridge(br)) |
4432 | - br->forward_delay = br->bridge_forward_delay; |
4433 | + __br_set_forward_delay(br, t); |
4434 | + err = 0; |
4435 | + |
4436 | +unlock: |
4437 | spin_unlock_bh(&br->lock); |
4438 | - return 0; |
4439 | + return err; |
4440 | } |
4441 | diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c |
4442 | index d45e760..108084a 100644 |
4443 | --- a/net/bridge/br_stp_if.c |
4444 | +++ b/net/bridge/br_stp_if.c |
4445 | @@ -129,6 +129,14 @@ static void br_stp_start(struct net_bridge *br) |
4446 | char *envp[] = { NULL }; |
4447 | |
4448 | r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); |
4449 | + |
4450 | + spin_lock_bh(&br->lock); |
4451 | + |
4452 | + if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY) |
4453 | + __br_set_forward_delay(br, BR_MIN_FORWARD_DELAY); |
4454 | + else if (br->bridge_forward_delay < BR_MAX_FORWARD_DELAY) |
4455 | + __br_set_forward_delay(br, BR_MAX_FORWARD_DELAY); |
4456 | + |
4457 | if (r == 0) { |
4458 | br->stp_enabled = BR_USER_STP; |
4459 | br_debug(br, "userspace STP started\n"); |
4460 | @@ -137,10 +145,10 @@ static void br_stp_start(struct net_bridge *br) |
4461 | br_debug(br, "using kernel STP\n"); |
4462 | |
4463 | /* To start timers on any ports left in blocking */ |
4464 | - spin_lock_bh(&br->lock); |
4465 | br_port_state_selection(br); |
4466 | - spin_unlock_bh(&br->lock); |
4467 | } |
4468 | + |
4469 | + spin_unlock_bh(&br->lock); |
4470 | } |
4471 | |
4472 | static void br_stp_stop(struct net_bridge *br) |
4473 | diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c |
4474 | index 2bd4b58..0f45522 100644 |
4475 | --- a/net/caif/cfctrl.c |
4476 | +++ b/net/caif/cfctrl.c |
4477 | @@ -293,9 +293,10 @@ int cfctrl_linkup_request(struct cflayer *layer, |
4478 | |
4479 | count = cfctrl_cancel_req(&cfctrl->serv.layer, |
4480 | user_layer); |
4481 | - if (count != 1) |
4482 | + if (count != 1) { |
4483 | pr_err("Could not remove request (%d)", count); |
4484 | return -ENODEV; |
4485 | + } |
4486 | } |
4487 | return 0; |
4488 | } |
4489 | diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c |
4490 | index d12e3a9..52d0f83 100644 |
4491 | --- a/net/core/flow_dissector.c |
4492 | +++ b/net/core/flow_dissector.c |
4493 | @@ -150,8 +150,8 @@ ipv6: |
4494 | if (poff >= 0) { |
4495 | __be32 *ports, _ports; |
4496 | |
4497 | - nhoff += poff; |
4498 | - ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports); |
4499 | + ports = skb_header_pointer(skb, nhoff + poff, |
4500 | + sizeof(_ports), &_ports); |
4501 | if (ports) |
4502 | flow->ports = *ports; |
4503 | } |
4504 | @@ -348,7 +348,7 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) |
4505 | |
4506 | if (queue_index != new_index && sk && |
4507 | rcu_access_pointer(sk->sk_dst_cache)) |
4508 | - sk_tx_queue_set(sk, queue_index); |
4509 | + sk_tx_queue_set(sk, new_index); |
4510 | |
4511 | queue_index = new_index; |
4512 | } |
4513 | diff --git a/net/core/netpoll.c b/net/core/netpoll.c |
4514 | index 2c637e9..fc75c9e 100644 |
4515 | --- a/net/core/netpoll.c |
4516 | +++ b/net/core/netpoll.c |
4517 | @@ -550,7 +550,7 @@ static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo |
4518 | return; |
4519 | |
4520 | proto = ntohs(eth_hdr(skb)->h_proto); |
4521 | - if (proto == ETH_P_IP) { |
4522 | + if (proto == ETH_P_ARP) { |
4523 | struct arphdr *arp; |
4524 | unsigned char *arp_ptr; |
4525 | /* No arp on this interface */ |
4526 | @@ -1284,15 +1284,14 @@ EXPORT_SYMBOL_GPL(__netpoll_free_async); |
4527 | |
4528 | void netpoll_cleanup(struct netpoll *np) |
4529 | { |
4530 | - if (!np->dev) |
4531 | - return; |
4532 | - |
4533 | rtnl_lock(); |
4534 | + if (!np->dev) |
4535 | + goto out; |
4536 | __netpoll_cleanup(np); |
4537 | - rtnl_unlock(); |
4538 | - |
4539 | dev_put(np->dev); |
4540 | np->dev = NULL; |
4541 | +out: |
4542 | + rtnl_unlock(); |
4543 | } |
4544 | EXPORT_SYMBOL(netpoll_cleanup); |
4545 | |
4546 | diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c |
4547 | index 6a2f13c..3f1ec15 100644 |
4548 | --- a/net/core/secure_seq.c |
4549 | +++ b/net/core/secure_seq.c |
4550 | @@ -10,11 +10,24 @@ |
4551 | |
4552 | #include <net/secure_seq.h> |
4553 | |
4554 | -static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; |
4555 | +#define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4) |
4556 | |
4557 | -void net_secret_init(void) |
4558 | +static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned; |
4559 | + |
4560 | +static void net_secret_init(void) |
4561 | { |
4562 | - get_random_bytes(net_secret, sizeof(net_secret)); |
4563 | + u32 tmp; |
4564 | + int i; |
4565 | + |
4566 | + if (likely(net_secret[0])) |
4567 | + return; |
4568 | + |
4569 | + for (i = NET_SECRET_SIZE; i > 0;) { |
4570 | + do { |
4571 | + get_random_bytes(&tmp, sizeof(tmp)); |
4572 | + } while (!tmp); |
4573 | + cmpxchg(&net_secret[--i], 0, tmp); |
4574 | + } |
4575 | } |
4576 | |
4577 | #ifdef CONFIG_INET |
4578 | @@ -42,6 +55,7 @@ __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, |
4579 | u32 hash[MD5_DIGEST_WORDS]; |
4580 | u32 i; |
4581 | |
4582 | + net_secret_init(); |
4583 | memcpy(hash, saddr, 16); |
4584 | for (i = 0; i < 4; i++) |
4585 | secret[i] = net_secret[i] + (__force u32)daddr[i]; |
4586 | @@ -63,6 +77,7 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, |
4587 | u32 hash[MD5_DIGEST_WORDS]; |
4588 | u32 i; |
4589 | |
4590 | + net_secret_init(); |
4591 | memcpy(hash, saddr, 16); |
4592 | for (i = 0; i < 4; i++) |
4593 | secret[i] = net_secret[i] + (__force u32) daddr[i]; |
4594 | @@ -82,6 +97,7 @@ __u32 secure_ip_id(__be32 daddr) |
4595 | { |
4596 | u32 hash[MD5_DIGEST_WORDS]; |
4597 | |
4598 | + net_secret_init(); |
4599 | hash[0] = (__force __u32) daddr; |
4600 | hash[1] = net_secret[13]; |
4601 | hash[2] = net_secret[14]; |
4602 | @@ -96,6 +112,7 @@ __u32 secure_ipv6_id(const __be32 daddr[4]) |
4603 | { |
4604 | __u32 hash[4]; |
4605 | |
4606 | + net_secret_init(); |
4607 | memcpy(hash, daddr, 16); |
4608 | md5_transform(hash, net_secret); |
4609 | |
4610 | @@ -107,6 +124,7 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, |
4611 | { |
4612 | u32 hash[MD5_DIGEST_WORDS]; |
4613 | |
4614 | + net_secret_init(); |
4615 | hash[0] = (__force u32)saddr; |
4616 | hash[1] = (__force u32)daddr; |
4617 | hash[2] = ((__force u16)sport << 16) + (__force u16)dport; |
4618 | @@ -121,6 +139,7 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) |
4619 | { |
4620 | u32 hash[MD5_DIGEST_WORDS]; |
4621 | |
4622 | + net_secret_init(); |
4623 | hash[0] = (__force u32)saddr; |
4624 | hash[1] = (__force u32)daddr; |
4625 | hash[2] = (__force u32)dport ^ net_secret[14]; |
4626 | @@ -140,6 +159,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, |
4627 | u32 hash[MD5_DIGEST_WORDS]; |
4628 | u64 seq; |
4629 | |
4630 | + net_secret_init(); |
4631 | hash[0] = (__force u32)saddr; |
4632 | hash[1] = (__force u32)daddr; |
4633 | hash[2] = ((__force u16)sport << 16) + (__force u16)dport; |
4634 | @@ -164,6 +184,7 @@ u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr, |
4635 | u64 seq; |
4636 | u32 i; |
4637 | |
4638 | + net_secret_init(); |
4639 | memcpy(hash, saddr, 16); |
4640 | for (i = 0; i < 4; i++) |
4641 | secret[i] = net_secret[i] + daddr[i]; |
4642 | diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c |
4643 | index 9c61f9c..6cf9f77 100644 |
4644 | --- a/net/dccp/ipv6.c |
4645 | +++ b/net/dccp/ipv6.c |
4646 | @@ -135,6 +135,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
4647 | |
4648 | if (dst) |
4649 | dst->ops->redirect(dst, sk, skb); |
4650 | + goto out; |
4651 | } |
4652 | |
4653 | if (type == ICMPV6_PKT_TOOBIG) { |
4654 | diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c |
4655 | index b4d0be2..dd6b523 100644 |
4656 | --- a/net/ipv4/af_inet.c |
4657 | +++ b/net/ipv4/af_inet.c |
4658 | @@ -263,10 +263,8 @@ void build_ehash_secret(void) |
4659 | get_random_bytes(&rnd, sizeof(rnd)); |
4660 | } while (rnd == 0); |
4661 | |
4662 | - if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0) { |
4663 | + if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0) |
4664 | get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret)); |
4665 | - net_secret_init(); |
4666 | - } |
4667 | } |
4668 | EXPORT_SYMBOL(build_ehash_secret); |
4669 | |
4670 | diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c |
4671 | index cd71190..62410fd 100644 |
4672 | --- a/net/ipv4/igmp.c |
4673 | +++ b/net/ipv4/igmp.c |
4674 | @@ -343,7 +343,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) |
4675 | pip->saddr = fl4.saddr; |
4676 | pip->protocol = IPPROTO_IGMP; |
4677 | pip->tot_len = 0; /* filled in later */ |
4678 | - ip_select_ident(pip, &rt->dst, NULL); |
4679 | + ip_select_ident(skb, &rt->dst, NULL); |
4680 | ((u8 *)&pip[1])[0] = IPOPT_RA; |
4681 | ((u8 *)&pip[1])[1] = 4; |
4682 | ((u8 *)&pip[1])[2] = 0; |
4683 | @@ -687,7 +687,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, |
4684 | iph->daddr = dst; |
4685 | iph->saddr = fl4.saddr; |
4686 | iph->protocol = IPPROTO_IGMP; |
4687 | - ip_select_ident(iph, &rt->dst, NULL); |
4688 | + ip_select_ident(skb, &rt->dst, NULL); |
4689 | ((u8 *)&iph[1])[0] = IPOPT_RA; |
4690 | ((u8 *)&iph[1])[1] = 4; |
4691 | ((u8 *)&iph[1])[2] = 0; |
4692 | @@ -709,7 +709,7 @@ static void igmp_gq_timer_expire(unsigned long data) |
4693 | |
4694 | in_dev->mr_gq_running = 0; |
4695 | igmpv3_send_report(in_dev, NULL); |
4696 | - __in_dev_put(in_dev); |
4697 | + in_dev_put(in_dev); |
4698 | } |
4699 | |
4700 | static void igmp_ifc_timer_expire(unsigned long data) |
4701 | @@ -721,7 +721,7 @@ static void igmp_ifc_timer_expire(unsigned long data) |
4702 | in_dev->mr_ifc_count--; |
4703 | igmp_ifc_start_timer(in_dev, IGMP_Unsolicited_Report_Interval); |
4704 | } |
4705 | - __in_dev_put(in_dev); |
4706 | + in_dev_put(in_dev); |
4707 | } |
4708 | |
4709 | static void igmp_ifc_event(struct in_device *in_dev) |
4710 | diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c |
4711 | index 000e3d2..33d5537 100644 |
4712 | --- a/net/ipv4/inetpeer.c |
4713 | +++ b/net/ipv4/inetpeer.c |
4714 | @@ -32,8 +32,8 @@ |
4715 | * At the moment of writing this notes identifier of IP packets is generated |
4716 | * to be unpredictable using this code only for packets subjected |
4717 | * (actually or potentially) to defragmentation. I.e. DF packets less than |
4718 | - * PMTU in size uses a constant ID and do not use this code (see |
4719 | - * ip_select_ident() in include/net/ip.h). |
4720 | + * PMTU in size when local fragmentation is disabled use a constant ID and do |
4721 | + * not use this code (see ip_select_ident() in include/net/ip.h). |
4722 | * |
4723 | * Route cache entries hold references to our nodes. |
4724 | * New cache entries get references via lookup by destination IP address in |
4725 | diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c |
4726 | index 9ee17e3..a04d872 100644 |
4727 | --- a/net/ipv4/ip_output.c |
4728 | +++ b/net/ipv4/ip_output.c |
4729 | @@ -148,7 +148,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, |
4730 | iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr); |
4731 | iph->saddr = saddr; |
4732 | iph->protocol = sk->sk_protocol; |
4733 | - ip_select_ident(iph, &rt->dst, sk); |
4734 | + ip_select_ident(skb, &rt->dst, sk); |
4735 | |
4736 | if (opt && opt->opt.optlen) { |
4737 | iph->ihl += opt->opt.optlen>>2; |
4738 | @@ -386,7 +386,7 @@ packet_routed: |
4739 | ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0); |
4740 | } |
4741 | |
4742 | - ip_select_ident_more(iph, &rt->dst, sk, |
4743 | + ip_select_ident_more(skb, &rt->dst, sk, |
4744 | (skb_shinfo(skb)->gso_segs ?: 1) - 1); |
4745 | |
4746 | skb->priority = sk->sk_priority; |
4747 | @@ -1316,7 +1316,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk, |
4748 | else |
4749 | ttl = ip_select_ttl(inet, &rt->dst); |
4750 | |
4751 | - iph = (struct iphdr *)skb->data; |
4752 | + iph = ip_hdr(skb); |
4753 | iph->version = 4; |
4754 | iph->ihl = 5; |
4755 | iph->tos = inet->tos; |
4756 | @@ -1324,7 +1324,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk, |
4757 | iph->ttl = ttl; |
4758 | iph->protocol = sk->sk_protocol; |
4759 | ip_copy_addrs(iph, fl4); |
4760 | - ip_select_ident(iph, &rt->dst, sk); |
4761 | + ip_select_ident(skb, &rt->dst, sk); |
4762 | |
4763 | if (opt) { |
4764 | iph->ihl += opt->optlen>>2; |
4765 | diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c |
4766 | index ca1cb2d..ffc2108 100644 |
4767 | --- a/net/ipv4/ip_tunnel.c |
4768 | +++ b/net/ipv4/ip_tunnel.c |
4769 | @@ -626,6 +626,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, |
4770 | tunnel->err_count = 0; |
4771 | } |
4772 | |
4773 | + tos = ip_tunnel_ecn_encap(tos, inner_iph, skb); |
4774 | ttl = tnl_params->ttl; |
4775 | if (ttl == 0) { |
4776 | if (skb->protocol == htons(ETH_P_IP)) |
4777 | @@ -644,18 +645,18 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, |
4778 | |
4779 | max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) |
4780 | + rt->dst.header_len; |
4781 | - if (max_headroom > dev->needed_headroom) { |
4782 | + if (max_headroom > dev->needed_headroom) |
4783 | dev->needed_headroom = max_headroom; |
4784 | - if (skb_cow_head(skb, dev->needed_headroom)) { |
4785 | - dev->stats.tx_dropped++; |
4786 | - dev_kfree_skb(skb); |
4787 | - return; |
4788 | - } |
4789 | + |
4790 | + if (skb_cow_head(skb, dev->needed_headroom)) { |
4791 | + dev->stats.tx_dropped++; |
4792 | + dev_kfree_skb(skb); |
4793 | + return; |
4794 | } |
4795 | |
4796 | err = iptunnel_xmit(dev_net(dev), rt, skb, |
4797 | fl4.saddr, fl4.daddr, protocol, |
4798 | - ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df); |
4799 | + tos, ttl, df); |
4800 | iptunnel_xmit_stats(err, &dev->stats, dev->tstats); |
4801 | |
4802 | return; |
4803 | diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c |
4804 | index 850525b..3fbba17 100644 |
4805 | --- a/net/ipv4/ip_tunnel_core.c |
4806 | +++ b/net/ipv4/ip_tunnel_core.c |
4807 | @@ -63,7 +63,7 @@ int iptunnel_xmit(struct net *net, struct rtable *rt, |
4808 | memset(IPCB(skb), 0, sizeof(*IPCB(skb))); |
4809 | |
4810 | /* Push down and install the IP header. */ |
4811 | - __skb_push(skb, sizeof(struct iphdr)); |
4812 | + skb_push(skb, sizeof(struct iphdr)); |
4813 | skb_reset_network_header(skb); |
4814 | |
4815 | iph = ip_hdr(skb); |
4816 | diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c |
4817 | index 132a096..20cb33a 100644 |
4818 | --- a/net/ipv4/ipmr.c |
4819 | +++ b/net/ipv4/ipmr.c |
4820 | @@ -1658,7 +1658,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) |
4821 | iph->protocol = IPPROTO_IPIP; |
4822 | iph->ihl = 5; |
4823 | iph->tot_len = htons(skb->len); |
4824 | - ip_select_ident(iph, skb_dst(skb), NULL); |
4825 | + ip_select_ident(skb, skb_dst(skb), NULL); |
4826 | ip_send_check(iph); |
4827 | |
4828 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
4829 | diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c |
4830 | index 61e60d6..6fb2337 100644 |
4831 | --- a/net/ipv4/raw.c |
4832 | +++ b/net/ipv4/raw.c |
4833 | @@ -387,7 +387,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, |
4834 | iph->check = 0; |
4835 | iph->tot_len = htons(length); |
4836 | if (!iph->id) |
4837 | - ip_select_ident(iph, &rt->dst, NULL); |
4838 | + ip_select_ident(skb, &rt->dst, NULL); |
4839 | |
4840 | iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); |
4841 | } |
4842 | diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c |
4843 | index b2f6c74..95544e4 100644 |
4844 | --- a/net/ipv4/tcp.c |
4845 | +++ b/net/ipv4/tcp.c |
4846 | @@ -2454,10 +2454,11 @@ static int do_tcp_setsockopt(struct sock *sk, int level, |
4847 | case TCP_THIN_DUPACK: |
4848 | if (val < 0 || val > 1) |
4849 | err = -EINVAL; |
4850 | - else |
4851 | + else { |
4852 | tp->thin_dupack = val; |
4853 | if (tp->thin_dupack) |
4854 | tcp_disable_early_retrans(tp); |
4855 | + } |
4856 | break; |
4857 | |
4858 | case TCP_REPAIR: |
4859 | diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c |
4860 | index eb1dd4d..b5663c3 100644 |
4861 | --- a/net/ipv4/xfrm4_mode_tunnel.c |
4862 | +++ b/net/ipv4/xfrm4_mode_tunnel.c |
4863 | @@ -117,7 +117,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) |
4864 | |
4865 | top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ? |
4866 | 0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF)); |
4867 | - ip_select_ident(top_iph, dst->child, NULL); |
4868 | + ip_select_ident(skb, dst->child, NULL); |
4869 | |
4870 | top_iph->ttl = ip4_dst_hoplimit(dst->child); |
4871 | |
4872 | diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c |
4873 | index 498ea99..0f99f7b 100644 |
4874 | --- a/net/ipv6/addrconf.c |
4875 | +++ b/net/ipv6/addrconf.c |
4876 | @@ -1533,6 +1533,33 @@ static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, |
4877 | return false; |
4878 | } |
4879 | |
4880 | +/* Compares an address/prefix_len with addresses on device @dev. |
4881 | + * If one is found it returns true. |
4882 | + */ |
4883 | +bool ipv6_chk_custom_prefix(const struct in6_addr *addr, |
4884 | + const unsigned int prefix_len, struct net_device *dev) |
4885 | +{ |
4886 | + struct inet6_dev *idev; |
4887 | + struct inet6_ifaddr *ifa; |
4888 | + bool ret = false; |
4889 | + |
4890 | + rcu_read_lock(); |
4891 | + idev = __in6_dev_get(dev); |
4892 | + if (idev) { |
4893 | + read_lock_bh(&idev->lock); |
4894 | + list_for_each_entry(ifa, &idev->addr_list, if_list) { |
4895 | + ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len); |
4896 | + if (ret) |
4897 | + break; |
4898 | + } |
4899 | + read_unlock_bh(&idev->lock); |
4900 | + } |
4901 | + rcu_read_unlock(); |
4902 | + |
4903 | + return ret; |
4904 | +} |
4905 | +EXPORT_SYMBOL(ipv6_chk_custom_prefix); |
4906 | + |
4907 | int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev) |
4908 | { |
4909 | struct inet6_dev *idev; |
4910 | diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c |
4911 | index 07a7d65..8d67900 100644 |
4912 | --- a/net/ipv6/exthdrs.c |
4913 | +++ b/net/ipv6/exthdrs.c |
4914 | @@ -162,12 +162,6 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb) |
4915 | off += optlen; |
4916 | len -= optlen; |
4917 | } |
4918 | - /* This case will not be caught by above check since its padding |
4919 | - * length is smaller than 7: |
4920 | - * 1 byte NH + 1 byte Length + 6 bytes Padding |
4921 | - */ |
4922 | - if ((padlen == 6) && ((off - skb_network_header_len(skb)) == 8)) |
4923 | - goto bad; |
4924 | |
4925 | if (len == 0) |
4926 | return true; |
4927 | diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c |
4928 | index c4ff5bb..7fa5cb6 100644 |
4929 | --- a/net/ipv6/ip6_fib.c |
4930 | +++ b/net/ipv6/ip6_fib.c |
4931 | @@ -825,9 +825,9 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) |
4932 | fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr), |
4933 | rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst), |
4934 | allow_create, replace_required); |
4935 | - |
4936 | if (IS_ERR(fn)) { |
4937 | err = PTR_ERR(fn); |
4938 | + fn = NULL; |
4939 | goto out; |
4940 | } |
4941 | |
4942 | diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c |
4943 | index 90747f1..8bc717b 100644 |
4944 | --- a/net/ipv6/ip6_gre.c |
4945 | +++ b/net/ipv6/ip6_gre.c |
4946 | @@ -620,7 +620,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, |
4947 | struct ip6_tnl *tunnel = netdev_priv(dev); |
4948 | struct net_device *tdev; /* Device to other host */ |
4949 | struct ipv6hdr *ipv6h; /* Our new IP header */ |
4950 | - unsigned int max_headroom; /* The extra header space needed */ |
4951 | + unsigned int max_headroom = 0; /* The extra header space needed */ |
4952 | int gre_hlen; |
4953 | struct ipv6_tel_txoption opt; |
4954 | int mtu; |
4955 | @@ -693,7 +693,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, |
4956 | tunnel->err_count = 0; |
4957 | } |
4958 | |
4959 | - max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len; |
4960 | + max_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len; |
4961 | |
4962 | if (skb_headroom(skb) < max_headroom || skb_shared(skb) || |
4963 | (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { |
4964 | diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c |
4965 | index e7ceb6c..44df1c9 100644 |
4966 | --- a/net/ipv6/ip6_output.c |
4967 | +++ b/net/ipv6/ip6_output.c |
4968 | @@ -1040,6 +1040,8 @@ static inline int ip6_ufo_append_data(struct sock *sk, |
4969 | * udp datagram |
4970 | */ |
4971 | if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) { |
4972 | + struct frag_hdr fhdr; |
4973 | + |
4974 | skb = sock_alloc_send_skb(sk, |
4975 | hh_len + fragheaderlen + transhdrlen + 20, |
4976 | (flags & MSG_DONTWAIT), &err); |
4977 | @@ -1061,12 +1063,6 @@ static inline int ip6_ufo_append_data(struct sock *sk, |
4978 | skb->protocol = htons(ETH_P_IPV6); |
4979 | skb->ip_summed = CHECKSUM_PARTIAL; |
4980 | skb->csum = 0; |
4981 | - } |
4982 | - |
4983 | - err = skb_append_datato_frags(sk,skb, getfrag, from, |
4984 | - (length - transhdrlen)); |
4985 | - if (!err) { |
4986 | - struct frag_hdr fhdr; |
4987 | |
4988 | /* Specify the length of each IPv6 datagram fragment. |
4989 | * It has to be a multiple of 8. |
4990 | @@ -1077,15 +1073,10 @@ static inline int ip6_ufo_append_data(struct sock *sk, |
4991 | ipv6_select_ident(&fhdr, rt); |
4992 | skb_shinfo(skb)->ip6_frag_id = fhdr.identification; |
4993 | __skb_queue_tail(&sk->sk_write_queue, skb); |
4994 | - |
4995 | - return 0; |
4996 | } |
4997 | - /* There is not enough support do UPD LSO, |
4998 | - * so follow normal path |
4999 | - */ |
5000 | - kfree_skb(skb); |
5001 | |
5002 | - return err; |
5003 | + return skb_append_datato_frags(sk, skb, getfrag, from, |
5004 | + (length - transhdrlen)); |
5005 | } |
5006 | |
5007 | static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src, |
5008 | @@ -1252,27 +1243,27 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, |
5009 | * --yoshfuji |
5010 | */ |
5011 | |
5012 | - cork->length += length; |
5013 | - if (length > mtu) { |
5014 | - int proto = sk->sk_protocol; |
5015 | - if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){ |
5016 | - ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen); |
5017 | - return -EMSGSIZE; |
5018 | - } |
5019 | - |
5020 | - if (proto == IPPROTO_UDP && |
5021 | - (rt->dst.dev->features & NETIF_F_UFO)) { |
5022 | + if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP || |
5023 | + sk->sk_protocol == IPPROTO_RAW)) { |
5024 | + ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen); |
5025 | + return -EMSGSIZE; |
5026 | + } |
5027 | |
5028 | - err = ip6_ufo_append_data(sk, getfrag, from, length, |
5029 | - hh_len, fragheaderlen, |
5030 | - transhdrlen, mtu, flags, rt); |
5031 | - if (err) |
5032 | - goto error; |
5033 | - return 0; |
5034 | - } |
5035 | + skb = skb_peek_tail(&sk->sk_write_queue); |
5036 | + cork->length += length; |
5037 | + if (((length > mtu) || |
5038 | + (skb && skb_is_gso(skb))) && |
5039 | + (sk->sk_protocol == IPPROTO_UDP) && |
5040 | + (rt->dst.dev->features & NETIF_F_UFO)) { |
5041 | + err = ip6_ufo_append_data(sk, getfrag, from, length, |
5042 | + hh_len, fragheaderlen, |
5043 | + transhdrlen, mtu, flags, rt); |
5044 | + if (err) |
5045 | + goto error; |
5046 | + return 0; |
5047 | } |
5048 | |
5049 | - if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) |
5050 | + if (!skb) |
5051 | goto alloc_new_skb; |
5052 | |
5053 | while (length > 0) { |
5054 | diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c |
5055 | index 46ba243..cf5d490 100644 |
5056 | --- a/net/ipv6/ip6_tunnel.c |
5057 | +++ b/net/ipv6/ip6_tunnel.c |
5058 | @@ -1652,9 +1652,9 @@ static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev) |
5059 | |
5060 | if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) || |
5061 | nla_put(skb, IFLA_IPTUN_LOCAL, sizeof(struct in6_addr), |
5062 | - &parm->raddr) || |
5063 | - nla_put(skb, IFLA_IPTUN_REMOTE, sizeof(struct in6_addr), |
5064 | &parm->laddr) || |
5065 | + nla_put(skb, IFLA_IPTUN_REMOTE, sizeof(struct in6_addr), |
5066 | + &parm->raddr) || |
5067 | nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) || |
5068 | nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) || |
5069 | nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) || |
5070 | @@ -1738,6 +1738,7 @@ static int __net_init ip6_tnl_init_net(struct net *net) |
5071 | if (!ip6n->fb_tnl_dev) |
5072 | goto err_alloc_dev; |
5073 | dev_net_set(ip6n->fb_tnl_dev, net); |
5074 | + ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops; |
5075 | |
5076 | err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev); |
5077 | if (err < 0) |
5078 | diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c |
5079 | index 99cd65c..6b84fc0 100644 |
5080 | --- a/net/ipv6/mcast.c |
5081 | +++ b/net/ipv6/mcast.c |
5082 | @@ -1862,7 +1862,7 @@ static void mld_dad_timer_expire(unsigned long data) |
5083 | if (idev->mc_dad_count) |
5084 | mld_dad_start_timer(idev, idev->mc_maxdelay); |
5085 | } |
5086 | - __in6_dev_put(idev); |
5087 | + in6_dev_put(idev); |
5088 | } |
5089 | |
5090 | static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode, |
5091 | @@ -2207,7 +2207,7 @@ static void mld_gq_timer_expire(unsigned long data) |
5092 | |
5093 | idev->mc_gq_running = 0; |
5094 | mld_send_report(idev, NULL); |
5095 | - __in6_dev_put(idev); |
5096 | + in6_dev_put(idev); |
5097 | } |
5098 | |
5099 | static void mld_ifc_timer_expire(unsigned long data) |
5100 | @@ -2220,7 +2220,7 @@ static void mld_ifc_timer_expire(unsigned long data) |
5101 | if (idev->mc_ifc_count) |
5102 | mld_ifc_start_timer(idev, idev->mc_maxdelay); |
5103 | } |
5104 | - __in6_dev_put(idev); |
5105 | + in6_dev_put(idev); |
5106 | } |
5107 | |
5108 | static void mld_ifc_event(struct inet6_dev *idev) |
5109 | diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c |
5110 | index 21b25dd..86f639b 100644 |
5111 | --- a/net/ipv6/sit.c |
5112 | +++ b/net/ipv6/sit.c |
5113 | @@ -566,6 +566,70 @@ static inline bool is_spoofed_6rd(struct ip_tunnel *tunnel, const __be32 v4addr, |
5114 | return false; |
5115 | } |
5116 | |
5117 | +/* Checks if an address matches an address on the tunnel interface. |
5118 | + * Used to detect the NAT of proto 41 packets and let them pass spoofing test. |
5119 | + * Long story: |
5120 | + * This function is called after we considered the packet as spoofed |
5121 | + * in is_spoofed_6rd. |
5122 | + * We may have a router that is doing NAT for proto 41 packets |
5123 | + * for an internal station. Destination a.a.a.a/PREFIX:bbbb:bbbb |
5124 | + * will be translated to n.n.n.n/PREFIX:bbbb:bbbb. And is_spoofed_6rd |
5125 | + * function will return true, dropping the packet. |
5126 | + * But, we can still check if is spoofed against the IP |
5127 | + * addresses associated with the interface. |
5128 | + */ |
5129 | +static bool only_dnatted(const struct ip_tunnel *tunnel, |
5130 | + const struct in6_addr *v6dst) |
5131 | +{ |
5132 | + int prefix_len; |
5133 | + |
5134 | +#ifdef CONFIG_IPV6_SIT_6RD |
5135 | + prefix_len = tunnel->ip6rd.prefixlen + 32 |
5136 | + - tunnel->ip6rd.relay_prefixlen; |
5137 | +#else |
5138 | + prefix_len = 48; |
5139 | +#endif |
5140 | + return ipv6_chk_custom_prefix(v6dst, prefix_len, tunnel->dev); |
5141 | +} |
5142 | + |
5143 | +/* Returns true if a packet is spoofed */ |
5144 | +static bool packet_is_spoofed(struct sk_buff *skb, |
5145 | + const struct iphdr *iph, |
5146 | + struct ip_tunnel *tunnel) |
5147 | +{ |
5148 | + const struct ipv6hdr *ipv6h; |
5149 | + |
5150 | + if (tunnel->dev->priv_flags & IFF_ISATAP) { |
5151 | + if (!isatap_chksrc(skb, iph, tunnel)) |
5152 | + return true; |
5153 | + |
5154 | + return false; |
5155 | + } |
5156 | + |
5157 | + if (tunnel->dev->flags & IFF_POINTOPOINT) |
5158 | + return false; |
5159 | + |
5160 | + ipv6h = ipv6_hdr(skb); |
5161 | + |
5162 | + if (unlikely(is_spoofed_6rd(tunnel, iph->saddr, &ipv6h->saddr))) { |
5163 | + net_warn_ratelimited("Src spoofed %pI4/%pI6c -> %pI4/%pI6c\n", |
5164 | + &iph->saddr, &ipv6h->saddr, |
5165 | + &iph->daddr, &ipv6h->daddr); |
5166 | + return true; |
5167 | + } |
5168 | + |
5169 | + if (likely(!is_spoofed_6rd(tunnel, iph->daddr, &ipv6h->daddr))) |
5170 | + return false; |
5171 | + |
5172 | + if (only_dnatted(tunnel, &ipv6h->daddr)) |
5173 | + return false; |
5174 | + |
5175 | + net_warn_ratelimited("Dst spoofed %pI4/%pI6c -> %pI4/%pI6c\n", |
5176 | + &iph->saddr, &ipv6h->saddr, |
5177 | + &iph->daddr, &ipv6h->daddr); |
5178 | + return true; |
5179 | +} |
5180 | + |
5181 | static int ipip6_rcv(struct sk_buff *skb) |
5182 | { |
5183 | const struct iphdr *iph = ip_hdr(skb); |
5184 | @@ -588,19 +652,9 @@ static int ipip6_rcv(struct sk_buff *skb) |
5185 | skb->protocol = htons(ETH_P_IPV6); |
5186 | skb->pkt_type = PACKET_HOST; |
5187 | |
5188 | - if (tunnel->dev->priv_flags & IFF_ISATAP) { |
5189 | - if (!isatap_chksrc(skb, iph, tunnel)) { |
5190 | - tunnel->dev->stats.rx_errors++; |
5191 | - goto out; |
5192 | - } |
5193 | - } else if (!(tunnel->dev->flags&IFF_POINTOPOINT)) { |
5194 | - if (is_spoofed_6rd(tunnel, iph->saddr, |
5195 | - &ipv6_hdr(skb)->saddr) || |
5196 | - is_spoofed_6rd(tunnel, iph->daddr, |
5197 | - &ipv6_hdr(skb)->daddr)) { |
5198 | - tunnel->dev->stats.rx_errors++; |
5199 | - goto out; |
5200 | - } |
5201 | + if (packet_is_spoofed(skb, iph, tunnel)) { |
5202 | + tunnel->dev->stats.rx_errors++; |
5203 | + goto out; |
5204 | } |
5205 | |
5206 | __skb_tunnel_rx(skb, tunnel->dev); |
5207 | @@ -752,7 +806,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, |
5208 | neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr); |
5209 | |
5210 | if (neigh == NULL) { |
5211 | - net_dbg_ratelimited("sit: nexthop == NULL\n"); |
5212 | + net_dbg_ratelimited("nexthop == NULL\n"); |
5213 | goto tx_error; |
5214 | } |
5215 | |
5216 | @@ -781,7 +835,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, |
5217 | neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr); |
5218 | |
5219 | if (neigh == NULL) { |
5220 | - net_dbg_ratelimited("sit: nexthop == NULL\n"); |
5221 | + net_dbg_ratelimited("nexthop == NULL\n"); |
5222 | goto tx_error; |
5223 | } |
5224 | |
5225 | @@ -1619,6 +1673,7 @@ static int __net_init sit_init_net(struct net *net) |
5226 | goto err_alloc_dev; |
5227 | } |
5228 | dev_net_set(sitn->fb_tunnel_dev, net); |
5229 | + sitn->fb_tunnel_dev->rtnl_link_ops = &sit_link_ops; |
5230 | /* FB netdevice is special: we have one, and only one per netns. |
5231 | * Allowing to move it to another netns is clearly unsafe. |
5232 | */ |
5233 | diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c |
5234 | index b75ff64..c47444e 100644 |
5235 | --- a/net/netfilter/ipvs/ip_vs_xmit.c |
5236 | +++ b/net/netfilter/ipvs/ip_vs_xmit.c |
5237 | @@ -883,7 +883,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, |
5238 | iph->daddr = cp->daddr.ip; |
5239 | iph->saddr = saddr; |
5240 | iph->ttl = old_iph->ttl; |
5241 | - ip_select_ident(iph, &rt->dst, NULL); |
5242 | + ip_select_ident(skb, &rt->dst, NULL); |
5243 | |
5244 | /* Another hack: avoid icmp_send in ip_fragment */ |
5245 | skb->local_df = 1; |
5246 | diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c |
5247 | index c2178b1..863846c 100644 |
5248 | --- a/net/sched/sch_htb.c |
5249 | +++ b/net/sched/sch_htb.c |
5250 | @@ -1495,7 +1495,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, |
5251 | psched_ratecfg_precompute(&cl->ceil, &hopt->ceil); |
5252 | |
5253 | cl->buffer = PSCHED_TICKS2NS(hopt->buffer); |
5254 | - cl->cbuffer = PSCHED_TICKS2NS(hopt->buffer); |
5255 | + cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer); |
5256 | |
5257 | sch_tree_unlock(sch); |
5258 | |
5259 | diff --git a/net/sctp/input.c b/net/sctp/input.c |
5260 | index 3fa4d85..68155e3 100644 |
5261 | --- a/net/sctp/input.c |
5262 | +++ b/net/sctp/input.c |
5263 | @@ -648,8 +648,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) |
5264 | break; |
5265 | case ICMP_REDIRECT: |
5266 | sctp_icmp_redirect(sk, transport, skb); |
5267 | - err = 0; |
5268 | - break; |
5269 | + /* Fall through to out_unlock. */ |
5270 | default: |
5271 | goto out_unlock; |
5272 | } |
5273 | diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c |
5274 | index 09ffcc9..547a461e 100644 |
5275 | --- a/net/sctp/ipv6.c |
5276 | +++ b/net/sctp/ipv6.c |
5277 | @@ -189,7 +189,7 @@ static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
5278 | break; |
5279 | case NDISC_REDIRECT: |
5280 | sctp_icmp_redirect(sk, transport, skb); |
5281 | - break; |
5282 | + goto out_unlock; |
5283 | default: |
5284 | break; |
5285 | } |
5286 | @@ -210,44 +210,23 @@ out: |
5287 | in6_dev_put(idev); |
5288 | } |
5289 | |
5290 | -/* Based on tcp_v6_xmit() in tcp_ipv6.c. */ |
5291 | static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) |
5292 | { |
5293 | struct sock *sk = skb->sk; |
5294 | struct ipv6_pinfo *np = inet6_sk(sk); |
5295 | - struct flowi6 fl6; |
5296 | - |
5297 | - memset(&fl6, 0, sizeof(fl6)); |
5298 | - |
5299 | - fl6.flowi6_proto = sk->sk_protocol; |
5300 | - |
5301 | - /* Fill in the dest address from the route entry passed with the skb |
5302 | - * and the source address from the transport. |
5303 | - */ |
5304 | - fl6.daddr = transport->ipaddr.v6.sin6_addr; |
5305 | - fl6.saddr = transport->saddr.v6.sin6_addr; |
5306 | - |
5307 | - fl6.flowlabel = np->flow_label; |
5308 | - IP6_ECN_flow_xmit(sk, fl6.flowlabel); |
5309 | - if (ipv6_addr_type(&fl6.saddr) & IPV6_ADDR_LINKLOCAL) |
5310 | - fl6.flowi6_oif = transport->saddr.v6.sin6_scope_id; |
5311 | - else |
5312 | - fl6.flowi6_oif = sk->sk_bound_dev_if; |
5313 | - |
5314 | - if (np->opt && np->opt->srcrt) { |
5315 | - struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt; |
5316 | - fl6.daddr = *rt0->addr; |
5317 | - } |
5318 | + struct flowi6 *fl6 = &transport->fl.u.ip6; |
5319 | |
5320 | pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb, |
5321 | - skb->len, &fl6.saddr, &fl6.daddr); |
5322 | + skb->len, &fl6->saddr, &fl6->daddr); |
5323 | |
5324 | - SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); |
5325 | + IP6_ECN_flow_xmit(sk, fl6->flowlabel); |
5326 | |
5327 | if (!(transport->param_flags & SPP_PMTUD_ENABLE)) |
5328 | skb->local_df = 1; |
5329 | |
5330 | - return ip6_xmit(sk, skb, &fl6, np->opt, np->tclass); |
5331 | + SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); |
5332 | + |
5333 | + return ip6_xmit(sk, skb, fl6, np->opt, np->tclass); |
5334 | } |
5335 | |
5336 | /* Returns the dst cache entry for the given source and destination ip |
5337 | @@ -260,10 +239,12 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, |
5338 | struct dst_entry *dst = NULL; |
5339 | struct flowi6 *fl6 = &fl->u.ip6; |
5340 | struct sctp_bind_addr *bp; |
5341 | + struct ipv6_pinfo *np = inet6_sk(sk); |
5342 | struct sctp_sockaddr_entry *laddr; |
5343 | union sctp_addr *baddr = NULL; |
5344 | union sctp_addr *daddr = &t->ipaddr; |
5345 | union sctp_addr dst_saddr; |
5346 | + struct in6_addr *final_p, final; |
5347 | __u8 matchlen = 0; |
5348 | __u8 bmatchlen; |
5349 | sctp_scope_t scope; |
5350 | @@ -287,7 +268,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, |
5351 | pr_debug("src=%pI6 - ", &fl6->saddr); |
5352 | } |
5353 | |
5354 | - dst = ip6_dst_lookup_flow(sk, fl6, NULL, false); |
5355 | + final_p = fl6_update_dst(fl6, np->opt, &final); |
5356 | + dst = ip6_dst_lookup_flow(sk, fl6, final_p, false); |
5357 | if (!asoc || saddr) |
5358 | goto out; |
5359 | |
5360 | @@ -339,10 +321,12 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, |
5361 | } |
5362 | } |
5363 | rcu_read_unlock(); |
5364 | + |
5365 | if (baddr) { |
5366 | fl6->saddr = baddr->v6.sin6_addr; |
5367 | fl6->fl6_sport = baddr->v6.sin6_port; |
5368 | - dst = ip6_dst_lookup_flow(sk, fl6, NULL, false); |
5369 | + final_p = fl6_update_dst(fl6, np->opt, &final); |
5370 | + dst = ip6_dst_lookup_flow(sk, fl6, final_p, false); |
5371 | } |
5372 | |
5373 | out: |
5374 | diff --git a/net/sctp/socket.c b/net/sctp/socket.c |
5375 | index c6670d2..cf6c6b0 100644 |
5376 | --- a/net/sctp/socket.c |
5377 | +++ b/net/sctp/socket.c |
5378 | @@ -812,6 +812,9 @@ static int sctp_send_asconf_del_ip(struct sock *sk, |
5379 | goto skip_mkasconf; |
5380 | } |
5381 | |
5382 | + if (laddr == NULL) |
5383 | + return -EINVAL; |
5384 | + |
5385 | /* We do not need RCU protection throughout this loop |
5386 | * because this is done under a socket lock from the |
5387 | * setsockopt call. |
5388 | @@ -6182,7 +6185,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) |
5389 | /* Is there any exceptional events? */ |
5390 | if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) |
5391 | mask |= POLLERR | |
5392 | - sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0; |
5393 | + (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); |
5394 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
5395 | mask |= POLLRDHUP | POLLIN | POLLRDNORM; |
5396 | if (sk->sk_shutdown == SHUTDOWN_MASK) |
5397 | diff --git a/net/sysctl_net.c b/net/sysctl_net.c |
5398 | index 9bc6db0..e7000be 100644 |
5399 | --- a/net/sysctl_net.c |
5400 | +++ b/net/sysctl_net.c |
5401 | @@ -47,12 +47,12 @@ static int net_ctl_permissions(struct ctl_table_header *head, |
5402 | |
5403 | /* Allow network administrator to have same access as root. */ |
5404 | if (ns_capable(net->user_ns, CAP_NET_ADMIN) || |
5405 | - uid_eq(root_uid, current_uid())) { |
5406 | + uid_eq(root_uid, current_euid())) { |
5407 | int mode = (table->mode >> 6) & 7; |
5408 | return (mode << 6) | (mode << 3) | mode; |
5409 | } |
5410 | /* Allow netns root group to have the same access as the root group */ |
5411 | - if (gid_eq(root_gid, current_gid())) { |
5412 | + if (in_egroup_p(root_gid)) { |
5413 | int mode = (table->mode >> 3) & 7; |
5414 | return (mode << 3) | mode; |
5415 | } |
5416 | diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c |
5417 | index de00ce1..ac7697e 100644 |
5418 | --- a/sound/pci/hda/patch_conexant.c |
5419 | +++ b/sound/pci/hda/patch_conexant.c |
5420 | @@ -3224,6 +3224,7 @@ enum { |
5421 | CXT_PINCFG_LEMOTE_A1205, |
5422 | CXT_FIXUP_STEREO_DMIC, |
5423 | CXT_FIXUP_INC_MIC_BOOST, |
5424 | + CXT_FIXUP_GPIO1, |
5425 | }; |
5426 | |
5427 | static void cxt_fixup_stereo_dmic(struct hda_codec *codec, |
5428 | @@ -3302,6 +3303,15 @@ static const struct hda_fixup cxt_fixups[] = { |
5429 | .type = HDA_FIXUP_FUNC, |
5430 | .v.func = cxt5066_increase_mic_boost, |
5431 | }, |
5432 | + [CXT_FIXUP_GPIO1] = { |
5433 | + .type = HDA_FIXUP_VERBS, |
5434 | + .v.verbs = (const struct hda_verb[]) { |
5435 | + { 0x01, AC_VERB_SET_GPIO_MASK, 0x01 }, |
5436 | + { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x01 }, |
5437 | + { 0x01, AC_VERB_SET_GPIO_DATA, 0x01 }, |
5438 | + { } |
5439 | + }, |
5440 | + }, |
5441 | }; |
5442 | |
5443 | static const struct snd_pci_quirk cxt5051_fixups[] = { |
5444 | @@ -3311,6 +3321,7 @@ static const struct snd_pci_quirk cxt5051_fixups[] = { |
5445 | |
5446 | static const struct snd_pci_quirk cxt5066_fixups[] = { |
5447 | SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC), |
5448 | + SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_GPIO1), |
5449 | SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410), |
5450 | SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410), |
5451 | SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410), |
5452 | diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c |
5453 | index 8af0434..259d1ac 100644 |
5454 | --- a/sound/soc/codecs/88pm860x-codec.c |
5455 | +++ b/sound/soc/codecs/88pm860x-codec.c |
5456 | @@ -349,6 +349,9 @@ static int snd_soc_put_volsw_2r_st(struct snd_kcontrol *kcontrol, |
5457 | val = ucontrol->value.integer.value[0]; |
5458 | val2 = ucontrol->value.integer.value[1]; |
5459 | |
5460 | + if (val >= ARRAY_SIZE(st_table) || val2 >= ARRAY_SIZE(st_table)) |
5461 | + return -EINVAL; |
5462 | + |
5463 | err = snd_soc_update_bits(codec, reg, 0x3f, st_table[val].m); |
5464 | if (err < 0) |
5465 | return err; |
5466 | diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c |
5467 | index b8ba0ad..80555d7 100644 |
5468 | --- a/sound/soc/codecs/ab8500-codec.c |
5469 | +++ b/sound/soc/codecs/ab8500-codec.c |
5470 | @@ -1225,13 +1225,18 @@ static int anc_status_control_put(struct snd_kcontrol *kcontrol, |
5471 | struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(codec->dev); |
5472 | struct device *dev = codec->dev; |
5473 | bool apply_fir, apply_iir; |
5474 | - int req, status; |
5475 | + unsigned int req; |
5476 | + int status; |
5477 | |
5478 | dev_dbg(dev, "%s: Enter.\n", __func__); |
5479 | |
5480 | mutex_lock(&drvdata->anc_lock); |
5481 | |
5482 | req = ucontrol->value.integer.value[0]; |
5483 | + if (req >= ARRAY_SIZE(enum_anc_state)) { |
5484 | + status = -EINVAL; |
5485 | + goto cleanup; |
5486 | + } |
5487 | if (req != ANC_APPLY_FIR_IIR && req != ANC_APPLY_FIR && |
5488 | req != ANC_APPLY_IIR) { |
5489 | dev_err(dev, "%s: ERROR: Unsupported status to set '%s'!\n", |
5490 | diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c |
5491 | index 41cdd16..8dbcacd 100644 |
5492 | --- a/sound/soc/codecs/max98095.c |
5493 | +++ b/sound/soc/codecs/max98095.c |
5494 | @@ -1863,7 +1863,7 @@ static int max98095_put_eq_enum(struct snd_kcontrol *kcontrol, |
5495 | struct max98095_pdata *pdata = max98095->pdata; |
5496 | int channel = max98095_get_eq_channel(kcontrol->id.name); |
5497 | struct max98095_cdata *cdata; |
5498 | - int sel = ucontrol->value.integer.value[0]; |
5499 | + unsigned int sel = ucontrol->value.integer.value[0]; |
5500 | struct max98095_eq_cfg *coef_set; |
5501 | int fs, best, best_val, i; |
5502 | int regmask, regsave; |
5503 | @@ -2016,7 +2016,7 @@ static int max98095_put_bq_enum(struct snd_kcontrol *kcontrol, |
5504 | struct max98095_pdata *pdata = max98095->pdata; |
5505 | int channel = max98095_get_bq_channel(codec, kcontrol->id.name); |
5506 | struct max98095_cdata *cdata; |
5507 | - int sel = ucontrol->value.integer.value[0]; |
5508 | + unsigned int sel = ucontrol->value.integer.value[0]; |
5509 | struct max98095_biquad_cfg *coef_set; |
5510 | int fs, best, best_val, i; |
5511 | int regmask, regsave; |