Magellan Linux

Contents of /trunk/kernel-alx/patches-3.4/0113-3.4.14-all-fixes.patch

Parent Directory | Revision Log


Revision 1946 - (show annotations) (download)
Wed Nov 14 15:25:09 2012 UTC (11 years, 5 months ago) by niro
File size: 123551 byte(s)
3.4.18-alx-r1
1 diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
2 index 153d3fc..8e3d91b 100644
3 --- a/arch/alpha/kernel/process.c
4 +++ b/arch/alpha/kernel/process.c
5 @@ -28,6 +28,7 @@
6 #include <linux/tty.h>
7 #include <linux/console.h>
8 #include <linux/slab.h>
9 +#include <linux/rcupdate.h>
10
11 #include <asm/reg.h>
12 #include <asm/uaccess.h>
13 @@ -54,8 +55,11 @@ cpu_idle(void)
14 /* FIXME -- EV6 and LCA45 know how to power down
15 the CPU. */
16
17 + rcu_idle_enter();
18 while (!need_resched())
19 cpu_relax();
20 +
21 + rcu_idle_exit();
22 schedule();
23 }
24 }
25 diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c
26 index 891dad8..c722027 100644
27 --- a/arch/cris/kernel/process.c
28 +++ b/arch/cris/kernel/process.c
29 @@ -25,6 +25,7 @@
30 #include <linux/elfcore.h>
31 #include <linux/mqueue.h>
32 #include <linux/reboot.h>
33 +#include <linux/rcupdate.h>
34
35 //#define DEBUG
36
37 @@ -102,6 +103,7 @@ void cpu_idle (void)
38 {
39 /* endless idle loop with no priority at all */
40 while (1) {
41 + rcu_idle_enter();
42 while (!need_resched()) {
43 void (*idle)(void);
44 /*
45 @@ -114,6 +116,7 @@ void cpu_idle (void)
46 idle = default_idle;
47 idle();
48 }
49 + rcu_idle_exit();
50 schedule_preempt_disabled();
51 }
52 }
53 diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
54 index d4de48b..3941cbc 100644
55 --- a/arch/frv/kernel/process.c
56 +++ b/arch/frv/kernel/process.c
57 @@ -25,6 +25,7 @@
58 #include <linux/reboot.h>
59 #include <linux/interrupt.h>
60 #include <linux/pagemap.h>
61 +#include <linux/rcupdate.h>
62
63 #include <asm/asm-offsets.h>
64 #include <asm/uaccess.h>
65 @@ -84,12 +85,14 @@ void cpu_idle(void)
66 {
67 /* endless idle loop with no priority at all */
68 while (1) {
69 + rcu_idle_enter();
70 while (!need_resched()) {
71 check_pgt_cache();
72
73 if (!frv_dma_inprogress && idle)
74 idle();
75 }
76 + rcu_idle_exit();
77
78 schedule_preempt_disabled();
79 }
80 diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
81 index 0e9c315..f153ed1 100644
82 --- a/arch/h8300/kernel/process.c
83 +++ b/arch/h8300/kernel/process.c
84 @@ -36,6 +36,7 @@
85 #include <linux/reboot.h>
86 #include <linux/fs.h>
87 #include <linux/slab.h>
88 +#include <linux/rcupdate.h>
89
90 #include <asm/uaccess.h>
91 #include <asm/traps.h>
92 @@ -78,8 +79,10 @@ void (*idle)(void) = default_idle;
93 void cpu_idle(void)
94 {
95 while (1) {
96 + rcu_idle_enter();
97 while (!need_resched())
98 idle();
99 + rcu_idle_exit();
100 schedule_preempt_disabled();
101 }
102 }
103 diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
104 index ce74e14..86d74ab 100644
105 --- a/arch/ia64/kernel/process.c
106 +++ b/arch/ia64/kernel/process.c
107 @@ -29,6 +29,7 @@
108 #include <linux/kdebug.h>
109 #include <linux/utsname.h>
110 #include <linux/tracehook.h>
111 +#include <linux/rcupdate.h>
112
113 #include <asm/cpu.h>
114 #include <asm/delay.h>
115 @@ -301,6 +302,7 @@ cpu_idle (void)
116
117 /* endless idle loop with no priority at all */
118 while (1) {
119 + rcu_idle_enter();
120 if (can_do_pal_halt) {
121 current_thread_info()->status &= ~TS_POLLING;
122 /*
123 @@ -331,6 +333,7 @@ cpu_idle (void)
124 normal_xtp();
125 #endif
126 }
127 + rcu_idle_exit();
128 schedule_preempt_disabled();
129 check_pgt_cache();
130 if (cpu_is_offline(cpu))
131 diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
132 index 3a4a32b..384e63f 100644
133 --- a/arch/m32r/kernel/process.c
134 +++ b/arch/m32r/kernel/process.c
135 @@ -26,6 +26,7 @@
136 #include <linux/ptrace.h>
137 #include <linux/unistd.h>
138 #include <linux/hardirq.h>
139 +#include <linux/rcupdate.h>
140
141 #include <asm/io.h>
142 #include <asm/uaccess.h>
143 @@ -82,6 +83,7 @@ void cpu_idle (void)
144 {
145 /* endless idle loop with no priority at all */
146 while (1) {
147 + rcu_idle_enter();
148 while (!need_resched()) {
149 void (*idle)(void) = pm_idle;
150
151 @@ -90,6 +92,7 @@ void cpu_idle (void)
152
153 idle();
154 }
155 + rcu_idle_exit();
156 schedule_preempt_disabled();
157 }
158 }
159 diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
160 index c488e3c..ac2892e 100644
161 --- a/arch/m68k/kernel/process.c
162 +++ b/arch/m68k/kernel/process.c
163 @@ -25,6 +25,7 @@
164 #include <linux/reboot.h>
165 #include <linux/init_task.h>
166 #include <linux/mqueue.h>
167 +#include <linux/rcupdate.h>
168
169 #include <asm/uaccess.h>
170 #include <asm/traps.h>
171 @@ -75,8 +76,10 @@ void cpu_idle(void)
172 {
173 /* endless idle loop with no priority at all */
174 while (1) {
175 + rcu_idle_enter();
176 while (!need_resched())
177 idle();
178 + rcu_idle_exit();
179 schedule_preempt_disabled();
180 }
181 }
182 diff --git a/arch/mips/Makefile b/arch/mips/Makefile
183 index 4fedf5a..5c1e75d 100644
184 --- a/arch/mips/Makefile
185 +++ b/arch/mips/Makefile
186 @@ -225,7 +225,7 @@ KBUILD_CPPFLAGS += -D"DATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)"
187 LDFLAGS += -m $(ld-emul)
188
189 ifdef CONFIG_MIPS
190 -CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -xc /dev/null | \
191 +CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
192 egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \
193 sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/")
194 ifdef CONFIG_64BIT
195 diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
196 index 0c6877e..d3d6fa9 100644
197 --- a/arch/mips/kernel/Makefile
198 +++ b/arch/mips/kernel/Makefile
199 @@ -104,7 +104,7 @@ obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o
200
201 obj-$(CONFIG_OF) += prom.o
202
203 -CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
204 +CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
205
206 obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
207
208 diff --git a/arch/mn10300/Makefile b/arch/mn10300/Makefile
209 index 7120282..3eb4a52 100644
210 --- a/arch/mn10300/Makefile
211 +++ b/arch/mn10300/Makefile
212 @@ -26,7 +26,7 @@ CHECKFLAGS +=
213 PROCESSOR := unset
214 UNIT := unset
215
216 -KBUILD_CFLAGS += -mam33 -mmem-funcs -DCPU=AM33
217 +KBUILD_CFLAGS += -mam33 -DCPU=AM33 $(call cc-option,-mmem-funcs,)
218 KBUILD_AFLAGS += -mam33 -DCPU=AM33
219
220 ifeq ($(CONFIG_MN10300_CURRENT_IN_E2),y)
221 diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
222 index 14707f2..675d8f2 100644
223 --- a/arch/mn10300/kernel/process.c
224 +++ b/arch/mn10300/kernel/process.c
225 @@ -25,6 +25,7 @@
226 #include <linux/err.h>
227 #include <linux/fs.h>
228 #include <linux/slab.h>
229 +#include <linux/rcupdate.h>
230 #include <asm/uaccess.h>
231 #include <asm/pgtable.h>
232 #include <asm/io.h>
233 @@ -107,6 +108,7 @@ void cpu_idle(void)
234 {
235 /* endless idle loop with no priority at all */
236 for (;;) {
237 + rcu_idle_enter();
238 while (!need_resched()) {
239 void (*idle)(void);
240
241 @@ -121,6 +123,7 @@ void cpu_idle(void)
242 }
243 idle();
244 }
245 + rcu_idle_exit();
246
247 schedule_preempt_disabled();
248 }
249 diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
250 index d4b94b3..c54a4db 100644
251 --- a/arch/parisc/kernel/process.c
252 +++ b/arch/parisc/kernel/process.c
253 @@ -48,6 +48,7 @@
254 #include <linux/unistd.h>
255 #include <linux/kallsyms.h>
256 #include <linux/uaccess.h>
257 +#include <linux/rcupdate.h>
258
259 #include <asm/io.h>
260 #include <asm/asm-offsets.h>
261 @@ -69,8 +70,10 @@ void cpu_idle(void)
262
263 /* endless idle loop with no priority at all */
264 while (1) {
265 + rcu_idle_enter();
266 while (!need_resched())
267 barrier();
268 + rcu_idle_exit();
269 schedule_preempt_disabled();
270 check_pgt_cache();
271 }
272 diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
273 index ac39e6a..2974edd 100644
274 --- a/arch/powerpc/include/asm/pci-bridge.h
275 +++ b/arch/powerpc/include/asm/pci-bridge.h
276 @@ -181,6 +181,14 @@ static inline int pci_device_from_OF_node(struct device_node *np,
277 #if defined(CONFIG_EEH)
278 static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn)
279 {
280 + /*
281 + * For those OF nodes whose parent isn't PCI bridge, they
282 + * don't have PCI_DN actually. So we have to skip them for
283 + * any EEH operations.
284 + */
285 + if (!dn || !PCI_DN(dn))
286 + return NULL;
287 +
288 return PCI_DN(dn)->edev;
289 }
290 #endif
291 diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
292 index a75e37d..41d4b16 100644
293 --- a/arch/powerpc/platforms/pseries/eeh.c
294 +++ b/arch/powerpc/platforms/pseries/eeh.c
295 @@ -1029,7 +1029,7 @@ static void eeh_add_device_early(struct device_node *dn)
296 {
297 struct pci_controller *phb;
298
299 - if (!dn || !of_node_to_eeh_dev(dn))
300 + if (!of_node_to_eeh_dev(dn))
301 return;
302 phb = of_node_to_eeh_dev(dn)->phb;
303
304 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
305 index 2707023..637970c 100644
306 --- a/arch/score/kernel/process.c
307 +++ b/arch/score/kernel/process.c
308 @@ -27,6 +27,7 @@
309 #include <linux/reboot.h>
310 #include <linux/elfcore.h>
311 #include <linux/pm.h>
312 +#include <linux/rcupdate.h>
313
314 void (*pm_power_off)(void);
315 EXPORT_SYMBOL(pm_power_off);
316 @@ -50,9 +51,10 @@ void __noreturn cpu_idle(void)
317 {
318 /* endless idle loop with no priority at all */
319 while (1) {
320 + rcu_idle_enter();
321 while (!need_resched())
322 barrier();
323 -
324 + rcu_idle_exit();
325 schedule_preempt_disabled();
326 }
327 }
328 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
329 index b1c611e..f1276aa 100644
330 --- a/arch/x86/Makefile
331 +++ b/arch/x86/Makefile
332 @@ -85,7 +85,7 @@ endif
333 ifdef CONFIG_X86_X32
334 x32_ld_ok := $(call try-run,\
335 /bin/echo -e '1: .quad 1b' | \
336 - $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" - && \
337 + $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" - && \
338 $(OBJCOPY) -O elf32-x86-64 "$$TMP" "$$TMPO" && \
339 $(LD) -m elf32_x86_64 "$$TMPO" -o "$$TMP",y,n)
340 ifeq ($(x32_ld_ok),y)
341 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
342 index e398bb5..8a84501 100644
343 --- a/arch/x86/boot/compressed/Makefile
344 +++ b/arch/x86/boot/compressed/Makefile
345 @@ -28,6 +28,9 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \
346 $(obj)/string.o $(obj)/cmdline.o $(obj)/early_serial_console.o \
347 $(obj)/piggy.o
348
349 +$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
350 +$(obj)/efi_stub_$(BITS).o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
351 +
352 ifeq ($(CONFIG_EFI_STUB), y)
353 VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o
354 endif
355 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
356 index 49afb3f..c3520d7 100644
357 --- a/arch/x86/include/asm/pgtable.h
358 +++ b/arch/x86/include/asm/pgtable.h
359 @@ -146,8 +146,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
360
361 static inline int pmd_large(pmd_t pte)
362 {
363 - return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
364 - (_PAGE_PSE | _PAGE_PRESENT);
365 + return pmd_flags(pte) & _PAGE_PSE;
366 }
367
368 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
369 @@ -415,7 +414,13 @@ static inline int pte_hidden(pte_t pte)
370
371 static inline int pmd_present(pmd_t pmd)
372 {
373 - return pmd_flags(pmd) & _PAGE_PRESENT;
374 + /*
375 + * Checking for _PAGE_PSE is needed too because
376 + * split_huge_page will temporarily clear the present bit (but
377 + * the _PAGE_PSE flag will remain set at all times while the
378 + * _PAGE_PRESENT bit is clear).
379 + */
380 + return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
381 }
382
383 static inline int pmd_none(pmd_t pmd)
384 diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
385 index 92660eda..f55a4ce 100644
386 --- a/arch/x86/platform/efi/efi.c
387 +++ b/arch/x86/platform/efi/efi.c
388 @@ -890,6 +890,7 @@ void __init efi_enter_virtual_mode(void)
389 *
390 * Call EFI services through wrapper functions.
391 */
392 + efi.runtime_version = efi_systab.fw_revision;
393 efi.get_time = virt_efi_get_time;
394 efi.set_time = virt_efi_set_time;
395 efi.get_wakeup_time = virt_efi_get_wakeup_time;
396 diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
397 index 6a2d6ed..7a41d9e 100644
398 --- a/arch/xtensa/kernel/process.c
399 +++ b/arch/xtensa/kernel/process.c
400 @@ -31,6 +31,7 @@
401 #include <linux/mqueue.h>
402 #include <linux/fs.h>
403 #include <linux/slab.h>
404 +#include <linux/rcupdate.h>
405
406 #include <asm/pgtable.h>
407 #include <asm/uaccess.h>
408 @@ -110,8 +111,10 @@ void cpu_idle(void)
409
410 /* endless idle loop with no priority at all */
411 while (1) {
412 + rcu_idle_enter();
413 while (!need_resched())
414 platform_idle();
415 + rcu_idle_exit();
416 schedule_preempt_disabled();
417 }
418 }
419 diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
420 index 3188da3..cf02e97 100644
421 --- a/drivers/acpi/bus.c
422 +++ b/drivers/acpi/bus.c
423 @@ -954,8 +954,6 @@ static int __init acpi_bus_init(void)
424 status = acpi_ec_ecdt_probe();
425 /* Ignore result. Not having an ECDT is not fatal. */
426
427 - acpi_bus_osc_support();
428 -
429 status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
430 if (ACPI_FAILURE(status)) {
431 printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n");
432 @@ -963,6 +961,12 @@ static int __init acpi_bus_init(void)
433 }
434
435 /*
436 + * _OSC method may exist in module level code,
437 + * so it must be run after ACPI_FULL_INITIALIZATION
438 + */
439 + acpi_bus_osc_support();
440 +
441 + /*
442 * _PDC control method may load dynamic SSDT tables,
443 * and we need to install the table handler before that.
444 */
445 diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
446 index f7eff25..ebc272f 100644
447 --- a/drivers/base/power/main.c
448 +++ b/drivers/base/power/main.c
449 @@ -984,7 +984,7 @@ int dpm_suspend_end(pm_message_t state)
450
451 error = dpm_suspend_noirq(state);
452 if (error) {
453 - dpm_resume_early(state);
454 + dpm_resume_early(resume_event(state));
455 return error;
456 }
457
458 diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
459 index de0435e..887f68f 100644
460 --- a/drivers/block/aoe/aoecmd.c
461 +++ b/drivers/block/aoe/aoecmd.c
462 @@ -35,6 +35,7 @@ new_skb(ulong len)
463 skb_reset_mac_header(skb);
464 skb_reset_network_header(skb);
465 skb->protocol = __constant_htons(ETH_P_AOE);
466 + skb_checksum_none_assert(skb);
467 }
468 return skb;
469 }
470 diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
471 index 2397f6f..6c87d67 100644
472 --- a/drivers/dma/dmaengine.c
473 +++ b/drivers/dma/dmaengine.c
474 @@ -578,7 +578,7 @@ void dmaengine_get(void)
475 list_del_rcu(&device->global_node);
476 break;
477 } else if (err)
478 - pr_err("%s: failed to get %s: (%d)\n",
479 + pr_debug("%s: failed to get %s: (%d)\n",
480 __func__, dma_chan_name(chan), err);
481 }
482 }
483 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
484 index 7e479a4..4fd363f 100644
485 --- a/drivers/gpu/drm/drm_crtc.c
486 +++ b/drivers/gpu/drm/drm_crtc.c
487 @@ -1028,15 +1028,15 @@ void drm_mode_config_cleanup(struct drm_device *dev)
488 fb->funcs->destroy(fb);
489 }
490
491 - list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
492 - crtc->funcs->destroy(crtc);
493 - }
494 -
495 list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
496 head) {
497 plane->funcs->destroy(plane);
498 }
499
500 + list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
501 + crtc->funcs->destroy(crtc);
502 + }
503 +
504 idr_remove_all(&dev->mode_config.crtc_idr);
505 idr_destroy(&dev->mode_config.crtc_idr);
506 }
507 diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
508 index 65060b7..645dcbf 100644
509 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
510 +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
511 @@ -147,6 +147,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
512 (rdev->pdev->subsystem_device == 0x01fd))
513 return true;
514
515 + /* Gateway RS690 only seems to work with MSIs. */
516 + if ((rdev->pdev->device == 0x791f) &&
517 + (rdev->pdev->subsystem_vendor == 0x107b) &&
518 + (rdev->pdev->subsystem_device == 0x0185))
519 + return true;
520 +
521 + /* try and enable MSIs by default on all RS690s */
522 + if (rdev->family == CHIP_RS690)
523 + return true;
524 +
525 /* RV515 seems to have MSI issues where it loses
526 * MSI rearms occasionally. This leads to lockups and freezes.
527 * disable it by default.
528 diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
529 index caa55d6..b8459bd 100644
530 --- a/drivers/gpu/drm/radeon/radeon_pm.c
531 +++ b/drivers/gpu/drm/radeon/radeon_pm.c
532 @@ -567,7 +567,9 @@ void radeon_pm_suspend(struct radeon_device *rdev)
533 void radeon_pm_resume(struct radeon_device *rdev)
534 {
535 /* set up the default clocks if the MC ucode is loaded */
536 - if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
537 + if ((rdev->family >= CHIP_BARTS) &&
538 + (rdev->family <= CHIP_CAYMAN) &&
539 + rdev->mc_fw) {
540 if (rdev->pm.default_vddc)
541 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
542 SET_VOLTAGE_TYPE_ASIC_VDDC);
543 @@ -622,7 +624,9 @@ int radeon_pm_init(struct radeon_device *rdev)
544 radeon_pm_print_states(rdev);
545 radeon_pm_init_profile(rdev);
546 /* set up the default clocks if the MC ucode is loaded */
547 - if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
548 + if ((rdev->family >= CHIP_BARTS) &&
549 + (rdev->family <= CHIP_CAYMAN) &&
550 + rdev->mc_fw) {
551 if (rdev->pm.default_vddc)
552 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
553 SET_VOLTAGE_TYPE_ASIC_VDDC);
554 diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
555 index cb1ee4e..2a25888 100644
556 --- a/drivers/gpu/drm/savage/savage_bci.c
557 +++ b/drivers/gpu/drm/savage/savage_bci.c
558 @@ -547,6 +547,8 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
559
560 dev_priv->chipset = (enum savage_family)chipset;
561
562 + pci_set_master(dev->pdev);
563 +
564 return 0;
565 }
566
567 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
568 index 5fda348..0d251d3 100644
569 --- a/drivers/iommu/intel-iommu.c
570 +++ b/drivers/iommu/intel-iommu.c
571 @@ -588,7 +588,9 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
572 {
573 int i;
574
575 - domain->iommu_coherency = 1;
576 + i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
577 +
578 + domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;
579
580 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
581 if (!ecap_coherent(g_iommus[i]->ecap)) {
582 diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
583 index 0e49c99..c06992e 100644
584 --- a/drivers/media/rc/ite-cir.c
585 +++ b/drivers/media/rc/ite-cir.c
586 @@ -1473,6 +1473,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
587 rdev = rc_allocate_device();
588 if (!rdev)
589 goto failure;
590 + itdev->rdev = rdev;
591
592 ret = -ENODEV;
593
594 @@ -1604,7 +1605,6 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
595 if (ret)
596 goto failure;
597
598 - itdev->rdev = rdev;
599 ite_pr(KERN_NOTICE, "driver has been successfully loaded\n");
600
601 return 0;
602 diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
603 index 30662fc..63f571b 100644
604 --- a/drivers/media/video/gspca/pac7302.c
605 +++ b/drivers/media/video/gspca/pac7302.c
606 @@ -945,6 +945,7 @@ static const struct usb_device_id device_table[] = {
607 {USB_DEVICE(0x093a, 0x262a)},
608 {USB_DEVICE(0x093a, 0x262c)},
609 {USB_DEVICE(0x145f, 0x013c)},
610 + {USB_DEVICE(0x1ae7, 0x2001)}, /* SpeedLink Snappy Mic SL-6825-SBK */
611 {}
612 };
613 MODULE_DEVICE_TABLE(usb, device_table);
614 diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
615 index ca881ef..746a59c 100644
616 --- a/drivers/mfd/max8925-core.c
617 +++ b/drivers/mfd/max8925-core.c
618 @@ -18,12 +18,19 @@
619 #include <linux/mfd/core.h>
620 #include <linux/mfd/max8925.h>
621
622 +static struct resource io_parent = {
623 + .start = 0,
624 + .end = 0xffffffff,
625 + .flags = IORESOURCE_IO,
626 +};
627 +
628 static struct resource backlight_resources[] = {
629 {
630 .name = "max8925-backlight",
631 .start = MAX8925_WLED_MODE_CNTL,
632 .end = MAX8925_WLED_CNTL,
633 .flags = IORESOURCE_IO,
634 + .parent = &io_parent,
635 },
636 };
637
638 @@ -42,6 +49,7 @@ static struct resource touch_resources[] = {
639 .start = MAX8925_TSC_IRQ,
640 .end = MAX8925_ADC_RES_END,
641 .flags = IORESOURCE_IO,
642 + .parent = &io_parent,
643 },
644 };
645
646 @@ -60,6 +68,7 @@ static struct resource power_supply_resources[] = {
647 .start = MAX8925_CHG_IRQ1,
648 .end = MAX8925_CHG_IRQ1_MASK,
649 .flags = IORESOURCE_IO,
650 + .parent = &io_parent,
651 },
652 };
653
654 @@ -118,6 +127,7 @@ static struct mfd_cell onkey_devs[] = {
655 .start = MAX8925_##_start, \
656 .end = MAX8925_##_end, \
657 .flags = IORESOURCE_IO, \
658 + .parent = &io_parent, \
659 }
660
661 static struct resource regulator_resources[] = {
662 diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
663 index 71a0c4e..1025377 100644
664 --- a/drivers/mmc/host/omap_hsmmc.c
665 +++ b/drivers/mmc/host/omap_hsmmc.c
666 @@ -2097,8 +2097,7 @@ static int omap_hsmmc_suspend(struct device *dev)
667 if (ret) {
668 host->suspended = 0;
669 if (host->pdata->resume) {
670 - ret = host->pdata->resume(dev, host->slot_id);
671 - if (ret)
672 + if (host->pdata->resume(dev, host->slot_id))
673 dev_dbg(dev, "Unmask interrupt failed\n");
674 }
675 goto err;
676 diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
677 index 724b35e..3b8236b 100644
678 --- a/drivers/mmc/host/sh_mmcif.c
679 +++ b/drivers/mmc/host/sh_mmcif.c
680 @@ -1191,6 +1191,10 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
681 host->sd_error = true;
682 dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
683 }
684 + if (host->state == STATE_IDLE) {
685 + dev_info(&host->pd->dev, "Spurious IRQ status 0x%x", state);
686 + return IRQ_HANDLED;
687 + }
688 if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
689 if (!host->dma_active)
690 return IRQ_WAKE_THREAD;
691 diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
692 index e5bfd0e..0598d52 100644
693 --- a/drivers/mtd/maps/autcpu12-nvram.c
694 +++ b/drivers/mtd/maps/autcpu12-nvram.c
695 @@ -43,7 +43,8 @@ struct map_info autcpu12_sram_map = {
696
697 static int __init init_autcpu12_sram (void)
698 {
699 - int err, save0, save1;
700 + map_word tmp, save0, save1;
701 + int err;
702
703 autcpu12_sram_map.virt = ioremap(0x12000000, SZ_128K);
704 if (!autcpu12_sram_map.virt) {
705 @@ -51,7 +52,7 @@ static int __init init_autcpu12_sram (void)
706 err = -EIO;
707 goto out;
708 }
709 - simple_map_init(&autcpu_sram_map);
710 + simple_map_init(&autcpu12_sram_map);
711
712 /*
713 * Check for 32K/128K
714 @@ -61,20 +62,22 @@ static int __init init_autcpu12_sram (void)
715 * Read and check result on ofs 0x0
716 * Restore contents
717 */
718 - save0 = map_read32(&autcpu12_sram_map,0);
719 - save1 = map_read32(&autcpu12_sram_map,0x10000);
720 - map_write32(&autcpu12_sram_map,~save0,0x10000);
721 + save0 = map_read(&autcpu12_sram_map, 0);
722 + save1 = map_read(&autcpu12_sram_map, 0x10000);
723 + tmp.x[0] = ~save0.x[0];
724 + map_write(&autcpu12_sram_map, tmp, 0x10000);
725 /* if we find this pattern on 0x0, we have 32K size
726 * restore contents and exit
727 */
728 - if ( map_read32(&autcpu12_sram_map,0) != save0) {
729 - map_write32(&autcpu12_sram_map,save0,0x0);
730 + tmp = map_read(&autcpu12_sram_map, 0);
731 + if (!map_word_equal(&autcpu12_sram_map, tmp, save0)) {
732 + map_write(&autcpu12_sram_map, save0, 0x0);
733 goto map;
734 }
735 /* We have a 128K found, restore 0x10000 and set size
736 * to 128K
737 */
738 - map_write32(&autcpu12_sram_map,save1,0x10000);
739 + map_write(&autcpu12_sram_map, save1, 0x10000);
740 autcpu12_sram_map.size = SZ_128K;
741
742 map:
743 diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
744 index 9651c06..bf24aa7 100644
745 --- a/drivers/mtd/mtdpart.c
746 +++ b/drivers/mtd/mtdpart.c
747 @@ -709,6 +709,8 @@ static const char *default_mtd_part_types[] = {
748 * partition parsers, specified in @types. However, if @types is %NULL, then
749 * the default list of parsers is used. The default list contains only the
750 * "cmdlinepart" and "ofpart" parsers ATM.
751 + * Note: If there are more then one parser in @types, the kernel only takes the
752 + * partitions parsed out by the first parser.
753 *
754 * This function may return:
755 * o a negative error code in case of failure
756 @@ -733,11 +735,12 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
757 if (!parser)
758 continue;
759 ret = (*parser->parse_fn)(master, pparts, data);
760 + put_partition_parser(parser);
761 if (ret > 0) {
762 printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
763 ret, parser->name, master->name);
764 + break;
765 }
766 - put_partition_parser(parser);
767 }
768 return ret;
769 }
770 diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
771 index 30d1319..c126469 100644
772 --- a/drivers/mtd/nand/nand_bbt.c
773 +++ b/drivers/mtd/nand/nand_bbt.c
774 @@ -390,7 +390,7 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
775 /* Read the mirror version, if available */
776 if (md && (md->options & NAND_BBT_VERSION)) {
777 scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
778 - mtd->writesize, td);
779 + mtd->writesize, md);
780 md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
781 pr_info("Bad block table at page %d, version 0x%02X\n",
782 md->pages[0], md->version[0]);
783 diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
784 index c606b6a..b9cbd65 100644
785 --- a/drivers/mtd/nand/nandsim.c
786 +++ b/drivers/mtd/nand/nandsim.c
787 @@ -2355,6 +2355,7 @@ static int __init ns_init_module(void)
788 uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
789 if (new_size >> overridesize != nsmtd->erasesize) {
790 NS_ERR("overridesize is too big\n");
791 + retval = -EINVAL;
792 goto err_exit;
793 }
794 /* N.B. This relies on nand_scan not doing anything with the size before we change it */
795 diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
796 index c2b0bba..62d039a 100644
797 --- a/drivers/mtd/nand/omap2.c
798 +++ b/drivers/mtd/nand/omap2.c
799 @@ -1133,7 +1133,8 @@ static int omap_nand_remove(struct platform_device *pdev)
800 /* Release NAND device, its internal structures and partitions */
801 nand_release(&info->mtd);
802 iounmap(info->nand.IO_ADDR_R);
803 - kfree(&info->mtd);
804 + release_mem_region(info->phys_base, NAND_IO_SIZE);
805 + kfree(info);
806 return 0;
807 }
808
809 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
810 index 41bb34f..acd8246 100644
811 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
812 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
813 @@ -571,14 +571,16 @@ drop:
814 static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
815 struct bnx2x_fastpath *fp)
816 {
817 - /* Do nothing if no IP/L4 csum validation was done */
818 -
819 + /* Do nothing if no L4 csum validation was done.
820 + * We do not check whether IP csum was validated. For IPv4 we assume
821 + * that if the card got as far as validating the L4 csum, it also
822 + * validated the IP csum. IPv6 has no IP csum.
823 + */
824 if (cqe->fast_path_cqe.status_flags &
825 - (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
826 - ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
827 + ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
828 return;
829
830 - /* If both IP/L4 validation were done, check if an error was found. */
831 + /* If L4 validation was done, check if an error was found. */
832
833 if (cqe->fast_path_cqe.type_error_flags &
834 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
835 diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
836 index 65a718f..22b399a 100644
837 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
838 +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
839 @@ -1370,6 +1370,10 @@ static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
840 struct pci_dev *root = pdev->bus->self;
841 u32 aer_pos;
842
843 + /* root bus? */
844 + if (!root)
845 + return;
846 +
847 if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM &&
848 adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP)
849 return;
850 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
851 index 5fb74c4..482dcd3 100644
852 --- a/drivers/net/ethernet/realtek/r8169.c
853 +++ b/drivers/net/ethernet/realtek/r8169.c
854 @@ -319,6 +319,8 @@ enum rtl_registers {
855 Config0 = 0x51,
856 Config1 = 0x52,
857 Config2 = 0x53,
858 +#define PME_SIGNAL (1 << 5) /* 8168c and later */
859 +
860 Config3 = 0x54,
861 Config4 = 0x55,
862 Config5 = 0x56,
863 @@ -1400,7 +1402,6 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
864 u16 reg;
865 u8 mask;
866 } cfg[] = {
867 - { WAKE_ANY, Config1, PMEnable },
868 { WAKE_PHY, Config3, LinkUp },
869 { WAKE_MAGIC, Config3, MagicPacket },
870 { WAKE_UCAST, Config5, UWF },
871 @@ -1408,16 +1409,32 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
872 { WAKE_MCAST, Config5, MWF },
873 { WAKE_ANY, Config5, LanWake }
874 };
875 + u8 options;
876
877 RTL_W8(Cfg9346, Cfg9346_Unlock);
878
879 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
880 - u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
881 + options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
882 if (wolopts & cfg[i].opt)
883 options |= cfg[i].mask;
884 RTL_W8(cfg[i].reg, options);
885 }
886
887 + switch (tp->mac_version) {
888 + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
889 + options = RTL_R8(Config1) & ~PMEnable;
890 + if (wolopts)
891 + options |= PMEnable;
892 + RTL_W8(Config1, options);
893 + break;
894 + default:
895 + options = RTL_R8(Config2) & ~PME_SIGNAL;
896 + if (wolopts)
897 + options |= PME_SIGNAL;
898 + RTL_W8(Config2, options);
899 + break;
900 + }
901 +
902 RTL_W8(Cfg9346, Cfg9346_Lock);
903 }
904
905 diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
906 index 3455876..06f2b49 100644
907 --- a/drivers/net/ethernet/ti/davinci_cpdma.c
908 +++ b/drivers/net/ethernet/ti/davinci_cpdma.c
909 @@ -851,6 +851,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
910
911 next_dma = desc_read(desc, hw_next);
912 chan->head = desc_from_phys(pool, next_dma);
913 + chan->count--;
914 chan->stats.teardown_dequeue++;
915
916 /* issue callback without locks held */
917 diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
918 index 2fa1a9b..2e0d876 100644
919 --- a/drivers/net/ppp/pppoe.c
920 +++ b/drivers/net/ppp/pppoe.c
921 @@ -576,7 +576,7 @@ static int pppoe_release(struct socket *sock)
922
923 po = pppox_sk(sk);
924
925 - if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
926 + if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
927 dev_put(po->pppoe_dev);
928 po->pppoe_dev = NULL;
929 }
930 diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
931 index 91d2588..1470d3e 100644
932 --- a/drivers/net/rionet.c
933 +++ b/drivers/net/rionet.c
934 @@ -79,6 +79,7 @@ static int rionet_capable = 1;
935 * on system trade-offs.
936 */
937 static struct rio_dev **rionet_active;
938 +static int nact; /* total number of active rionet peers */
939
940 #define is_rionet_capable(src_ops, dst_ops) \
941 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
942 @@ -175,6 +176,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
943 struct ethhdr *eth = (struct ethhdr *)skb->data;
944 u16 destid;
945 unsigned long flags;
946 + int add_num = 1;
947
948 local_irq_save(flags);
949 if (!spin_trylock(&rnet->tx_lock)) {
950 @@ -182,7 +184,10 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
951 return NETDEV_TX_LOCKED;
952 }
953
954 - if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) {
955 + if (is_multicast_ether_addr(eth->h_dest))
956 + add_num = nact;
957 +
958 + if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) {
959 netif_stop_queue(ndev);
960 spin_unlock_irqrestore(&rnet->tx_lock, flags);
961 printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
962 @@ -191,11 +196,16 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
963 }
964
965 if (is_multicast_ether_addr(eth->h_dest)) {
966 + int count = 0;
967 for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
968 i++)
969 - if (rionet_active[i])
970 + if (rionet_active[i]) {
971 rionet_queue_tx_msg(skb, ndev,
972 rionet_active[i]);
973 + if (count)
974 + atomic_inc(&skb->users);
975 + count++;
976 + }
977 } else if (RIONET_MAC_MATCH(eth->h_dest)) {
978 destid = RIONET_GET_DESTID(eth->h_dest);
979 if (rionet_active[destid])
980 @@ -220,14 +230,17 @@ static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u
981 if (info == RIONET_DOORBELL_JOIN) {
982 if (!rionet_active[sid]) {
983 list_for_each_entry(peer, &rionet_peers, node) {
984 - if (peer->rdev->destid == sid)
985 + if (peer->rdev->destid == sid) {
986 rionet_active[sid] = peer->rdev;
987 + nact++;
988 + }
989 }
990 rio_mport_send_doorbell(mport, sid,
991 RIONET_DOORBELL_JOIN);
992 }
993 } else if (info == RIONET_DOORBELL_LEAVE) {
994 rionet_active[sid] = NULL;
995 + nact--;
996 } else {
997 if (netif_msg_intr(rnet))
998 printk(KERN_WARNING "%s: unhandled doorbell\n",
999 @@ -523,6 +536,7 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
1000
1001 rc = rionet_setup_netdev(rdev->net->hport, ndev);
1002 rionet_check = 1;
1003 + nact = 0;
1004 }
1005
1006 /*
1007 diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
1008 index cc9776c..8789bc5 100644
1009 --- a/drivers/net/usb/sierra_net.c
1010 +++ b/drivers/net/usb/sierra_net.c
1011 @@ -678,7 +678,7 @@ static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
1012 return -EIO;
1013 }
1014
1015 - *datap = *attrdata;
1016 + *datap = le16_to_cpu(*attrdata);
1017
1018 kfree(attrdata);
1019 return result;
1020 diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
1021 index aaaca9a..3f575af 100644
1022 --- a/drivers/net/wan/ixp4xx_hss.c
1023 +++ b/drivers/net/wan/ixp4xx_hss.c
1024 @@ -10,6 +10,7 @@
1025
1026 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1027
1028 +#include <linux/module.h>
1029 #include <linux/bitops.h>
1030 #include <linux/cdev.h>
1031 #include <linux/dma-mapping.h>
1032 diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
1033 index 806c44f..09bf377 100644
1034 --- a/drivers/pci/hotplug/acpiphp_glue.c
1035 +++ b/drivers/pci/hotplug/acpiphp_glue.c
1036 @@ -132,6 +132,15 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
1037 if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
1038 return AE_OK;
1039
1040 + status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
1041 + if (ACPI_FAILURE(status)) {
1042 + warn("can't evaluate _ADR (%#x)\n", status);
1043 + return AE_OK;
1044 + }
1045 +
1046 + device = (adr >> 16) & 0xffff;
1047 + function = adr & 0xffff;
1048 +
1049 pdev = pbus->self;
1050 if (pdev && pci_is_pcie(pdev)) {
1051 tmp = acpi_find_root_bridge_handle(pdev);
1052 @@ -144,10 +153,6 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
1053 }
1054 }
1055
1056 - acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
1057 - device = (adr >> 16) & 0xffff;
1058 - function = adr & 0xffff;
1059 -
1060 newfunc = kzalloc(sizeof(struct acpiphp_func), GFP_KERNEL);
1061 if (!newfunc)
1062 return AE_NO_MEMORY;
1063 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
1064 index 5e1ca3c..63e0199 100644
1065 --- a/drivers/pci/probe.c
1066 +++ b/drivers/pci/probe.c
1067 @@ -749,8 +749,10 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1068
1069 /* Check if setup is sensible at all */
1070 if (!pass &&
1071 - (primary != bus->number || secondary <= bus->number)) {
1072 - dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n");
1073 + (primary != bus->number || secondary <= bus->number ||
1074 + secondary > subordinate)) {
1075 + dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
1076 + secondary, subordinate);
1077 broken = 1;
1078 }
1079
1080 diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
1081 index 0860181..4f1b10b 100644
1082 --- a/drivers/s390/scsi/zfcp_aux.c
1083 +++ b/drivers/s390/scsi/zfcp_aux.c
1084 @@ -519,6 +519,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
1085
1086 rwlock_init(&port->unit_list_lock);
1087 INIT_LIST_HEAD(&port->unit_list);
1088 + atomic_set(&port->units, 0);
1089
1090 INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
1091 INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
1092 diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
1093 index 96f13ad8..79a6afe 100644
1094 --- a/drivers/s390/scsi/zfcp_ccw.c
1095 +++ b/drivers/s390/scsi/zfcp_ccw.c
1096 @@ -39,17 +39,23 @@ void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
1097 spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
1098 }
1099
1100 -static int zfcp_ccw_activate(struct ccw_device *cdev)
1101 -
1102 +/**
1103 + * zfcp_ccw_activate - activate adapter and wait for it to finish
1104 + * @cdev: pointer to belonging ccw device
1105 + * @clear: Status flags to clear.
1106 + * @tag: s390dbf trace record tag
1107 + */
1108 +static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag)
1109 {
1110 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
1111
1112 if (!adapter)
1113 return 0;
1114
1115 + zfcp_erp_clear_adapter_status(adapter, clear);
1116 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
1117 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
1118 - "ccresu2");
1119 + tag);
1120 zfcp_erp_wait(adapter);
1121 flush_work(&adapter->scan_work);
1122
1123 @@ -164,26 +170,29 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
1124 BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
1125 adapter->req_no = 0;
1126
1127 - zfcp_ccw_activate(cdev);
1128 + zfcp_ccw_activate(cdev, 0, "ccsonl1");
1129 zfcp_ccw_adapter_put(adapter);
1130 return 0;
1131 }
1132
1133 /**
1134 - * zfcp_ccw_set_offline - set_offline function of zfcp driver
1135 + * zfcp_ccw_offline_sync - shut down adapter and wait for it to finish
1136 * @cdev: pointer to belonging ccw device
1137 + * @set: Status flags to set.
1138 + * @tag: s390dbf trace record tag
1139 *
1140 * This function gets called by the common i/o layer and sets an adapter
1141 * into state offline.
1142 */
1143 -static int zfcp_ccw_set_offline(struct ccw_device *cdev)
1144 +static int zfcp_ccw_offline_sync(struct ccw_device *cdev, int set, char *tag)
1145 {
1146 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
1147
1148 if (!adapter)
1149 return 0;
1150
1151 - zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1");
1152 + zfcp_erp_set_adapter_status(adapter, set);
1153 + zfcp_erp_adapter_shutdown(adapter, 0, tag);
1154 zfcp_erp_wait(adapter);
1155
1156 zfcp_ccw_adapter_put(adapter);
1157 @@ -191,6 +200,18 @@ static int zfcp_ccw_set_offline(struct ccw_device *cdev)
1158 }
1159
1160 /**
1161 + * zfcp_ccw_set_offline - set_offline function of zfcp driver
1162 + * @cdev: pointer to belonging ccw device
1163 + *
1164 + * This function gets called by the common i/o layer and sets an adapter
1165 + * into state offline.
1166 + */
1167 +static int zfcp_ccw_set_offline(struct ccw_device *cdev)
1168 +{
1169 + return zfcp_ccw_offline_sync(cdev, 0, "ccsoff1");
1170 +}
1171 +
1172 +/**
1173 * zfcp_ccw_notify - ccw notify function
1174 * @cdev: pointer to belonging ccw device
1175 * @event: indicates if adapter was detached or attached
1176 @@ -207,6 +228,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
1177
1178 switch (event) {
1179 case CIO_GONE:
1180 + if (atomic_read(&adapter->status) &
1181 + ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
1182 + zfcp_dbf_hba_basic("ccnigo1", adapter);
1183 + break;
1184 + }
1185 dev_warn(&cdev->dev, "The FCP device has been detached\n");
1186 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
1187 break;
1188 @@ -216,6 +242,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
1189 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
1190 break;
1191 case CIO_OPER:
1192 + if (atomic_read(&adapter->status) &
1193 + ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
1194 + zfcp_dbf_hba_basic("ccniop1", adapter);
1195 + break;
1196 + }
1197 dev_info(&cdev->dev, "The FCP device is operational again\n");
1198 zfcp_erp_set_adapter_status(adapter,
1199 ZFCP_STATUS_COMMON_RUNNING);
1200 @@ -251,6 +282,28 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
1201 zfcp_ccw_adapter_put(adapter);
1202 }
1203
1204 +static int zfcp_ccw_suspend(struct ccw_device *cdev)
1205 +{
1206 + zfcp_ccw_offline_sync(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccsusp1");
1207 + return 0;
1208 +}
1209 +
1210 +static int zfcp_ccw_thaw(struct ccw_device *cdev)
1211 +{
1212 + /* trace records for thaw and final shutdown during suspend
1213 + can only be found in system dump until the end of suspend
1214 + but not after resume because it's based on the memory image
1215 + right after the very first suspend (freeze) callback */
1216 + zfcp_ccw_activate(cdev, 0, "ccthaw1");
1217 + return 0;
1218 +}
1219 +
1220 +static int zfcp_ccw_resume(struct ccw_device *cdev)
1221 +{
1222 + zfcp_ccw_activate(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccresu1");
1223 + return 0;
1224 +}
1225 +
1226 struct ccw_driver zfcp_ccw_driver = {
1227 .driver = {
1228 .owner = THIS_MODULE,
1229 @@ -263,7 +316,7 @@ struct ccw_driver zfcp_ccw_driver = {
1230 .set_offline = zfcp_ccw_set_offline,
1231 .notify = zfcp_ccw_notify,
1232 .shutdown = zfcp_ccw_shutdown,
1233 - .freeze = zfcp_ccw_set_offline,
1234 - .thaw = zfcp_ccw_activate,
1235 - .restore = zfcp_ccw_activate,
1236 + .freeze = zfcp_ccw_suspend,
1237 + .thaw = zfcp_ccw_thaw,
1238 + .restore = zfcp_ccw_resume,
1239 };
1240 diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
1241 index fab2c25..8ed63aa 100644
1242 --- a/drivers/s390/scsi/zfcp_cfdc.c
1243 +++ b/drivers/s390/scsi/zfcp_cfdc.c
1244 @@ -293,7 +293,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
1245 }
1246 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1247
1248 - shost_for_each_device(sdev, port->adapter->scsi_host) {
1249 + shost_for_each_device(sdev, adapter->scsi_host) {
1250 zfcp_sdev = sdev_to_zfcp(sdev);
1251 status = atomic_read(&zfcp_sdev->status);
1252 if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
1253 diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
1254 index a9a816e..79b9848 100644
1255 --- a/drivers/s390/scsi/zfcp_dbf.c
1256 +++ b/drivers/s390/scsi/zfcp_dbf.c
1257 @@ -191,7 +191,7 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
1258 length = min((u16)sizeof(struct qdio_buffer),
1259 (u16)ZFCP_DBF_PAY_MAX_REC);
1260
1261 - while ((char *)pl[payload->counter] && payload->counter < scount) {
1262 + while (payload->counter < scount && (char *)pl[payload->counter]) {
1263 memcpy(payload->data, (char *)pl[payload->counter], length);
1264 debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
1265 payload->counter++;
1266 @@ -200,6 +200,26 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
1267 spin_unlock_irqrestore(&dbf->pay_lock, flags);
1268 }
1269
1270 +/**
1271 + * zfcp_dbf_hba_basic - trace event for basic adapter events
1272 + * @adapter: pointer to struct zfcp_adapter
1273 + */
1274 +void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
1275 +{
1276 + struct zfcp_dbf *dbf = adapter->dbf;
1277 + struct zfcp_dbf_hba *rec = &dbf->hba_buf;
1278 + unsigned long flags;
1279 +
1280 + spin_lock_irqsave(&dbf->hba_lock, flags);
1281 + memset(rec, 0, sizeof(*rec));
1282 +
1283 + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
1284 + rec->id = ZFCP_DBF_HBA_BASIC;
1285 +
1286 + debug_event(dbf->hba, 1, rec, sizeof(*rec));
1287 + spin_unlock_irqrestore(&dbf->hba_lock, flags);
1288 +}
1289 +
1290 static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
1291 struct zfcp_adapter *adapter,
1292 struct zfcp_port *port,
1293 diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
1294 index 714f087..3ac7a4b 100644
1295 --- a/drivers/s390/scsi/zfcp_dbf.h
1296 +++ b/drivers/s390/scsi/zfcp_dbf.h
1297 @@ -154,6 +154,7 @@ enum zfcp_dbf_hba_id {
1298 ZFCP_DBF_HBA_RES = 1,
1299 ZFCP_DBF_HBA_USS = 2,
1300 ZFCP_DBF_HBA_BIT = 3,
1301 + ZFCP_DBF_HBA_BASIC = 4,
1302 };
1303
1304 /**
1305 diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
1306 index ed5d921..f172b84 100644
1307 --- a/drivers/s390/scsi/zfcp_def.h
1308 +++ b/drivers/s390/scsi/zfcp_def.h
1309 @@ -77,6 +77,7 @@ struct zfcp_reqlist;
1310 #define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004
1311 #define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
1312 #define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
1313 +#define ZFCP_STATUS_ADAPTER_SUSPENDED 0x00000040
1314 #define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
1315 #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
1316 #define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED 0x00000400
1317 @@ -204,6 +205,7 @@ struct zfcp_port {
1318 struct zfcp_adapter *adapter; /* adapter used to access port */
1319 struct list_head unit_list; /* head of logical unit list */
1320 rwlock_t unit_list_lock; /* unit list lock */
1321 + atomic_t units; /* zfcp_unit count */
1322 atomic_t status; /* status of this remote port */
1323 u64 wwnn; /* WWNN if known */
1324 u64 wwpn; /* WWPN */
1325 diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
1326 index 2302e1c..ef9e502 100644
1327 --- a/drivers/s390/scsi/zfcp_ext.h
1328 +++ b/drivers/s390/scsi/zfcp_ext.h
1329 @@ -54,6 +54,7 @@ extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
1330 extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
1331 extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
1332 extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
1333 +extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
1334 extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
1335 extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
1336 extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
1337 @@ -158,6 +159,7 @@ extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
1338 extern struct attribute_group zfcp_sysfs_unit_attrs;
1339 extern struct attribute_group zfcp_sysfs_adapter_attrs;
1340 extern struct attribute_group zfcp_sysfs_port_attrs;
1341 +extern struct mutex zfcp_sysfs_port_units_mutex;
1342 extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
1343 extern struct device_attribute *zfcp_sysfs_shost_attrs[];
1344
1345 diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
1346 index e9a787e..2136fc2 100644
1347 --- a/drivers/s390/scsi/zfcp_fsf.c
1348 +++ b/drivers/s390/scsi/zfcp_fsf.c
1349 @@ -219,7 +219,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
1350 return;
1351 }
1352
1353 - zfcp_dbf_hba_fsf_uss("fssrh_2", req);
1354 + zfcp_dbf_hba_fsf_uss("fssrh_4", req);
1355
1356 switch (sr_buf->status_type) {
1357 case FSF_STATUS_READ_PORT_CLOSED:
1358 @@ -437,6 +437,34 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
1359 }
1360 }
1361
1362 +#define ZFCP_FSF_PORTSPEED_1GBIT (1 << 0)
1363 +#define ZFCP_FSF_PORTSPEED_2GBIT (1 << 1)
1364 +#define ZFCP_FSF_PORTSPEED_4GBIT (1 << 2)
1365 +#define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3)
1366 +#define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4)
1367 +#define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5)
1368 +#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
1369 +
1370 +static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
1371 +{
1372 + u32 fdmi_speed = 0;
1373 + if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
1374 + fdmi_speed |= FC_PORTSPEED_1GBIT;
1375 + if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
1376 + fdmi_speed |= FC_PORTSPEED_2GBIT;
1377 + if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
1378 + fdmi_speed |= FC_PORTSPEED_4GBIT;
1379 + if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
1380 + fdmi_speed |= FC_PORTSPEED_10GBIT;
1381 + if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
1382 + fdmi_speed |= FC_PORTSPEED_8GBIT;
1383 + if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
1384 + fdmi_speed |= FC_PORTSPEED_16GBIT;
1385 + if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
1386 + fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
1387 + return fdmi_speed;
1388 +}
1389 +
1390 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
1391 {
1392 struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
1393 @@ -456,7 +484,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
1394 fc_host_port_name(shost) = nsp->fl_wwpn;
1395 fc_host_node_name(shost) = nsp->fl_wwnn;
1396 fc_host_port_id(shost) = ntoh24(bottom->s_id);
1397 - fc_host_speed(shost) = bottom->fc_link_speed;
1398 + fc_host_speed(shost) =
1399 + zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
1400 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
1401
1402 adapter->hydra_version = bottom->adapter_type;
1403 @@ -580,7 +609,8 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
1404 } else
1405 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
1406 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
1407 - fc_host_supported_speeds(shost) = bottom->supported_speed;
1408 + fc_host_supported_speeds(shost) =
1409 + zfcp_fsf_convert_portspeed(bottom->supported_speed);
1410 memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
1411 FC_FC4_LIST_SIZE);
1412 memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
1413 @@ -771,12 +801,14 @@ out:
1414 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
1415 {
1416 struct scsi_device *sdev = req->data;
1417 - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1418 + struct zfcp_scsi_dev *zfcp_sdev;
1419 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
1420
1421 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1422 return;
1423
1424 + zfcp_sdev = sdev_to_zfcp(sdev);
1425 +
1426 switch (req->qtcb->header.fsf_status) {
1427 case FSF_PORT_HANDLE_NOT_VALID:
1428 if (fsq->word[0] == fsq->word[1]) {
1429 @@ -885,7 +917,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
1430
1431 switch (header->fsf_status) {
1432 case FSF_GOOD:
1433 - zfcp_dbf_san_res("fsscth1", req);
1434 + zfcp_dbf_san_res("fsscth2", req);
1435 ct->status = 0;
1436 break;
1437 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1438 @@ -1739,13 +1771,15 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
1439 {
1440 struct zfcp_adapter *adapter = req->adapter;
1441 struct scsi_device *sdev = req->data;
1442 - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1443 + struct zfcp_scsi_dev *zfcp_sdev;
1444 struct fsf_qtcb_header *header = &req->qtcb->header;
1445 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
1446
1447 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1448 return;
1449
1450 + zfcp_sdev = sdev_to_zfcp(sdev);
1451 +
1452 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1453 ZFCP_STATUS_COMMON_ACCESS_BOXED |
1454 ZFCP_STATUS_LUN_SHARED |
1455 @@ -1856,11 +1890,13 @@ out:
1456 static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
1457 {
1458 struct scsi_device *sdev = req->data;
1459 - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1460 + struct zfcp_scsi_dev *zfcp_sdev;
1461
1462 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1463 return;
1464
1465 + zfcp_sdev = sdev_to_zfcp(sdev);
1466 +
1467 switch (req->qtcb->header.fsf_status) {
1468 case FSF_PORT_HANDLE_NOT_VALID:
1469 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
1470 @@ -1950,7 +1986,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
1471 {
1472 struct fsf_qual_latency_info *lat_in;
1473 struct latency_cont *lat = NULL;
1474 - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device);
1475 + struct zfcp_scsi_dev *zfcp_sdev;
1476 struct zfcp_blk_drv_data blktrc;
1477 int ticks = req->adapter->timer_ticks;
1478
1479 @@ -1965,6 +2001,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
1480
1481 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
1482 !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
1483 + zfcp_sdev = sdev_to_zfcp(scsi->device);
1484 blktrc.flags |= ZFCP_BLK_LAT_VALID;
1485 blktrc.channel_lat = lat_in->channel_lat * ticks;
1486 blktrc.fabric_lat = lat_in->fabric_lat * ticks;
1487 @@ -2002,12 +2039,14 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
1488 {
1489 struct scsi_cmnd *scmnd = req->data;
1490 struct scsi_device *sdev = scmnd->device;
1491 - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1492 + struct zfcp_scsi_dev *zfcp_sdev;
1493 struct fsf_qtcb_header *header = &req->qtcb->header;
1494
1495 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
1496 return;
1497
1498 + zfcp_sdev = sdev_to_zfcp(sdev);
1499 +
1500 switch (header->fsf_status) {
1501 case FSF_HANDLE_MISMATCH:
1502 case FSF_PORT_HANDLE_NOT_VALID:
1503 diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
1504 index e14da57..e76d003 100644
1505 --- a/drivers/s390/scsi/zfcp_qdio.c
1506 +++ b/drivers/s390/scsi/zfcp_qdio.c
1507 @@ -102,18 +102,22 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
1508 {
1509 struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
1510 struct zfcp_adapter *adapter = qdio->adapter;
1511 - struct qdio_buffer_element *sbale;
1512 int sbal_no, sbal_idx;
1513 - void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
1514 - u64 req_id;
1515 - u8 scount;
1516
1517 if (unlikely(qdio_err)) {
1518 - memset(pl, 0, ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
1519 if (zfcp_adapter_multi_buffer_active(adapter)) {
1520 + void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
1521 + struct qdio_buffer_element *sbale;
1522 + u64 req_id;
1523 + u8 scount;
1524 +
1525 + memset(pl, 0,
1526 + ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
1527 sbale = qdio->res_q[idx]->element;
1528 req_id = (u64) sbale->addr;
1529 - scount = sbale->scount + 1; /* incl. signaling SBAL */
1530 + scount = min(sbale->scount + 1,
1531 + ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
1532 + /* incl. signaling SBAL */
1533
1534 for (sbal_no = 0; sbal_no < scount; sbal_no++) {
1535 sbal_idx = (idx + sbal_no) %
1536 diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
1537 index cdc4ff7..9e62210 100644
1538 --- a/drivers/s390/scsi/zfcp_sysfs.c
1539 +++ b/drivers/s390/scsi/zfcp_sysfs.c
1540 @@ -227,6 +227,8 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
1541 static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
1542 zfcp_sysfs_port_rescan_store);
1543
1544 +DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
1545 +
1546 static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
1547 struct device_attribute *attr,
1548 const char *buf, size_t count)
1549 @@ -249,6 +251,16 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
1550 else
1551 retval = 0;
1552
1553 + mutex_lock(&zfcp_sysfs_port_units_mutex);
1554 + if (atomic_read(&port->units) > 0) {
1555 + retval = -EBUSY;
1556 + mutex_unlock(&zfcp_sysfs_port_units_mutex);
1557 + goto out;
1558 + }
1559 + /* port is about to be removed, so no more unit_add */
1560 + atomic_set(&port->units, -1);
1561 + mutex_unlock(&zfcp_sysfs_port_units_mutex);
1562 +
1563 write_lock_irq(&adapter->port_list_lock);
1564 list_del(&port->list);
1565 write_unlock_irq(&adapter->port_list_lock);
1566 @@ -289,12 +301,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
1567 {
1568 struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
1569 u64 fcp_lun;
1570 + int retval;
1571
1572 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
1573 return -EINVAL;
1574
1575 - if (zfcp_unit_add(port, fcp_lun))
1576 - return -EINVAL;
1577 + retval = zfcp_unit_add(port, fcp_lun);
1578 + if (retval)
1579 + return retval;
1580
1581 return count;
1582 }
1583 diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
1584 index 20796eb..4e6a535 100644
1585 --- a/drivers/s390/scsi/zfcp_unit.c
1586 +++ b/drivers/s390/scsi/zfcp_unit.c
1587 @@ -104,7 +104,7 @@ static void zfcp_unit_release(struct device *dev)
1588 {
1589 struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
1590
1591 - put_device(&unit->port->dev);
1592 + atomic_dec(&unit->port->units);
1593 kfree(unit);
1594 }
1595
1596 @@ -119,16 +119,27 @@ static void zfcp_unit_release(struct device *dev)
1597 int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
1598 {
1599 struct zfcp_unit *unit;
1600 + int retval = 0;
1601 +
1602 + mutex_lock(&zfcp_sysfs_port_units_mutex);
1603 + if (atomic_read(&port->units) == -1) {
1604 + /* port is already gone */
1605 + retval = -ENODEV;
1606 + goto out;
1607 + }
1608
1609 unit = zfcp_unit_find(port, fcp_lun);
1610 if (unit) {
1611 put_device(&unit->dev);
1612 - return -EEXIST;
1613 + retval = -EEXIST;
1614 + goto out;
1615 }
1616
1617 unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
1618 - if (!unit)
1619 - return -ENOMEM;
1620 + if (!unit) {
1621 + retval = -ENOMEM;
1622 + goto out;
1623 + }
1624
1625 unit->port = port;
1626 unit->fcp_lun = fcp_lun;
1627 @@ -139,28 +150,33 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
1628 if (dev_set_name(&unit->dev, "0x%016llx",
1629 (unsigned long long) fcp_lun)) {
1630 kfree(unit);
1631 - return -ENOMEM;
1632 + retval = -ENOMEM;
1633 + goto out;
1634 }
1635
1636 - get_device(&port->dev);
1637 -
1638 if (device_register(&unit->dev)) {
1639 put_device(&unit->dev);
1640 - return -ENOMEM;
1641 + retval = -ENOMEM;
1642 + goto out;
1643 }
1644
1645 if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) {
1646 device_unregister(&unit->dev);
1647 - return -EINVAL;
1648 + retval = -EINVAL;
1649 + goto out;
1650 }
1651
1652 + atomic_inc(&port->units); /* under zfcp_sysfs_port_units_mutex ! */
1653 +
1654 write_lock_irq(&port->unit_list_lock);
1655 list_add_tail(&unit->list, &port->unit_list);
1656 write_unlock_irq(&port->unit_list_lock);
1657
1658 zfcp_unit_scsi_scan(unit);
1659
1660 - return 0;
1661 +out:
1662 + mutex_unlock(&zfcp_sysfs_port_units_mutex);
1663 + return retval;
1664 }
1665
1666 /**
1667 diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
1668 index 68ce085..a540162 100644
1669 --- a/drivers/scsi/atp870u.c
1670 +++ b/drivers/scsi/atp870u.c
1671 @@ -1173,7 +1173,16 @@ wait_io1:
1672 outw(val, tmport);
1673 outb(2, 0x80);
1674 TCM_SYNC:
1675 - udelay(0x800);
1676 + /*
1677 + * The funny division into multiple delays is to accomodate
1678 + * arches like ARM where udelay() multiplies its argument by
1679 + * a large number to initialize a loop counter. To avoid
1680 + * overflow, the maximum supported udelay is 2000 microseconds.
1681 + *
1682 + * XXX it would be more polite to find a way to use msleep()
1683 + */
1684 + mdelay(2);
1685 + udelay(48);
1686 if ((inb(tmport) & 0x80) == 0x00) { /* bsy ? */
1687 outw(0, tmport--);
1688 outb(0, tmport);
1689 diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
1690 index 33ef60d..6a8568c 100644
1691 --- a/fs/cifs/cifs_unicode.c
1692 +++ b/fs/cifs/cifs_unicode.c
1693 @@ -203,6 +203,27 @@ cifs_strtoUTF16(__le16 *to, const char *from, int len,
1694 int i;
1695 wchar_t wchar_to; /* needed to quiet sparse */
1696
1697 + /* special case for utf8 to handle no plane0 chars */
1698 + if (!strcmp(codepage->charset, "utf8")) {
1699 + /*
1700 + * convert utf8 -> utf16, we assume we have enough space
1701 + * as caller should have assumed conversion does not overflow
1702 + * in destination len is length in wchar_t units (16bits)
1703 + */
1704 + i = utf8s_to_utf16s(from, len, UTF16_LITTLE_ENDIAN,
1705 + (wchar_t *) to, len);
1706 +
1707 + /* if success terminate and exit */
1708 + if (i >= 0)
1709 + goto success;
1710 + /*
1711 + * if fails fall back to UCS encoding as this
1712 + * function should not return negative values
1713 + * currently can fail only if source contains
1714 + * invalid encoded characters
1715 + */
1716 + }
1717 +
1718 for (i = 0; len && *from; i++, from += charlen, len -= charlen) {
1719 charlen = codepage->char2uni(from, len, &wchar_to);
1720 if (charlen < 1) {
1721 @@ -215,6 +236,7 @@ cifs_strtoUTF16(__le16 *to, const char *from, int len,
1722 put_unaligned_le16(wchar_to, &to[i]);
1723 }
1724
1725 +success:
1726 put_unaligned_le16(0, &to[i]);
1727 return i;
1728 }
1729 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
1730 index 65a78e9..f771e9f 100644
1731 --- a/fs/cifs/connect.c
1732 +++ b/fs/cifs/connect.c
1733 @@ -70,6 +70,7 @@ enum {
1734 /* Mount options that take no arguments */
1735 Opt_user_xattr, Opt_nouser_xattr,
1736 Opt_forceuid, Opt_noforceuid,
1737 + Opt_forcegid, Opt_noforcegid,
1738 Opt_noblocksend, Opt_noautotune,
1739 Opt_hard, Opt_soft, Opt_perm, Opt_noperm,
1740 Opt_mapchars, Opt_nomapchars, Opt_sfu,
1741 @@ -121,6 +122,8 @@ static const match_table_t cifs_mount_option_tokens = {
1742 { Opt_nouser_xattr, "nouser_xattr" },
1743 { Opt_forceuid, "forceuid" },
1744 { Opt_noforceuid, "noforceuid" },
1745 + { Opt_forcegid, "forcegid" },
1746 + { Opt_noforcegid, "noforcegid" },
1747 { Opt_noblocksend, "noblocksend" },
1748 { Opt_noautotune, "noautotune" },
1749 { Opt_hard, "hard" },
1750 @@ -1287,6 +1290,12 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1751 case Opt_noforceuid:
1752 override_uid = 0;
1753 break;
1754 + case Opt_forcegid:
1755 + override_gid = 1;
1756 + break;
1757 + case Opt_noforcegid:
1758 + override_gid = 0;
1759 + break;
1760 case Opt_noblocksend:
1761 vol->noblocksnd = 1;
1762 break;
1763 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1764 index 55a654d..dcd08e4 100644
1765 --- a/fs/ext4/inode.c
1766 +++ b/fs/ext4/inode.c
1767 @@ -2386,6 +2386,16 @@ static int ext4_nonda_switch(struct super_block *sb)
1768 free_blocks = EXT4_C2B(sbi,
1769 percpu_counter_read_positive(&sbi->s_freeclusters_counter));
1770 dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
1771 + /*
1772 + * Start pushing delalloc when 1/2 of free blocks are dirty.
1773 + */
1774 + if (dirty_blocks && (free_blocks < 2 * dirty_blocks) &&
1775 + !writeback_in_progress(sb->s_bdi) &&
1776 + down_read_trylock(&sb->s_umount)) {
1777 + writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
1778 + up_read(&sb->s_umount);
1779 + }
1780 +
1781 if (2 * free_blocks < 3 * dirty_blocks ||
1782 free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
1783 /*
1784 @@ -2394,13 +2404,6 @@ static int ext4_nonda_switch(struct super_block *sb)
1785 */
1786 return 1;
1787 }
1788 - /*
1789 - * Even if we don't switch but are nearing capacity,
1790 - * start pushing delalloc when 1/2 of free blocks are dirty.
1791 - */
1792 - if (free_blocks < 2 * dirty_blocks)
1793 - writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);
1794 -
1795 return 0;
1796 }
1797
1798 @@ -3889,6 +3892,7 @@ static int ext4_do_update_inode(handle_t *handle,
1799 struct ext4_inode_info *ei = EXT4_I(inode);
1800 struct buffer_head *bh = iloc->bh;
1801 int err = 0, rc, block;
1802 + int need_datasync = 0;
1803
1804 /* For fields not not tracking in the in-memory inode,
1805 * initialise them to zero for new inodes. */
1806 @@ -3937,7 +3941,10 @@ static int ext4_do_update_inode(handle_t *handle,
1807 raw_inode->i_file_acl_high =
1808 cpu_to_le16(ei->i_file_acl >> 32);
1809 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
1810 - ext4_isize_set(raw_inode, ei->i_disksize);
1811 + if (ei->i_disksize != ext4_isize(raw_inode)) {
1812 + ext4_isize_set(raw_inode, ei->i_disksize);
1813 + need_datasync = 1;
1814 + }
1815 if (ei->i_disksize > 0x7fffffffULL) {
1816 struct super_block *sb = inode->i_sb;
1817 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
1818 @@ -3988,7 +3995,7 @@ static int ext4_do_update_inode(handle_t *handle,
1819 err = rc;
1820 ext4_clear_inode_state(inode, EXT4_STATE_NEW);
1821
1822 - ext4_update_inode_fsync_trans(handle, inode, 0);
1823 + ext4_update_inode_fsync_trans(handle, inode, need_datasync);
1824 out_brelse:
1825 brelse(bh);
1826 ext4_std_error(inode->i_sb, err);
1827 diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
1828 index c5826c6..e2016f3 100644
1829 --- a/fs/ext4/move_extent.c
1830 +++ b/fs/ext4/move_extent.c
1831 @@ -141,55 +141,21 @@ mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
1832 }
1833
1834 /**
1835 - * mext_check_null_inode - NULL check for two inodes
1836 - *
1837 - * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
1838 - */
1839 -static int
1840 -mext_check_null_inode(struct inode *inode1, struct inode *inode2,
1841 - const char *function, unsigned int line)
1842 -{
1843 - int ret = 0;
1844 -
1845 - if (inode1 == NULL) {
1846 - __ext4_error(inode2->i_sb, function, line,
1847 - "Both inodes should not be NULL: "
1848 - "inode1 NULL inode2 %lu", inode2->i_ino);
1849 - ret = -EIO;
1850 - } else if (inode2 == NULL) {
1851 - __ext4_error(inode1->i_sb, function, line,
1852 - "Both inodes should not be NULL: "
1853 - "inode1 %lu inode2 NULL", inode1->i_ino);
1854 - ret = -EIO;
1855 - }
1856 - return ret;
1857 -}
1858 -
1859 -/**
1860 * double_down_write_data_sem - Acquire two inodes' write lock of i_data_sem
1861 *
1862 - * @orig_inode: original inode structure
1863 - * @donor_inode: donor inode structure
1864 - * Acquire write lock of i_data_sem of the two inodes (orig and donor) by
1865 - * i_ino order.
1866 + * Acquire write lock of i_data_sem of the two inodes
1867 */
1868 static void
1869 -double_down_write_data_sem(struct inode *orig_inode, struct inode *donor_inode)
1870 +double_down_write_data_sem(struct inode *first, struct inode *second)
1871 {
1872 - struct inode *first = orig_inode, *second = donor_inode;
1873 + if (first < second) {
1874 + down_write(&EXT4_I(first)->i_data_sem);
1875 + down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
1876 + } else {
1877 + down_write(&EXT4_I(second)->i_data_sem);
1878 + down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING);
1879
1880 - /*
1881 - * Use the inode number to provide the stable locking order instead
1882 - * of its address, because the C language doesn't guarantee you can
1883 - * compare pointers that don't come from the same array.
1884 - */
1885 - if (donor_inode->i_ino < orig_inode->i_ino) {
1886 - first = donor_inode;
1887 - second = orig_inode;
1888 }
1889 -
1890 - down_write(&EXT4_I(first)->i_data_sem);
1891 - down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
1892 }
1893
1894 /**
1895 @@ -969,14 +935,6 @@ mext_check_arguments(struct inode *orig_inode,
1896 return -EINVAL;
1897 }
1898
1899 - /* Files should be in the same ext4 FS */
1900 - if (orig_inode->i_sb != donor_inode->i_sb) {
1901 - ext4_debug("ext4 move extent: The argument files "
1902 - "should be in same FS [ino:orig %lu, donor %lu]\n",
1903 - orig_inode->i_ino, donor_inode->i_ino);
1904 - return -EINVAL;
1905 - }
1906 -
1907 /* Ext4 move extent supports only extent based file */
1908 if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
1909 ext4_debug("ext4 move extent: orig file is not extents "
1910 @@ -1072,35 +1030,19 @@ mext_check_arguments(struct inode *orig_inode,
1911 * @inode1: the inode structure
1912 * @inode2: the inode structure
1913 *
1914 - * Lock two inodes' i_mutex by i_ino order.
1915 - * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
1916 + * Lock two inodes' i_mutex
1917 */
1918 -static int
1919 +static void
1920 mext_inode_double_lock(struct inode *inode1, struct inode *inode2)
1921 {
1922 - int ret = 0;
1923 -
1924 - BUG_ON(inode1 == NULL && inode2 == NULL);
1925 -
1926 - ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__);
1927 - if (ret < 0)
1928 - goto out;
1929 -
1930 - if (inode1 == inode2) {
1931 - mutex_lock(&inode1->i_mutex);
1932 - goto out;
1933 - }
1934 -
1935 - if (inode1->i_ino < inode2->i_ino) {
1936 + BUG_ON(inode1 == inode2);
1937 + if (inode1 < inode2) {
1938 mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
1939 mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
1940 } else {
1941 mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT);
1942 mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD);
1943 }
1944 -
1945 -out:
1946 - return ret;
1947 }
1948
1949 /**
1950 @@ -1109,28 +1051,13 @@ out:
1951 * @inode1: the inode that is released first
1952 * @inode2: the inode that is released second
1953 *
1954 - * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
1955 */
1956
1957 -static int
1958 +static void
1959 mext_inode_double_unlock(struct inode *inode1, struct inode *inode2)
1960 {
1961 - int ret = 0;
1962 -
1963 - BUG_ON(inode1 == NULL && inode2 == NULL);
1964 -
1965 - ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__);
1966 - if (ret < 0)
1967 - goto out;
1968 -
1969 - if (inode1)
1970 - mutex_unlock(&inode1->i_mutex);
1971 -
1972 - if (inode2 && inode2 != inode1)
1973 - mutex_unlock(&inode2->i_mutex);
1974 -
1975 -out:
1976 - return ret;
1977 + mutex_unlock(&inode1->i_mutex);
1978 + mutex_unlock(&inode2->i_mutex);
1979 }
1980
1981 /**
1982 @@ -1187,16 +1114,23 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
1983 ext4_lblk_t block_end, seq_start, add_blocks, file_end, seq_blocks = 0;
1984 ext4_lblk_t rest_blocks;
1985 pgoff_t orig_page_offset = 0, seq_end_page;
1986 - int ret1, ret2, depth, last_extent = 0;
1987 + int ret, depth, last_extent = 0;
1988 int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
1989 int data_offset_in_page;
1990 int block_len_in_page;
1991 int uninit;
1992
1993 - /* orig and donor should be different file */
1994 - if (orig_inode->i_ino == donor_inode->i_ino) {
1995 + if (orig_inode->i_sb != donor_inode->i_sb) {
1996 + ext4_debug("ext4 move extent: The argument files "
1997 + "should be in same FS [ino:orig %lu, donor %lu]\n",
1998 + orig_inode->i_ino, donor_inode->i_ino);
1999 + return -EINVAL;
2000 + }
2001 +
2002 + /* orig and donor should be different inodes */
2003 + if (orig_inode == donor_inode) {
2004 ext4_debug("ext4 move extent: The argument files should not "
2005 - "be same file [ino:orig %lu, donor %lu]\n",
2006 + "be same inode [ino:orig %lu, donor %lu]\n",
2007 orig_inode->i_ino, donor_inode->i_ino);
2008 return -EINVAL;
2009 }
2010 @@ -1208,18 +1142,21 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
2011 orig_inode->i_ino, donor_inode->i_ino);
2012 return -EINVAL;
2013 }
2014 -
2015 + /* TODO: This is non obvious task to swap blocks for inodes with full
2016 + jornaling enabled */
2017 + if (ext4_should_journal_data(orig_inode) ||
2018 + ext4_should_journal_data(donor_inode)) {
2019 + return -EINVAL;
2020 + }
2021 /* Protect orig and donor inodes against a truncate */
2022 - ret1 = mext_inode_double_lock(orig_inode, donor_inode);
2023 - if (ret1 < 0)
2024 - return ret1;
2025 + mext_inode_double_lock(orig_inode, donor_inode);
2026
2027 /* Protect extent tree against block allocations via delalloc */
2028 double_down_write_data_sem(orig_inode, donor_inode);
2029 /* Check the filesystem environment whether move_extent can be done */
2030 - ret1 = mext_check_arguments(orig_inode, donor_inode, orig_start,
2031 + ret = mext_check_arguments(orig_inode, donor_inode, orig_start,
2032 donor_start, &len);
2033 - if (ret1)
2034 + if (ret)
2035 goto out;
2036
2037 file_end = (i_size_read(orig_inode) - 1) >> orig_inode->i_blkbits;
2038 @@ -1227,13 +1164,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
2039 if (file_end < block_end)
2040 len -= block_end - file_end;
2041
2042 - ret1 = get_ext_path(orig_inode, block_start, &orig_path);
2043 - if (ret1)
2044 + ret = get_ext_path(orig_inode, block_start, &orig_path);
2045 + if (ret)
2046 goto out;
2047
2048 /* Get path structure to check the hole */
2049 - ret1 = get_ext_path(orig_inode, block_start, &holecheck_path);
2050 - if (ret1)
2051 + ret = get_ext_path(orig_inode, block_start, &holecheck_path);
2052 + if (ret)
2053 goto out;
2054
2055 depth = ext_depth(orig_inode);
2056 @@ -1252,13 +1189,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
2057 last_extent = mext_next_extent(orig_inode,
2058 holecheck_path, &ext_cur);
2059 if (last_extent < 0) {
2060 - ret1 = last_extent;
2061 + ret = last_extent;
2062 goto out;
2063 }
2064 last_extent = mext_next_extent(orig_inode, orig_path,
2065 &ext_dummy);
2066 if (last_extent < 0) {
2067 - ret1 = last_extent;
2068 + ret = last_extent;
2069 goto out;
2070 }
2071 seq_start = le32_to_cpu(ext_cur->ee_block);
2072 @@ -1272,7 +1209,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
2073 if (le32_to_cpu(ext_cur->ee_block) > block_end) {
2074 ext4_debug("ext4 move extent: The specified range of file "
2075 "may be the hole\n");
2076 - ret1 = -EINVAL;
2077 + ret = -EINVAL;
2078 goto out;
2079 }
2080
2081 @@ -1292,7 +1229,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
2082 last_extent = mext_next_extent(orig_inode, holecheck_path,
2083 &ext_cur);
2084 if (last_extent < 0) {
2085 - ret1 = last_extent;
2086 + ret = last_extent;
2087 break;
2088 }
2089 add_blocks = ext4_ext_get_actual_len(ext_cur);
2090 @@ -1349,18 +1286,18 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
2091 orig_page_offset,
2092 data_offset_in_page,
2093 block_len_in_page, uninit,
2094 - &ret1);
2095 + &ret);
2096
2097 /* Count how many blocks we have exchanged */
2098 *moved_len += block_len_in_page;
2099 - if (ret1 < 0)
2100 + if (ret < 0)
2101 break;
2102 if (*moved_len > len) {
2103 EXT4_ERROR_INODE(orig_inode,
2104 "We replaced blocks too much! "
2105 "sum of replaced: %llu requested: %llu",
2106 *moved_len, len);
2107 - ret1 = -EIO;
2108 + ret = -EIO;
2109 break;
2110 }
2111
2112 @@ -1374,22 +1311,22 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
2113 }
2114
2115 double_down_write_data_sem(orig_inode, donor_inode);
2116 - if (ret1 < 0)
2117 + if (ret < 0)
2118 break;
2119
2120 /* Decrease buffer counter */
2121 if (holecheck_path)
2122 ext4_ext_drop_refs(holecheck_path);
2123 - ret1 = get_ext_path(orig_inode, seq_start, &holecheck_path);
2124 - if (ret1)
2125 + ret = get_ext_path(orig_inode, seq_start, &holecheck_path);
2126 + if (ret)
2127 break;
2128 depth = holecheck_path->p_depth;
2129
2130 /* Decrease buffer counter */
2131 if (orig_path)
2132 ext4_ext_drop_refs(orig_path);
2133 - ret1 = get_ext_path(orig_inode, seq_start, &orig_path);
2134 - if (ret1)
2135 + ret = get_ext_path(orig_inode, seq_start, &orig_path);
2136 + if (ret)
2137 break;
2138
2139 ext_cur = holecheck_path[depth].p_ext;
2140 @@ -1412,12 +1349,7 @@ out:
2141 kfree(holecheck_path);
2142 }
2143 double_up_write_data_sem(orig_inode, donor_inode);
2144 - ret2 = mext_inode_double_unlock(orig_inode, donor_inode);
2145 -
2146 - if (ret1)
2147 - return ret1;
2148 - else if (ret2)
2149 - return ret2;
2150 + mext_inode_double_unlock(orig_inode, donor_inode);
2151
2152 - return 0;
2153 + return ret;
2154 }
2155 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
2156 index 0a94cbb..ac76939 100644
2157 --- a/fs/ext4/namei.c
2158 +++ b/fs/ext4/namei.c
2159 @@ -1801,9 +1801,7 @@ retry:
2160 err = PTR_ERR(inode);
2161 if (!IS_ERR(inode)) {
2162 init_special_inode(inode, inode->i_mode, rdev);
2163 -#ifdef CONFIG_EXT4_FS_XATTR
2164 inode->i_op = &ext4_special_inode_operations;
2165 -#endif
2166 err = ext4_add_nondir(handle, dentry, inode);
2167 }
2168 ext4_journal_stop(handle);
2169 diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
2170 index 3407a62..231cacb 100644
2171 --- a/fs/ext4/resize.c
2172 +++ b/fs/ext4/resize.c
2173 @@ -200,8 +200,11 @@ static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
2174 * be a partial of a flex group.
2175 *
2176 * @sb: super block of fs to which the groups belongs
2177 + *
2178 + * Returns 0 on a successful allocation of the metadata blocks in the
2179 + * block group.
2180 */
2181 -static void ext4_alloc_group_tables(struct super_block *sb,
2182 +static int ext4_alloc_group_tables(struct super_block *sb,
2183 struct ext4_new_flex_group_data *flex_gd,
2184 int flexbg_size)
2185 {
2186 @@ -226,6 +229,8 @@ static void ext4_alloc_group_tables(struct super_block *sb,
2187 (last_group & ~(flexbg_size - 1))));
2188 next_group:
2189 group = group_data[0].group;
2190 + if (src_group >= group_data[0].group + flex_gd->count)
2191 + return -ENOSPC;
2192 start_blk = ext4_group_first_block_no(sb, src_group);
2193 last_blk = start_blk + group_data[src_group - group].blocks_count;
2194
2195 @@ -235,7 +240,6 @@ next_group:
2196
2197 start_blk += overhead;
2198
2199 - BUG_ON(src_group >= group_data[0].group + flex_gd->count);
2200 /* We collect contiguous blocks as much as possible. */
2201 src_group++;
2202 for (; src_group <= last_group; src_group++)
2203 @@ -300,6 +304,7 @@ next_group:
2204 group_data[i].free_blocks_count);
2205 }
2206 }
2207 + return 0;
2208 }
2209
2210 static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
2211 @@ -451,6 +456,9 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
2212 gdblocks = ext4_bg_num_gdb(sb, group);
2213 start = ext4_group_first_block_no(sb, group);
2214
2215 + if (!ext4_bg_has_super(sb, group))
2216 + goto handle_itb;
2217 +
2218 /* Copy all of the GDT blocks into the backup in this group */
2219 for (j = 0, block = start + 1; j < gdblocks; j++, block++) {
2220 struct buffer_head *gdb;
2221 @@ -493,6 +501,7 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
2222 goto out;
2223 }
2224
2225 +handle_itb:
2226 /* Initialize group tables of the grop @group */
2227 if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
2228 goto handle_bb;
2229 @@ -1293,13 +1302,15 @@ exit_journal:
2230 err = err2;
2231
2232 if (!err) {
2233 - int i;
2234 + int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
2235 + int gdb_num_end = ((group + flex_gd->count - 1) /
2236 + EXT4_DESC_PER_BLOCK(sb));
2237 +
2238 update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
2239 sizeof(struct ext4_super_block));
2240 - for (i = 0; i < flex_gd->count; i++, group++) {
2241 + for (; gdb_num <= gdb_num_end; gdb_num++) {
2242 struct buffer_head *gdb_bh;
2243 - int gdb_num;
2244 - gdb_num = group / EXT4_BLOCKS_PER_GROUP(sb);
2245 +
2246 gdb_bh = sbi->s_group_desc[gdb_num];
2247 update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
2248 gdb_bh->b_size);
2249 @@ -1676,7 +1687,8 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
2250 */
2251 while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
2252 flexbg_size)) {
2253 - ext4_alloc_group_tables(sb, flex_gd, flexbg_size);
2254 + if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
2255 + break;
2256 err = ext4_flex_group_add(sb, resize_inode, flex_gd);
2257 if (unlikely(err))
2258 break;
2259 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
2260 index 12a278f..b1c28f1 100644
2261 --- a/fs/ext4/super.c
2262 +++ b/fs/ext4/super.c
2263 @@ -1692,7 +1692,7 @@ static inline void ext4_show_quota_options(struct seq_file *seq,
2264
2265 static const char *token2str(int token)
2266 {
2267 - static const struct match_token *t;
2268 + const struct match_token *t;
2269
2270 for (t = tokens; t->token != Opt_err; t++)
2271 if (t->token == token && !strchr(t->pattern, '='))
2272 diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
2273 index 539f36c..b35bd64 100644
2274 --- a/fs/fs-writeback.c
2275 +++ b/fs/fs-writeback.c
2276 @@ -68,6 +68,7 @@ int writeback_in_progress(struct backing_dev_info *bdi)
2277 {
2278 return test_bit(BDI_writeback_running, &bdi->state);
2279 }
2280 +EXPORT_SYMBOL(writeback_in_progress);
2281
2282 static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
2283 {
2284 diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
2285 index 9956ac6..e5bfb11 100644
2286 --- a/fs/jbd2/journal.c
2287 +++ b/fs/jbd2/journal.c
2288 @@ -1317,6 +1317,11 @@ static void jbd2_mark_journal_empty(journal_t *journal)
2289
2290 BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
2291 read_lock(&journal->j_state_lock);
2292 + /* Is it already empty? */
2293 + if (sb->s_start == 0) {
2294 + read_unlock(&journal->j_state_lock);
2295 + return;
2296 + }
2297 jbd_debug(1, "JBD2: Marking journal as empty (seq %d)\n",
2298 journal->j_tail_sequence);
2299
2300 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
2301 index 74d9be1..6bec5c0 100644
2302 --- a/fs/jffs2/wbuf.c
2303 +++ b/fs/jffs2/wbuf.c
2304 @@ -1043,10 +1043,10 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c,
2305 ops.datbuf = NULL;
2306
2307 ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
2308 - if (ret || ops.oobretlen != ops.ooblen) {
2309 + if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
2310 pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
2311 jeb->offset, ops.ooblen, ops.oobretlen, ret);
2312 - if (!ret)
2313 + if (!ret || mtd_is_bitflip(ret))
2314 ret = -EIO;
2315 return ret;
2316 }
2317 @@ -1085,10 +1085,10 @@ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
2318 ops.datbuf = NULL;
2319
2320 ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
2321 - if (ret || ops.oobretlen != ops.ooblen) {
2322 + if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
2323 pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
2324 jeb->offset, ops.ooblen, ops.oobretlen, ret);
2325 - if (!ret)
2326 + if (!ret || mtd_is_bitflip(ret))
2327 ret = -EIO;
2328 return ret;
2329 }
2330 diff --git a/fs/proc/page.c b/fs/proc/page.c
2331 index 7fcd0d6..b8730d9 100644
2332 --- a/fs/proc/page.c
2333 +++ b/fs/proc/page.c
2334 @@ -115,7 +115,13 @@ u64 stable_page_flags(struct page *page)
2335 u |= 1 << KPF_COMPOUND_TAIL;
2336 if (PageHuge(page))
2337 u |= 1 << KPF_HUGE;
2338 - else if (PageTransCompound(page))
2339 + /*
2340 + * PageTransCompound can be true for non-huge compound pages (slab
2341 + * pages or pages allocated by drivers with __GFP_COMP) because it
2342 + * just checks PG_head/PG_tail, so we need to check PageLRU to make
2343 + * sure a given page is a thp, not a non-huge compound page.
2344 + */
2345 + else if (PageTransCompound(page) && PageLRU(compound_trans_head(page)))
2346 u |= 1 << KPF_THP;
2347
2348 /*
2349 diff --git a/fs/udf/super.c b/fs/udf/super.c
2350 index e660ffd..4988a8a 100644
2351 --- a/fs/udf/super.c
2352 +++ b/fs/udf/super.c
2353 @@ -1287,6 +1287,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
2354 udf_err(sb, "error loading logical volume descriptor: "
2355 "Partition table too long (%u > %lu)\n", table_len,
2356 sb->s_blocksize - sizeof(*lvd));
2357 + ret = 1;
2358 goto out_bh;
2359 }
2360
2361 @@ -1331,8 +1332,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
2362 UDF_ID_SPARABLE,
2363 strlen(UDF_ID_SPARABLE))) {
2364 if (udf_load_sparable_map(sb, map,
2365 - (struct sparablePartitionMap *)gpm) < 0)
2366 + (struct sparablePartitionMap *)gpm) < 0) {
2367 + ret = 1;
2368 goto out_bh;
2369 + }
2370 } else if (!strncmp(upm2->partIdent.ident,
2371 UDF_ID_METADATA,
2372 strlen(UDF_ID_METADATA))) {
2373 diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
2374 index 7c727a9..0abf1d4 100644
2375 --- a/include/linux/mempolicy.h
2376 +++ b/include/linux/mempolicy.h
2377 @@ -188,7 +188,7 @@ struct sp_node {
2378
2379 struct shared_policy {
2380 struct rb_root root;
2381 - spinlock_t lock;
2382 + struct mutex mutex;
2383 };
2384
2385 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
2386 diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
2387 index 22e61fd..28e493b 100644
2388 --- a/include/linux/xfrm.h
2389 +++ b/include/linux/xfrm.h
2390 @@ -84,6 +84,8 @@ struct xfrm_replay_state {
2391 __u32 bitmap;
2392 };
2393
2394 +#define XFRMA_REPLAY_ESN_MAX 4096
2395 +
2396 struct xfrm_replay_state_esn {
2397 unsigned int bmp_len;
2398 __u32 oseq;
2399 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
2400 index 96239e7..9f7e94b 100644
2401 --- a/include/net/xfrm.h
2402 +++ b/include/net/xfrm.h
2403 @@ -269,6 +269,9 @@ struct xfrm_replay {
2404 int (*check)(struct xfrm_state *x,
2405 struct sk_buff *skb,
2406 __be32 net_seq);
2407 + int (*recheck)(struct xfrm_state *x,
2408 + struct sk_buff *skb,
2409 + __be32 net_seq);
2410 void (*notify)(struct xfrm_state *x, int event);
2411 int (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
2412 };
2413 diff --git a/kernel/cpuset.c b/kernel/cpuset.c
2414 index 14f7070..5fc1570 100644
2415 --- a/kernel/cpuset.c
2416 +++ b/kernel/cpuset.c
2417 @@ -2065,6 +2065,9 @@ static void scan_for_empty_cpusets(struct cpuset *root)
2418 * (of no affect) on systems that are actively using CPU hotplug
2419 * but making no active use of cpusets.
2420 *
2421 + * The only exception to this is suspend/resume, where we don't
2422 + * modify cpusets at all.
2423 + *
2424 * This routine ensures that top_cpuset.cpus_allowed tracks
2425 * cpu_active_mask on each CPU hotplug (cpuhp) event.
2426 *
2427 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
2428 index d0c5baf..4eec66e 100644
2429 --- a/kernel/rcutree.c
2430 +++ b/kernel/rcutree.c
2431 @@ -295,7 +295,9 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
2432 static int
2433 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
2434 {
2435 - return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
2436 + return *rdp->nxttail[RCU_DONE_TAIL +
2437 + ACCESS_ONCE(rsp->completed) != rdp->completed] &&
2438 + !rcu_gp_in_progress(rsp);
2439 }
2440
2441 /*
2442 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
2443 index 593087b..1d22981 100644
2444 --- a/kernel/sched/core.c
2445 +++ b/kernel/sched/core.c
2446 @@ -6937,34 +6937,66 @@ int __init sched_create_sysfs_power_savings_entries(struct device *dev)
2447 }
2448 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
2449
2450 +static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
2451 +
2452 /*
2453 * Update cpusets according to cpu_active mask. If cpusets are
2454 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
2455 * around partition_sched_domains().
2456 + *
2457 + * If we come here as part of a suspend/resume, don't touch cpusets because we
2458 + * want to restore it back to its original state upon resume anyway.
2459 */
2460 static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
2461 void *hcpu)
2462 {
2463 - switch (action & ~CPU_TASKS_FROZEN) {
2464 + switch (action) {
2465 + case CPU_ONLINE_FROZEN:
2466 + case CPU_DOWN_FAILED_FROZEN:
2467 +
2468 + /*
2469 + * num_cpus_frozen tracks how many CPUs are involved in suspend
2470 + * resume sequence. As long as this is not the last online
2471 + * operation in the resume sequence, just build a single sched
2472 + * domain, ignoring cpusets.
2473 + */
2474 + num_cpus_frozen--;
2475 + if (likely(num_cpus_frozen)) {
2476 + partition_sched_domains(1, NULL, NULL);
2477 + break;
2478 + }
2479 +
2480 + /*
2481 + * This is the last CPU online operation. So fall through and
2482 + * restore the original sched domains by considering the
2483 + * cpuset configurations.
2484 + */
2485 +
2486 case CPU_ONLINE:
2487 case CPU_DOWN_FAILED:
2488 cpuset_update_active_cpus();
2489 - return NOTIFY_OK;
2490 + break;
2491 default:
2492 return NOTIFY_DONE;
2493 }
2494 + return NOTIFY_OK;
2495 }
2496
2497 static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
2498 void *hcpu)
2499 {
2500 - switch (action & ~CPU_TASKS_FROZEN) {
2501 + switch (action) {
2502 case CPU_DOWN_PREPARE:
2503 cpuset_update_active_cpus();
2504 - return NOTIFY_OK;
2505 + break;
2506 + case CPU_DOWN_PREPARE_FROZEN:
2507 + num_cpus_frozen++;
2508 + partition_sched_domains(1, NULL, NULL);
2509 + break;
2510 default:
2511 return NOTIFY_DONE;
2512 }
2513 + return NOTIFY_OK;
2514 }
2515
2516 void __init sched_init_smp(void)
2517 diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
2518 index 7b386e8..da5eb5b 100644
2519 --- a/kernel/sched/stop_task.c
2520 +++ b/kernel/sched/stop_task.c
2521 @@ -27,8 +27,10 @@ static struct task_struct *pick_next_task_stop(struct rq *rq)
2522 {
2523 struct task_struct *stop = rq->stop;
2524
2525 - if (stop && stop->on_rq)
2526 + if (stop && stop->on_rq) {
2527 + stop->se.exec_start = rq->clock_task;
2528 return stop;
2529 + }
2530
2531 return NULL;
2532 }
2533 @@ -52,6 +54,21 @@ static void yield_task_stop(struct rq *rq)
2534
2535 static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
2536 {
2537 + struct task_struct *curr = rq->curr;
2538 + u64 delta_exec;
2539 +
2540 + delta_exec = rq->clock_task - curr->se.exec_start;
2541 + if (unlikely((s64)delta_exec < 0))
2542 + delta_exec = 0;
2543 +
2544 + schedstat_set(curr->se.statistics.exec_max,
2545 + max(curr->se.statistics.exec_max, delta_exec));
2546 +
2547 + curr->se.sum_exec_runtime += delta_exec;
2548 + account_group_exec_runtime(curr, delta_exec);
2549 +
2550 + curr->se.exec_start = rq->clock_task;
2551 + cpuacct_charge(curr, delta_exec);
2552 }
2553
2554 static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
2555 @@ -60,6 +77,9 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
2556
2557 static void set_curr_task_stop(struct rq *rq)
2558 {
2559 + struct task_struct *stop = rq->stop;
2560 +
2561 + stop->se.exec_start = rq->clock_task;
2562 }
2563
2564 static void switched_to_stop(struct rq *rq, struct task_struct *p)
2565 diff --git a/kernel/sys.c b/kernel/sys.c
2566 index e7006eb..898a84c 100644
2567 --- a/kernel/sys.c
2568 +++ b/kernel/sys.c
2569 @@ -365,6 +365,7 @@ EXPORT_SYMBOL(unregister_reboot_notifier);
2570 void kernel_restart(char *cmd)
2571 {
2572 kernel_restart_prepare(cmd);
2573 + disable_nonboot_cpus();
2574 if (!cmd)
2575 printk(KERN_EMERG "Restarting system.\n");
2576 else
2577 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
2578 index 7584322..56f793d 100644
2579 --- a/kernel/workqueue.c
2580 +++ b/kernel/workqueue.c
2581 @@ -1864,7 +1864,9 @@ __acquires(&gcwq->lock)
2582
2583 spin_unlock_irq(&gcwq->lock);
2584
2585 + smp_wmb(); /* paired with test_and_set_bit(PENDING) */
2586 work_clear_pending(work);
2587 +
2588 lock_map_acquire_read(&cwq->wq->lockdep_map);
2589 lock_map_acquire(&lockdep_map);
2590 trace_workqueue_execute_start(work);
2591 diff --git a/lib/gcd.c b/lib/gcd.c
2592 index cce4f3c..3657f12 100644
2593 --- a/lib/gcd.c
2594 +++ b/lib/gcd.c
2595 @@ -9,6 +9,9 @@ unsigned long gcd(unsigned long a, unsigned long b)
2596
2597 if (a < b)
2598 swap(a, b);
2599 +
2600 + if (!b)
2601 + return a;
2602 while ((r = a % b) != 0) {
2603 a = b;
2604 b = r;
2605 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2606 index a799df5..c384e09 100644
2607 --- a/mm/hugetlb.c
2608 +++ b/mm/hugetlb.c
2609 @@ -2431,7 +2431,8 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2610 * from page cache lookup which is in HPAGE_SIZE units.
2611 */
2612 address = address & huge_page_mask(h);
2613 - pgoff = vma_hugecache_offset(h, vma, address);
2614 + pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
2615 + vma->vm_pgoff;
2616 mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
2617
2618 /*
2619 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
2620 index 9afcbad..d1e4bef 100644
2621 --- a/mm/mempolicy.c
2622 +++ b/mm/mempolicy.c
2623 @@ -607,6 +607,42 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
2624 return first;
2625 }
2626
2627 +/*
2628 + * Apply policy to a single VMA
2629 + * This must be called with the mmap_sem held for writing.
2630 + */
2631 +static int vma_replace_policy(struct vm_area_struct *vma,
2632 + struct mempolicy *pol)
2633 +{
2634 + int err;
2635 + struct mempolicy *old;
2636 + struct mempolicy *new;
2637 +
2638 + pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
2639 + vma->vm_start, vma->vm_end, vma->vm_pgoff,
2640 + vma->vm_ops, vma->vm_file,
2641 + vma->vm_ops ? vma->vm_ops->set_policy : NULL);
2642 +
2643 + new = mpol_dup(pol);
2644 + if (IS_ERR(new))
2645 + return PTR_ERR(new);
2646 +
2647 + if (vma->vm_ops && vma->vm_ops->set_policy) {
2648 + err = vma->vm_ops->set_policy(vma, new);
2649 + if (err)
2650 + goto err_out;
2651 + }
2652 +
2653 + old = vma->vm_policy;
2654 + vma->vm_policy = new; /* protected by mmap_sem */
2655 + mpol_put(old);
2656 +
2657 + return 0;
2658 + err_out:
2659 + mpol_put(new);
2660 + return err;
2661 +}
2662 +
2663 /* Step 2: apply policy to a range and do splits. */
2664 static int mbind_range(struct mm_struct *mm, unsigned long start,
2665 unsigned long end, struct mempolicy *new_pol)
2666 @@ -655,23 +691,9 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
2667 if (err)
2668 goto out;
2669 }
2670 -
2671 - /*
2672 - * Apply policy to a single VMA. The reference counting of
2673 - * policy for vma_policy linkages has already been handled by
2674 - * vma_merge and split_vma as necessary. If this is a shared
2675 - * policy then ->set_policy will increment the reference count
2676 - * for an sp node.
2677 - */
2678 - pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
2679 - vma->vm_start, vma->vm_end, vma->vm_pgoff,
2680 - vma->vm_ops, vma->vm_file,
2681 - vma->vm_ops ? vma->vm_ops->set_policy : NULL);
2682 - if (vma->vm_ops && vma->vm_ops->set_policy) {
2683 - err = vma->vm_ops->set_policy(vma, new_pol);
2684 - if (err)
2685 - goto out;
2686 - }
2687 + err = vma_replace_policy(vma, new_pol);
2688 + if (err)
2689 + goto out;
2690 }
2691
2692 out:
2693 @@ -1510,8 +1532,18 @@ struct mempolicy *get_vma_policy(struct task_struct *task,
2694 addr);
2695 if (vpol)
2696 pol = vpol;
2697 - } else if (vma->vm_policy)
2698 + } else if (vma->vm_policy) {
2699 pol = vma->vm_policy;
2700 +
2701 + /*
2702 + * shmem_alloc_page() passes MPOL_F_SHARED policy with
2703 + * a pseudo vma whose vma->vm_ops=NULL. Take a reference
2704 + * count on these policies which will be dropped by
2705 + * mpol_cond_put() later
2706 + */
2707 + if (mpol_needs_cond_ref(pol))
2708 + mpol_get(pol);
2709 + }
2710 }
2711 if (!pol)
2712 pol = &default_policy;
2713 @@ -2035,7 +2067,7 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2714 */
2715
2716 /* lookup first element intersecting start-end */
2717 -/* Caller holds sp->lock */
2718 +/* Caller holds sp->mutex */
2719 static struct sp_node *
2720 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2721 {
2722 @@ -2099,36 +2131,50 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2723
2724 if (!sp->root.rb_node)
2725 return NULL;
2726 - spin_lock(&sp->lock);
2727 + mutex_lock(&sp->mutex);
2728 sn = sp_lookup(sp, idx, idx+1);
2729 if (sn) {
2730 mpol_get(sn->policy);
2731 pol = sn->policy;
2732 }
2733 - spin_unlock(&sp->lock);
2734 + mutex_unlock(&sp->mutex);
2735 return pol;
2736 }
2737
2738 +static void sp_free(struct sp_node *n)
2739 +{
2740 + mpol_put(n->policy);
2741 + kmem_cache_free(sn_cache, n);
2742 +}
2743 +
2744 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2745 {
2746 pr_debug("deleting %lx-l%lx\n", n->start, n->end);
2747 rb_erase(&n->nd, &sp->root);
2748 - mpol_put(n->policy);
2749 - kmem_cache_free(sn_cache, n);
2750 + sp_free(n);
2751 }
2752
2753 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2754 struct mempolicy *pol)
2755 {
2756 - struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2757 + struct sp_node *n;
2758 + struct mempolicy *newpol;
2759
2760 + n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2761 if (!n)
2762 return NULL;
2763 +
2764 + newpol = mpol_dup(pol);
2765 + if (IS_ERR(newpol)) {
2766 + kmem_cache_free(sn_cache, n);
2767 + return NULL;
2768 + }
2769 + newpol->flags |= MPOL_F_SHARED;
2770 +
2771 n->start = start;
2772 n->end = end;
2773 - mpol_get(pol);
2774 - pol->flags |= MPOL_F_SHARED; /* for unref */
2775 - n->policy = pol;
2776 + n->policy = newpol;
2777 +
2778 return n;
2779 }
2780
2781 @@ -2136,10 +2182,10 @@ static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2782 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2783 unsigned long end, struct sp_node *new)
2784 {
2785 - struct sp_node *n, *new2 = NULL;
2786 + struct sp_node *n;
2787 + int ret = 0;
2788
2789 -restart:
2790 - spin_lock(&sp->lock);
2791 + mutex_lock(&sp->mutex);
2792 n = sp_lookup(sp, start, end);
2793 /* Take care of old policies in the same range. */
2794 while (n && n->start < end) {
2795 @@ -2152,16 +2198,14 @@ restart:
2796 } else {
2797 /* Old policy spanning whole new range. */
2798 if (n->end > end) {
2799 + struct sp_node *new2;
2800 + new2 = sp_alloc(end, n->end, n->policy);
2801 if (!new2) {
2802 - spin_unlock(&sp->lock);
2803 - new2 = sp_alloc(end, n->end, n->policy);
2804 - if (!new2)
2805 - return -ENOMEM;
2806 - goto restart;
2807 + ret = -ENOMEM;
2808 + goto out;
2809 }
2810 n->end = start;
2811 sp_insert(sp, new2);
2812 - new2 = NULL;
2813 break;
2814 } else
2815 n->end = start;
2816 @@ -2172,12 +2216,9 @@ restart:
2817 }
2818 if (new)
2819 sp_insert(sp, new);
2820 - spin_unlock(&sp->lock);
2821 - if (new2) {
2822 - mpol_put(new2->policy);
2823 - kmem_cache_free(sn_cache, new2);
2824 - }
2825 - return 0;
2826 +out:
2827 + mutex_unlock(&sp->mutex);
2828 + return ret;
2829 }
2830
2831 /**
2832 @@ -2195,7 +2236,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2833 int ret;
2834
2835 sp->root = RB_ROOT; /* empty tree == default mempolicy */
2836 - spin_lock_init(&sp->lock);
2837 + mutex_init(&sp->mutex);
2838
2839 if (mpol) {
2840 struct vm_area_struct pvma;
2841 @@ -2249,7 +2290,7 @@ int mpol_set_shared_policy(struct shared_policy *info,
2842 }
2843 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2844 if (err && new)
2845 - kmem_cache_free(sn_cache, new);
2846 + sp_free(new);
2847 return err;
2848 }
2849
2850 @@ -2261,16 +2302,14 @@ void mpol_free_shared_policy(struct shared_policy *p)
2851
2852 if (!p->root.rb_node)
2853 return;
2854 - spin_lock(&p->lock);
2855 + mutex_lock(&p->mutex);
2856 next = rb_first(&p->root);
2857 while (next) {
2858 n = rb_entry(next, struct sp_node, nd);
2859 next = rb_next(&n->nd);
2860 - rb_erase(&n->nd, &p->root);
2861 - mpol_put(n->policy);
2862 - kmem_cache_free(sn_cache, n);
2863 + sp_delete(p, n);
2864 }
2865 - spin_unlock(&p->lock);
2866 + mutex_unlock(&p->mutex);
2867 }
2868
2869 /* assumes fs == KERNEL_DS */
2870 diff --git a/mm/slab.c b/mm/slab.c
2871 index e901a36..da2bb68 100644
2872 --- a/mm/slab.c
2873 +++ b/mm/slab.c
2874 @@ -1685,9 +1685,6 @@ void __init kmem_cache_init_late(void)
2875
2876 g_cpucache_up = LATE;
2877
2878 - /* Annotate slab for lockdep -- annotate the malloc caches */
2879 - init_lock_keys();
2880 -
2881 /* 6) resize the head arrays to their final sizes */
2882 mutex_lock(&cache_chain_mutex);
2883 list_for_each_entry(cachep, &cache_chain, next)
2884 @@ -1695,6 +1692,9 @@ void __init kmem_cache_init_late(void)
2885 BUG();
2886 mutex_unlock(&cache_chain_mutex);
2887
2888 + /* Annotate slab for lockdep -- annotate the malloc caches */
2889 + init_lock_keys();
2890 +
2891 /* Done! */
2892 g_cpucache_up = FULL;
2893
2894 diff --git a/mm/truncate.c b/mm/truncate.c
2895 index 61a183b..4224627 100644
2896 --- a/mm/truncate.c
2897 +++ b/mm/truncate.c
2898 @@ -394,11 +394,12 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
2899 if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
2900 return 0;
2901
2902 + clear_page_mlock(page);
2903 +
2904 spin_lock_irq(&mapping->tree_lock);
2905 if (PageDirty(page))
2906 goto failed;
2907
2908 - clear_page_mlock(page);
2909 BUG_ON(page_has_private(page));
2910 __delete_from_page_cache(page);
2911 spin_unlock_irq(&mapping->tree_lock);
2912 diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
2913 index 4d39d80..f364630 100644
2914 --- a/net/8021q/vlan_core.c
2915 +++ b/net/8021q/vlan_core.c
2916 @@ -106,7 +106,6 @@ static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
2917 return NULL;
2918 memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
2919 skb->mac_header += VLAN_HLEN;
2920 - skb_reset_mac_len(skb);
2921 return skb;
2922 }
2923
2924 @@ -140,6 +139,8 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)
2925
2926 skb_reset_network_header(skb);
2927 skb_reset_transport_header(skb);
2928 + skb_reset_mac_len(skb);
2929 +
2930 return skb;
2931
2932 err_free:
2933 diff --git a/net/core/dev.c b/net/core/dev.c
2934 index 3fd9cae..086bc2e 100644
2935 --- a/net/core/dev.c
2936 +++ b/net/core/dev.c
2937 @@ -2121,7 +2121,8 @@ static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
2938 static netdev_features_t harmonize_features(struct sk_buff *skb,
2939 __be16 protocol, netdev_features_t features)
2940 {
2941 - if (!can_checksum_protocol(features, protocol)) {
2942 + if (skb->ip_summed != CHECKSUM_NONE &&
2943 + !can_checksum_protocol(features, protocol)) {
2944 features &= ~NETIF_F_ALL_CSUM;
2945 features &= ~NETIF_F_SG;
2946 } else if (illegal_highdma(skb->dev, skb)) {
2947 @@ -2617,15 +2618,16 @@ void __skb_get_rxhash(struct sk_buff *skb)
2948 if (!skb_flow_dissect(skb, &keys))
2949 return;
2950
2951 - if (keys.ports) {
2952 - if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
2953 - swap(keys.port16[0], keys.port16[1]);
2954 + if (keys.ports)
2955 skb->l4_rxhash = 1;
2956 - }
2957
2958 /* get a consistent hash (same value on both flow directions) */
2959 - if ((__force u32)keys.dst < (__force u32)keys.src)
2960 + if (((__force u32)keys.dst < (__force u32)keys.src) ||
2961 + (((__force u32)keys.dst == (__force u32)keys.src) &&
2962 + ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
2963 swap(keys.dst, keys.src);
2964 + swap(keys.port16[0], keys.port16[1]);
2965 + }
2966
2967 hash = jhash_3words((__force u32)keys.dst,
2968 (__force u32)keys.src,
2969 diff --git a/net/core/sock.c b/net/core/sock.c
2970 index d3e0a52..4b469e3 100644
2971 --- a/net/core/sock.c
2972 +++ b/net/core/sock.c
2973 @@ -644,7 +644,8 @@ set_rcvbuf:
2974
2975 case SO_KEEPALIVE:
2976 #ifdef CONFIG_INET
2977 - if (sk->sk_protocol == IPPROTO_TCP)
2978 + if (sk->sk_protocol == IPPROTO_TCP &&
2979 + sk->sk_type == SOCK_STREAM)
2980 tcp_set_keepalive(sk, valbool);
2981 #endif
2982 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
2983 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
2984 index bbd604c..2fe0dc2 100644
2985 --- a/net/ipv4/raw.c
2986 +++ b/net/ipv4/raw.c
2987 @@ -131,18 +131,20 @@ found:
2988 * 0 - deliver
2989 * 1 - block
2990 */
2991 -static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
2992 +static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
2993 {
2994 - int type;
2995 + struct icmphdr _hdr;
2996 + const struct icmphdr *hdr;
2997
2998 - if (!pskb_may_pull(skb, sizeof(struct icmphdr)))
2999 + hdr = skb_header_pointer(skb, skb_transport_offset(skb),
3000 + sizeof(_hdr), &_hdr);
3001 + if (!hdr)
3002 return 1;
3003
3004 - type = icmp_hdr(skb)->type;
3005 - if (type < 32) {
3006 + if (hdr->type < 32) {
3007 __u32 data = raw_sk(sk)->filter.data;
3008
3009 - return ((1 << type) & data) != 0;
3010 + return ((1U << hdr->type) & data) != 0;
3011 }
3012
3013 /* Do not block unknown ICMP types */
3014 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
3015 index 367bdaf..8fbe2e2 100644
3016 --- a/net/ipv4/tcp.c
3017 +++ b/net/ipv4/tcp.c
3018 @@ -1594,8 +1594,14 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
3019 }
3020
3021 #ifdef CONFIG_NET_DMA
3022 - if (tp->ucopy.dma_chan)
3023 - dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
3024 + if (tp->ucopy.dma_chan) {
3025 + if (tp->rcv_wnd == 0 &&
3026 + !skb_queue_empty(&sk->sk_async_wait_queue)) {
3027 + tcp_service_net_dma(sk, true);
3028 + tcp_cleanup_rbuf(sk, copied);
3029 + } else
3030 + dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
3031 + }
3032 #endif
3033 if (copied >= target) {
3034 /* Do not sleep, just process backlog. */
3035 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
3036 index 2c69eca..5ec6069 100644
3037 --- a/net/ipv6/addrconf.c
3038 +++ b/net/ipv6/addrconf.c
3039 @@ -793,10 +793,16 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
3040 struct in6_addr prefix;
3041 struct rt6_info *rt;
3042 struct net *net = dev_net(ifp->idev->dev);
3043 + struct flowi6 fl6 = {};
3044 +
3045 ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len);
3046 - rt = rt6_lookup(net, &prefix, NULL, ifp->idev->dev->ifindex, 1);
3047 + fl6.flowi6_oif = ifp->idev->dev->ifindex;
3048 + fl6.daddr = prefix;
3049 + rt = (struct rt6_info *)ip6_route_lookup(net, &fl6,
3050 + RT6_LOOKUP_F_IFACE);
3051
3052 - if (rt && addrconf_is_prefix_route(rt)) {
3053 + if (rt != net->ipv6.ip6_null_entry &&
3054 + addrconf_is_prefix_route(rt)) {
3055 if (onlink == 0) {
3056 ip6_del_rt(rt);
3057 rt = NULL;
3058 diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
3059 index 92bb9cb..c3a007d 100644
3060 --- a/net/ipv6/ip6_fib.c
3061 +++ b/net/ipv6/ip6_fib.c
3062 @@ -818,6 +818,10 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
3063 offsetof(struct rt6_info, rt6i_src),
3064 allow_create, replace_required);
3065
3066 + if (IS_ERR(sn)) {
3067 + err = PTR_ERR(sn);
3068 + sn = NULL;
3069 + }
3070 if (!sn) {
3071 /* If it is failed, discard just allocated
3072 root, and then (in st_failure) stale node
3073 diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
3074 index 7e1e0fb..740c919 100644
3075 --- a/net/ipv6/mip6.c
3076 +++ b/net/ipv6/mip6.c
3077 @@ -84,28 +84,30 @@ static int mip6_mh_len(int type)
3078
3079 static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
3080 {
3081 - struct ip6_mh *mh;
3082 + struct ip6_mh _hdr;
3083 + const struct ip6_mh *mh;
3084
3085 - if (!pskb_may_pull(skb, (skb_transport_offset(skb)) + 8) ||
3086 - !pskb_may_pull(skb, (skb_transport_offset(skb) +
3087 - ((skb_transport_header(skb)[1] + 1) << 3))))
3088 + mh = skb_header_pointer(skb, skb_transport_offset(skb),
3089 + sizeof(_hdr), &_hdr);
3090 + if (!mh)
3091 return -1;
3092
3093 - mh = (struct ip6_mh *)skb_transport_header(skb);
3094 + if (((mh->ip6mh_hdrlen + 1) << 3) > skb->len)
3095 + return -1;
3096
3097 if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) {
3098 LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n",
3099 mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type));
3100 - mip6_param_prob(skb, 0, ((&mh->ip6mh_hdrlen) -
3101 - skb_network_header(skb)));
3102 + mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_hdrlen) +
3103 + skb_network_header_len(skb));
3104 return -1;
3105 }
3106
3107 if (mh->ip6mh_proto != IPPROTO_NONE) {
3108 LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n",
3109 mh->ip6mh_proto);
3110 - mip6_param_prob(skb, 0, ((&mh->ip6mh_proto) -
3111 - skb_network_header(skb)));
3112 + mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_proto) +
3113 + skb_network_header_len(skb));
3114 return -1;
3115 }
3116
3117 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
3118 index 5bddea7..3ee2870 100644
3119 --- a/net/ipv6/raw.c
3120 +++ b/net/ipv6/raw.c
3121 @@ -107,21 +107,20 @@ found:
3122 * 0 - deliver
3123 * 1 - block
3124 */
3125 -static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
3126 +static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
3127 {
3128 - struct icmp6hdr *icmph;
3129 - struct raw6_sock *rp = raw6_sk(sk);
3130 -
3131 - if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) {
3132 - __u32 *data = &rp->filter.data[0];
3133 - int bit_nr;
3134 + struct icmp6hdr *_hdr;
3135 + const struct icmp6hdr *hdr;
3136
3137 - icmph = (struct icmp6hdr *) skb->data;
3138 - bit_nr = icmph->icmp6_type;
3139 + hdr = skb_header_pointer(skb, skb_transport_offset(skb),
3140 + sizeof(_hdr), &_hdr);
3141 + if (hdr) {
3142 + const __u32 *data = &raw6_sk(sk)->filter.data[0];
3143 + unsigned int type = hdr->icmp6_type;
3144
3145 - return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0;
3146 + return (data[type >> 5] & (1U << (type & 31))) != 0;
3147 }
3148 - return 0;
3149 + return 1;
3150 }
3151
3152 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
3153 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
3154 index c4920ca..2796b37 100644
3155 --- a/net/ipv6/route.c
3156 +++ b/net/ipv6/route.c
3157 @@ -1485,17 +1485,18 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
3158 struct fib6_table *table;
3159 struct net *net = dev_net(rt->dst.dev);
3160
3161 - if (rt == net->ipv6.ip6_null_entry)
3162 - return -ENOENT;
3163 + if (rt == net->ipv6.ip6_null_entry) {
3164 + err = -ENOENT;
3165 + goto out;
3166 + }
3167
3168 table = rt->rt6i_table;
3169 write_lock_bh(&table->tb6_lock);
3170 -
3171 err = fib6_del(rt, info);
3172 - dst_release(&rt->dst);
3173 -
3174 write_unlock_bh(&table->tb6_lock);
3175
3176 +out:
3177 + dst_release(&rt->dst);
3178 return err;
3179 }
3180
3181 diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
3182 index 7446038..5c82907 100644
3183 --- a/net/l2tp/l2tp_eth.c
3184 +++ b/net/l2tp/l2tp_eth.c
3185 @@ -132,7 +132,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
3186 printk("\n");
3187 }
3188
3189 - if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
3190 + if (!pskb_may_pull(skb, ETH_HLEN))
3191 goto error;
3192
3193 secpath_reset(skb);
3194 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
3195 index 06592d8..1b9024e 100644
3196 --- a/net/netrom/af_netrom.c
3197 +++ b/net/netrom/af_netrom.c
3198 @@ -1169,7 +1169,12 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
3199 msg->msg_flags |= MSG_TRUNC;
3200 }
3201
3202 - skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
3203 + er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
3204 + if (er < 0) {
3205 + skb_free_datagram(sk, skb);
3206 + release_sock(sk);
3207 + return er;
3208 + }
3209
3210 if (sax != NULL) {
3211 sax->sax25_family = AF_NETROM;
3212 diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
3213 index 24d94c0..599f67a 100644
3214 --- a/net/sched/sch_cbq.c
3215 +++ b/net/sched/sch_cbq.c
3216 @@ -250,10 +250,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
3217 else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
3218 cl = defmap[TC_PRIO_BESTEFFORT];
3219
3220 - if (cl == NULL || cl->level >= head->level)
3221 + if (cl == NULL)
3222 goto fallback;
3223 }
3224 -
3225 + if (cl->level >= head->level)
3226 + goto fallback;
3227 #ifdef CONFIG_NET_CLS_ACT
3228 switch (result) {
3229 case TC_ACT_QUEUED:
3230 diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
3231 index e68cb44..cdd474a 100644
3232 --- a/net/sched/sch_qfq.c
3233 +++ b/net/sched/sch_qfq.c
3234 @@ -830,7 +830,10 @@ static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
3235 if (mask) {
3236 struct qfq_group *next = qfq_ffs(q, mask);
3237 if (qfq_gt(roundedF, next->F)) {
3238 - cl->S = next->F;
3239 + if (qfq_gt(limit, next->F))
3240 + cl->S = next->F;
3241 + else /* preserve timestamp correctness */
3242 + cl->S = limit;
3243 return;
3244 }
3245 }
3246 diff --git a/net/sctp/output.c b/net/sctp/output.c
3247 index 8fc4dcd..32ba8d0 100644
3248 --- a/net/sctp/output.c
3249 +++ b/net/sctp/output.c
3250 @@ -334,6 +334,25 @@ finish:
3251 return retval;
3252 }
3253
3254 +static void sctp_packet_release_owner(struct sk_buff *skb)
3255 +{
3256 + sk_free(skb->sk);
3257 +}
3258 +
3259 +static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
3260 +{
3261 + skb_orphan(skb);
3262 + skb->sk = sk;
3263 + skb->destructor = sctp_packet_release_owner;
3264 +
3265 + /*
3266 + * The data chunks have already been accounted for in sctp_sendmsg(),
3267 + * therefore only reserve a single byte to keep socket around until
3268 + * the packet has been transmitted.
3269 + */
3270 + atomic_inc(&sk->sk_wmem_alloc);
3271 +}
3272 +
3273 /* All packets are sent to the network through this function from
3274 * sctp_outq_tail().
3275 *
3276 @@ -375,7 +394,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
3277 /* Set the owning socket so that we know where to get the
3278 * destination IP address.
3279 */
3280 - skb_set_owner_w(nskb, sk);
3281 + sctp_packet_set_owner_w(nskb, sk);
3282
3283 if (!sctp_transport_dst_check(tp)) {
3284 sctp_transport_route(tp, NULL, sctp_sk(sk));
3285 diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
3286 index 54a0dc2..ab2bb42 100644
3287 --- a/net/xfrm/xfrm_input.c
3288 +++ b/net/xfrm/xfrm_input.c
3289 @@ -212,7 +212,7 @@ resume:
3290 /* only the first xfrm gets the encap type */
3291 encap_type = 0;
3292
3293 - if (async && x->repl->check(x, skb, seq)) {
3294 + if (async && x->repl->recheck(x, skb, seq)) {
3295 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
3296 goto drop_unlock;
3297 }
3298 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
3299 index a15d2a0..71c80c7 100644
3300 --- a/net/xfrm/xfrm_policy.c
3301 +++ b/net/xfrm/xfrm_policy.c
3302 @@ -1761,7 +1761,7 @@ static struct dst_entry *make_blackhole(struct net *net, u16 family,
3303
3304 if (!afinfo) {
3305 dst_release(dst_orig);
3306 - ret = ERR_PTR(-EINVAL);
3307 + return ERR_PTR(-EINVAL);
3308 } else {
3309 ret = afinfo->blackhole_route(net, dst_orig);
3310 }
3311 diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
3312 index 2f6d11d..3efb07d 100644
3313 --- a/net/xfrm/xfrm_replay.c
3314 +++ b/net/xfrm/xfrm_replay.c
3315 @@ -420,6 +420,18 @@ err:
3316 return -EINVAL;
3317 }
3318
3319 +static int xfrm_replay_recheck_esn(struct xfrm_state *x,
3320 + struct sk_buff *skb, __be32 net_seq)
3321 +{
3322 + if (unlikely(XFRM_SKB_CB(skb)->seq.input.hi !=
3323 + htonl(xfrm_replay_seqhi(x, net_seq)))) {
3324 + x->stats.replay_window++;
3325 + return -EINVAL;
3326 + }
3327 +
3328 + return xfrm_replay_check_esn(x, skb, net_seq);
3329 +}
3330 +
3331 static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
3332 {
3333 unsigned int bitnr, nr, i;
3334 @@ -479,6 +491,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
3335 static struct xfrm_replay xfrm_replay_legacy = {
3336 .advance = xfrm_replay_advance,
3337 .check = xfrm_replay_check,
3338 + .recheck = xfrm_replay_check,
3339 .notify = xfrm_replay_notify,
3340 .overflow = xfrm_replay_overflow,
3341 };
3342 @@ -486,6 +499,7 @@ static struct xfrm_replay xfrm_replay_legacy = {
3343 static struct xfrm_replay xfrm_replay_bmp = {
3344 .advance = xfrm_replay_advance_bmp,
3345 .check = xfrm_replay_check_bmp,
3346 + .recheck = xfrm_replay_check_bmp,
3347 .notify = xfrm_replay_notify_bmp,
3348 .overflow = xfrm_replay_overflow_bmp,
3349 };
3350 @@ -493,6 +507,7 @@ static struct xfrm_replay xfrm_replay_bmp = {
3351 static struct xfrm_replay xfrm_replay_esn = {
3352 .advance = xfrm_replay_advance_esn,
3353 .check = xfrm_replay_check_esn,
3354 + .recheck = xfrm_replay_recheck_esn,
3355 .notify = xfrm_replay_notify_bmp,
3356 .overflow = xfrm_replay_overflow_esn,
3357 };
3358 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
3359 index 7128dde..c8b903d 100644
3360 --- a/net/xfrm/xfrm_user.c
3361 +++ b/net/xfrm/xfrm_user.c
3362 @@ -123,9 +123,21 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
3363 struct nlattr **attrs)
3364 {
3365 struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
3366 + struct xfrm_replay_state_esn *rs;
3367
3368 - if ((p->flags & XFRM_STATE_ESN) && !rt)
3369 - return -EINVAL;
3370 + if (p->flags & XFRM_STATE_ESN) {
3371 + if (!rt)
3372 + return -EINVAL;
3373 +
3374 + rs = nla_data(rt);
3375 +
3376 + if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
3377 + return -EINVAL;
3378 +
3379 + if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
3380 + nla_len(rt) != sizeof(*rs))
3381 + return -EINVAL;
3382 + }
3383
3384 if (!rt)
3385 return 0;
3386 @@ -370,14 +382,15 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es
3387 struct nlattr *rp)
3388 {
3389 struct xfrm_replay_state_esn *up;
3390 + int ulen;
3391
3392 if (!replay_esn || !rp)
3393 return 0;
3394
3395 up = nla_data(rp);
3396 + ulen = xfrm_replay_state_esn_len(up);
3397
3398 - if (xfrm_replay_state_esn_len(replay_esn) !=
3399 - xfrm_replay_state_esn_len(up))
3400 + if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
3401 return -EINVAL;
3402
3403 return 0;
3404 @@ -388,22 +401,28 @@ static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn
3405 struct nlattr *rta)
3406 {
3407 struct xfrm_replay_state_esn *p, *pp, *up;
3408 + int klen, ulen;
3409
3410 if (!rta)
3411 return 0;
3412
3413 up = nla_data(rta);
3414 + klen = xfrm_replay_state_esn_len(up);
3415 + ulen = nla_len(rta) >= klen ? klen : sizeof(*up);
3416
3417 - p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
3418 + p = kzalloc(klen, GFP_KERNEL);
3419 if (!p)
3420 return -ENOMEM;
3421
3422 - pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
3423 + pp = kzalloc(klen, GFP_KERNEL);
3424 if (!pp) {
3425 kfree(p);
3426 return -ENOMEM;
3427 }
3428
3429 + memcpy(p, up, ulen);
3430 + memcpy(pp, up, ulen);
3431 +
3432 *replay_esn = p;
3433 *preplay_esn = pp;
3434
3435 @@ -442,10 +461,11 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *
3436 * somehow made shareable and move it to xfrm_state.c - JHS
3437 *
3438 */
3439 -static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs)
3440 +static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
3441 + int update_esn)
3442 {
3443 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
3444 - struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
3445 + struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
3446 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
3447 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
3448 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
3449 @@ -555,7 +575,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
3450 goto error;
3451
3452 /* override default values from above */
3453 - xfrm_update_ae_params(x, attrs);
3454 + xfrm_update_ae_params(x, attrs, 0);
3455
3456 return x;
3457
3458 @@ -689,6 +709,7 @@ out:
3459
3460 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
3461 {
3462 + memset(p, 0, sizeof(*p));
3463 memcpy(&p->id, &x->id, sizeof(p->id));
3464 memcpy(&p->sel, &x->sel, sizeof(p->sel));
3465 memcpy(&p->lft, &x->lft, sizeof(p->lft));
3466 @@ -742,7 +763,7 @@ static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
3467 return -EMSGSIZE;
3468
3469 algo = nla_data(nla);
3470 - strcpy(algo->alg_name, auth->alg_name);
3471 + strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
3472 memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
3473 algo->alg_key_len = auth->alg_key_len;
3474
3475 @@ -862,6 +883,7 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
3476 {
3477 struct xfrm_dump_info info;
3478 struct sk_buff *skb;
3479 + int err;
3480
3481 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
3482 if (!skb)
3483 @@ -872,9 +894,10 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
3484 info.nlmsg_seq = seq;
3485 info.nlmsg_flags = 0;
3486
3487 - if (dump_one_state(x, 0, &info)) {
3488 + err = dump_one_state(x, 0, &info);
3489 + if (err) {
3490 kfree_skb(skb);
3491 - return NULL;
3492 + return ERR_PTR(err);
3493 }
3494
3495 return skb;
3496 @@ -1297,6 +1320,7 @@ static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy
3497
3498 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
3499 {
3500 + memset(p, 0, sizeof(*p));
3501 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
3502 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
3503 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
3504 @@ -1401,6 +1425,7 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
3505 struct xfrm_user_tmpl *up = &vec[i];
3506 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
3507
3508 + memset(up, 0, sizeof(*up));
3509 memcpy(&up->id, &kp->id, sizeof(up->id));
3510 up->family = kp->encap_family;
3511 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
3512 @@ -1529,6 +1554,7 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
3513 {
3514 struct xfrm_dump_info info;
3515 struct sk_buff *skb;
3516 + int err;
3517
3518 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3519 if (!skb)
3520 @@ -1539,9 +1565,10 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
3521 info.nlmsg_seq = seq;
3522 info.nlmsg_flags = 0;
3523
3524 - if (dump_one_policy(xp, dir, 0, &info) < 0) {
3525 + err = dump_one_policy(xp, dir, 0, &info);
3526 + if (err) {
3527 kfree_skb(skb);
3528 - return NULL;
3529 + return ERR_PTR(err);
3530 }
3531
3532 return skb;
3533 @@ -1794,7 +1821,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
3534 goto out;
3535
3536 spin_lock_bh(&x->lock);
3537 - xfrm_update_ae_params(x, attrs);
3538 + xfrm_update_ae_params(x, attrs, 1);
3539 spin_unlock_bh(&x->lock);
3540
3541 c.event = nlh->nlmsg_type;
3542 diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
3543 index 6a3ee98..978416d 100644
3544 --- a/scripts/Kbuild.include
3545 +++ b/scripts/Kbuild.include
3546 @@ -98,24 +98,24 @@ try-run = $(shell set -e; \
3547 # Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
3548
3549 as-option = $(call try-run,\
3550 - $(CC) $(KBUILD_CFLAGS) $(1) -c -xassembler /dev/null -o "$$TMP",$(1),$(2))
3551 + $(CC) $(KBUILD_CFLAGS) $(1) -c -x assembler /dev/null -o "$$TMP",$(1),$(2))
3552
3553 # as-instr
3554 # Usage: cflags-y += $(call as-instr,instr,option1,option2)
3555
3556 as-instr = $(call try-run,\
3557 - printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" -,$(2),$(3))
3558 + printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
3559
3560 # cc-option
3561 # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
3562
3563 cc-option = $(call try-run,\
3564 - $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -xc /dev/null -o "$$TMP",$(1),$(2))
3565 + $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
3566
3567 # cc-option-yn
3568 # Usage: flag := $(call cc-option-yn,-march=winchip-c6)
3569 cc-option-yn = $(call try-run,\
3570 - $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -xc /dev/null -o "$$TMP",y,n)
3571 + $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
3572
3573 # cc-option-align
3574 # Prefix align with either -falign or -malign
3575 @@ -125,7 +125,7 @@ cc-option-align = $(subst -functions=0,,\
3576 # cc-disable-warning
3577 # Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
3578 cc-disable-warning = $(call try-run,\
3579 - $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -xc /dev/null -o "$$TMP",-Wno-$(strip $(1)))
3580 + $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
3581
3582 # cc-version
3583 # Usage gcc-ver := $(call cc-version)
3584 @@ -143,7 +143,7 @@ cc-ifversion = $(shell [ $(call cc-version, $(CC)) $(1) $(2) ] && echo $(3))
3585 # cc-ldoption
3586 # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
3587 cc-ldoption = $(call try-run,\
3588 - $(CC) $(1) -nostdlib -xc /dev/null -o "$$TMP",$(1),$(2))
3589 + $(CC) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
3590
3591 # ld-option
3592 # Usage: LDFLAGS += $(call ld-option, -X)
3593 @@ -209,7 +209,7 @@ endif
3594 # >$< substitution to preserve $ when reloading .cmd file
3595 # note: when using inline perl scripts [perl -e '...$$t=1;...']
3596 # in $(cmd_xxx) double $$ your perl vars
3597 -make-cmd = $(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1)))))
3598 +make-cmd = $(subst \\,\\\\,$(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1))))))
3599
3600 # Find any prerequisites that is newer than target or that does not exist.
3601 # PHONY targets skipped in both cases.
3602 diff --git a/scripts/gcc-version.sh b/scripts/gcc-version.sh
3603 index debecb5..7f2126d 100644
3604 --- a/scripts/gcc-version.sh
3605 +++ b/scripts/gcc-version.sh
3606 @@ -22,10 +22,10 @@ if [ ${#compiler} -eq 0 ]; then
3607 exit 1
3608 fi
3609
3610 -MAJOR=$(echo __GNUC__ | $compiler -E -xc - | tail -n 1)
3611 -MINOR=$(echo __GNUC_MINOR__ | $compiler -E -xc - | tail -n 1)
3612 +MAJOR=$(echo __GNUC__ | $compiler -E -x c - | tail -n 1)
3613 +MINOR=$(echo __GNUC_MINOR__ | $compiler -E -x c - | tail -n 1)
3614 if [ "x$with_patchlevel" != "x" ] ; then
3615 - PATCHLEVEL=$(echo __GNUC_PATCHLEVEL__ | $compiler -E -xc - | tail -n 1)
3616 + PATCHLEVEL=$(echo __GNUC_PATCHLEVEL__ | $compiler -E -x c - | tail -n 1)
3617 printf "%02d%02d%02d\\n" $MAJOR $MINOR $PATCHLEVEL
3618 else
3619 printf "%02d%02d\\n" $MAJOR $MINOR
3620 diff --git a/scripts/gcc-x86_32-has-stack-protector.sh b/scripts/gcc-x86_32-has-stack-protector.sh
3621 index 29493dc..12dbd0b 100644
3622 --- a/scripts/gcc-x86_32-has-stack-protector.sh
3623 +++ b/scripts/gcc-x86_32-has-stack-protector.sh
3624 @@ -1,6 +1,6 @@
3625 #!/bin/sh
3626
3627 -echo "int foo(void) { char X[200]; return 3; }" | $* -S -xc -c -O0 -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
3628 +echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
3629 if [ "$?" -eq "0" ] ; then
3630 echo y
3631 else
3632 diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
3633 index afaec61..973e8c1 100644
3634 --- a/scripts/gcc-x86_64-has-stack-protector.sh
3635 +++ b/scripts/gcc-x86_64-has-stack-protector.sh
3636 @@ -1,6 +1,6 @@
3637 #!/bin/sh
3638
3639 -echo "int foo(void) { char X[200]; return 3; }" | $* -S -xc -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
3640 +echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
3641 if [ "$?" -eq "0" ] ; then
3642 echo y
3643 else
3644 diff --git a/scripts/kconfig/check.sh b/scripts/kconfig/check.sh
3645 index fa59cbf..854d9c7 100755
3646 --- a/scripts/kconfig/check.sh
3647 +++ b/scripts/kconfig/check.sh
3648 @@ -1,6 +1,6 @@
3649 #!/bin/sh
3650 # Needed for systems without gettext
3651 -$* -xc -o /dev/null - > /dev/null 2>&1 << EOF
3652 +$* -x c -o /dev/null - > /dev/null 2>&1 << EOF
3653 #include <libintl.h>
3654 int main()
3655 {
3656 diff --git a/scripts/kconfig/lxdialog/check-lxdialog.sh b/scripts/kconfig/lxdialog/check-lxdialog.sh
3657 index 82cc3a8..50df490 100644
3658 --- a/scripts/kconfig/lxdialog/check-lxdialog.sh
3659 +++ b/scripts/kconfig/lxdialog/check-lxdialog.sh
3660 @@ -38,7 +38,7 @@ trap "rm -f $tmp" 0 1 2 3 15
3661
3662 # Check if we can link to ncurses
3663 check() {
3664 - $cc -xc - -o $tmp 2>/dev/null <<'EOF'
3665 + $cc -x c - -o $tmp 2>/dev/null <<'EOF'
3666 #include CURSES_LOC
3667 main() {}
3668 EOF
3669 diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
3670 index bccf07dd..3346f42 100644
3671 --- a/scripts/kconfig/streamline_config.pl
3672 +++ b/scripts/kconfig/streamline_config.pl
3673 @@ -463,6 +463,8 @@ while(<CIN>) {
3674 if (defined($configs{$1})) {
3675 if ($localyesconfig) {
3676 $setconfigs{$1} = 'y';
3677 + print "$1=y\n";
3678 + next;
3679 } else {
3680 $setconfigs{$1} = $2;
3681 }
3682 diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
3683 index ad079b6..bdc963e 100644
3684 --- a/sound/drivers/aloop.c
3685 +++ b/sound/drivers/aloop.c
3686 @@ -119,6 +119,7 @@ struct loopback_pcm {
3687 unsigned int period_size_frac;
3688 unsigned long last_jiffies;
3689 struct timer_list timer;
3690 + spinlock_t timer_lock;
3691 };
3692
3693 static struct platform_device *devices[SNDRV_CARDS];
3694 @@ -169,6 +170,7 @@ static void loopback_timer_start(struct loopback_pcm *dpcm)
3695 unsigned long tick;
3696 unsigned int rate_shift = get_rate_shift(dpcm);
3697
3698 + spin_lock(&dpcm->timer_lock);
3699 if (rate_shift != dpcm->pcm_rate_shift) {
3700 dpcm->pcm_rate_shift = rate_shift;
3701 dpcm->period_size_frac = frac_pos(dpcm, dpcm->pcm_period_size);
3702 @@ -181,12 +183,15 @@ static void loopback_timer_start(struct loopback_pcm *dpcm)
3703 tick = (tick + dpcm->pcm_bps - 1) / dpcm->pcm_bps;
3704 dpcm->timer.expires = jiffies + tick;
3705 add_timer(&dpcm->timer);
3706 + spin_unlock(&dpcm->timer_lock);
3707 }
3708
3709 static inline void loopback_timer_stop(struct loopback_pcm *dpcm)
3710 {
3711 + spin_lock(&dpcm->timer_lock);
3712 del_timer(&dpcm->timer);
3713 dpcm->timer.expires = 0;
3714 + spin_unlock(&dpcm->timer_lock);
3715 }
3716
3717 #define CABLE_VALID_PLAYBACK (1 << SNDRV_PCM_STREAM_PLAYBACK)
3718 @@ -659,6 +664,7 @@ static int loopback_open(struct snd_pcm_substream *substream)
3719 dpcm->substream = substream;
3720 setup_timer(&dpcm->timer, loopback_timer_function,
3721 (unsigned long)dpcm);
3722 + spin_lock_init(&dpcm->timer_lock);
3723
3724 cable = loopback->cables[substream->number][dev];
3725 if (!cable) {
3726 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3727 index 52e7a45..e7cb4bd 100644
3728 --- a/sound/pci/hda/patch_realtek.c
3729 +++ b/sound/pci/hda/patch_realtek.c
3730 @@ -6307,6 +6307,12 @@ static int patch_alc269(struct hda_codec *codec)
3731 if (err < 0)
3732 goto error;
3733
3734 + alc_pick_fixup(codec, alc269_fixup_models,
3735 + alc269_fixup_tbl, alc269_fixups);
3736 + alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
3737 +
3738 + alc_auto_parse_customize_define(codec);
3739 +
3740 if (codec->vendor_id == 0x10ec0269) {
3741 spec->codec_variant = ALC269_TYPE_ALC269VA;
3742 switch (alc_get_coef0(codec) & 0x00f0) {
3743 @@ -6331,12 +6337,6 @@ static int patch_alc269(struct hda_codec *codec)
3744 alc269_fill_coef(codec);
3745 }
3746
3747 - alc_pick_fixup(codec, alc269_fixup_models,
3748 - alc269_fixup_tbl, alc269_fixups);
3749 - alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
3750 -
3751 - alc_auto_parse_customize_define(codec);
3752 -
3753 /* automatic parse from the BIOS config */
3754 err = alc269_parse_auto_config(codec);
3755 if (err < 0)
3756 diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
3757 index b9567bc..757a52a 100644
3758 --- a/sound/soc/codecs/wm9712.c
3759 +++ b/sound/soc/codecs/wm9712.c
3760 @@ -146,7 +146,7 @@ SOC_SINGLE("Playback Attenuate (-6dB) Switch", AC97_MASTER_TONE, 6, 1, 0),
3761 SOC_SINGLE("Bass Volume", AC97_MASTER_TONE, 8, 15, 1),
3762 SOC_SINGLE("Treble Volume", AC97_MASTER_TONE, 0, 15, 1),
3763
3764 -SOC_SINGLE("Capture ADC Switch", AC97_REC_GAIN, 15, 1, 1),
3765 +SOC_SINGLE("Capture Switch", AC97_REC_GAIN, 15, 1, 1),
3766 SOC_ENUM("Capture Volume Steps", wm9712_enum[6]),
3767 SOC_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 63, 1),
3768 SOC_SINGLE("Capture ZC Switch", AC97_REC_GAIN, 7, 1, 0),
3769 diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
3770 index ab23869..8a818a4 100644
3771 --- a/sound/usb/mixer.c
3772 +++ b/sound/usb/mixer.c
3773 @@ -1247,6 +1247,13 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
3774 /* disable non-functional volume control */
3775 master_bits &= ~UAC_CONTROL_BIT(UAC_FU_VOLUME);
3776 break;
3777 + case USB_ID(0x1130, 0xf211):
3778 + snd_printk(KERN_INFO
3779 + "usbmixer: volume control quirk for Tenx TP6911 Audio Headset\n");
3780 + /* disable non-functional volume control */
3781 + channels = 0;
3782 + break;
3783 +
3784 }
3785 if (channels > 0)
3786 first_ch_bits = snd_usb_combine_bytes(bmaControls + csize, csize);
3787 diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
3788 index d89ab4c..63128cd 100644
3789 --- a/sound/usb/quirks-table.h
3790 +++ b/sound/usb/quirks-table.h
3791 @@ -2751,6 +2751,59 @@ YAMAHA_DEVICE(0x7010, "UB99"),
3792 }
3793 },
3794
3795 +/* Microsoft XboxLive Headset/Xbox Communicator */
3796 +{
3797 + USB_DEVICE(0x045e, 0x0283),
3798 + .bInterfaceClass = USB_CLASS_PER_INTERFACE,
3799 + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
3800 + .vendor_name = "Microsoft",
3801 + .product_name = "XboxLive Headset/Xbox Communicator",
3802 + .ifnum = QUIRK_ANY_INTERFACE,
3803 + .type = QUIRK_COMPOSITE,
3804 + .data = &(const struct snd_usb_audio_quirk[]) {
3805 + {
3806 + /* playback */
3807 + .ifnum = 0,
3808 + .type = QUIRK_AUDIO_FIXED_ENDPOINT,
3809 + .data = &(const struct audioformat) {
3810 + .formats = SNDRV_PCM_FMTBIT_S16_LE,
3811 + .channels = 1,
3812 + .iface = 0,
3813 + .altsetting = 0,
3814 + .altset_idx = 0,
3815 + .attributes = 0,
3816 + .endpoint = 0x04,
3817 + .ep_attr = 0x05,
3818 + .rates = SNDRV_PCM_RATE_CONTINUOUS,
3819 + .rate_min = 22050,
3820 + .rate_max = 22050
3821 + }
3822 + },
3823 + {
3824 + /* capture */
3825 + .ifnum = 1,
3826 + .type = QUIRK_AUDIO_FIXED_ENDPOINT,
3827 + .data = &(const struct audioformat) {
3828 + .formats = SNDRV_PCM_FMTBIT_S16_LE,
3829 + .channels = 1,
3830 + .iface = 1,
3831 + .altsetting = 0,
3832 + .altset_idx = 0,
3833 + .attributes = 0,
3834 + .endpoint = 0x85,
3835 + .ep_attr = 0x05,
3836 + .rates = SNDRV_PCM_RATE_CONTINUOUS,
3837 + .rate_min = 16000,
3838 + .rate_max = 16000
3839 + }
3840 + },
3841 + {
3842 + .ifnum = -1
3843 + }
3844 + }
3845 + }
3846 +},
3847 +
3848 {
3849 /*
3850 * Some USB MIDI devices don't have an audio control interface,
3851 diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c
3852 index f759f4f..fd2f922 100644
3853 --- a/tools/lguest/lguest.c
3854 +++ b/tools/lguest/lguest.c
3855 @@ -1299,6 +1299,7 @@ static struct device *new_device(const char *name, u16 type)
3856 dev->feature_len = 0;
3857 dev->num_vq = 0;
3858 dev->running = false;
3859 + dev->next = NULL;
3860
3861 /*
3862 * Append to device list. Prepending to a single-linked list is
3863 diff --git a/tools/perf/Makefile b/tools/perf/Makefile
3864 index 92271d3..c3dd3d4 100644
3865 --- a/tools/perf/Makefile
3866 +++ b/tools/perf/Makefile
3867 @@ -70,7 +70,7 @@ ifeq ($(ARCH),x86_64)
3868 ARCH := x86
3869 IS_X86_64 := 0
3870 ifeq (, $(findstring m32,$(EXTRA_CFLAGS)))
3871 - IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -xc - | tail -n 1)
3872 + IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -x c - | tail -n 1)
3873 endif
3874 ifeq (${IS_X86_64}, 1)
3875 RAW_ARCH := x86_64
3876 diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
3877 index a93e06c..cf397bd 100644
3878 --- a/tools/power/cpupower/Makefile
3879 +++ b/tools/power/cpupower/Makefile
3880 @@ -111,7 +111,7 @@ GMO_FILES = ${shell for HLANG in ${LANGUAGES}; do echo $(OUTPUT)po/$$HLANG.gmo;
3881 export CROSS CC AR STRIP RANLIB CFLAGS LDFLAGS LIB_OBJS
3882
3883 # check if compiler option is supported
3884 -cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -xc /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
3885 +cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
3886
3887 # use '-Os' optimization if available, else use -O2
3888 OPTIMIZATION := $(call cc-supports,-Os,-O2)