Magellan Linux

Contents of /trunk/kernel-alx/patches-3.4/0107-3.4.8-all-fixes.patch



Revision 1945
Wed Nov 14 15:23:43 2012 UTC (11 years, 5 months ago) by niro
File size: 152086 bytes
3.4.18-alx-r1
1 diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
2 index 03f7897..286ec04 100644
3 --- a/Documentation/sound/alsa/HD-Audio-Models.txt
4 +++ b/Documentation/sound/alsa/HD-Audio-Models.txt
5 @@ -21,10 +21,11 @@ ALC267/268
6 ==========
7 N/A
8
9 -ALC269
10 +ALC269/270/275/276/280/282
11 ======
12 laptop-amic Laptops with analog-mic input
13 laptop-dmic Laptops with digital-mic input
14 + lenovo-dock Enables docking station I/O for some Lenovos
15
16 ALC662/663/272
17 ==============
18 diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
19 index 4a7b54b..b0714d8 100644
20 --- a/Documentation/stable_kernel_rules.txt
21 +++ b/Documentation/stable_kernel_rules.txt
22 @@ -1,4 +1,4 @@
23 -Everything you ever wanted to know about Linux 2.6 -stable releases.
24 +Everything you ever wanted to know about Linux -stable releases.
25
26 Rules on what kind of patches are accepted, and which ones are not, into the
27 "-stable" tree:
28 @@ -42,10 +42,10 @@ Procedure for submitting patches to the -stable tree:
29 cherry-picked than this can be specified in the following format in
30 the sign-off area:
31
32 - Cc: <stable@vger.kernel.org> # .32.x: a1f84a3: sched: Check for idle
33 - Cc: <stable@vger.kernel.org> # .32.x: 1b9508f: sched: Rate-limit newidle
34 - Cc: <stable@vger.kernel.org> # .32.x: fd21073: sched: Fix affinity logic
35 - Cc: <stable@vger.kernel.org> # .32.x
36 + Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
37 + Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
38 + Cc: <stable@vger.kernel.org> # 3.3.x: fd21073: sched: Fix affinity logic
39 + Cc: <stable@vger.kernel.org> # 3.3.x
40 Signed-off-by: Ingo Molnar <mingo@elte.hu>
41
42 The tag sequence has the meaning of:
43 @@ -79,6 +79,15 @@ Review cycle:
44 security kernel team, and not go through the normal review cycle.
45 Contact the kernel security team for more details on this procedure.
46
47 +Trees:
48 +
49 + - The queues of patches, for both completed versions and in progress
50 + versions can be found at:
51 + http://git.kernel.org/?p=linux/kernel/git/stable/stable-queue.git
52 + - The finalized and tagged releases of all stable kernels can be found
53 + in separate branches per version at:
54 + http://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git
55 +
56
57 Review committee:
58
59 diff --git a/arch/arm/mach-omap2/opp.c b/arch/arm/mach-omap2/opp.c
60 index de6d464..d8f6dbf 100644
61 --- a/arch/arm/mach-omap2/opp.c
62 +++ b/arch/arm/mach-omap2/opp.c
63 @@ -53,7 +53,7 @@ int __init omap_init_opp_table(struct omap_opp_def *opp_def,
64 omap_table_init = 1;
65
66 /* Lets now register with OPP library */
67 - for (i = 0; i < opp_def_size; i++) {
68 + for (i = 0; i < opp_def_size; i++, opp_def++) {
69 struct omap_hwmod *oh;
70 struct device *dev;
71
72 @@ -86,7 +86,6 @@ int __init omap_init_opp_table(struct omap_opp_def *opp_def,
73 __func__, opp_def->freq,
74 opp_def->hwmod_name, i, r);
75 }
76 - opp_def++;
77 }
78
79 return 0;
80 diff --git a/arch/m68k/include/asm/entry.h b/arch/m68k/include/asm/entry.h
81 index 622138d..34c2520 100644
82 --- a/arch/m68k/include/asm/entry.h
83 +++ b/arch/m68k/include/asm/entry.h
84 @@ -33,8 +33,8 @@
85
86 /* the following macro is used when enabling interrupts */
87 #if defined(MACH_ATARI_ONLY)
88 - /* block out HSYNC on the atari */
89 -#define ALLOWINT (~0x400)
90 + /* block out HSYNC = ipl 2 on the atari */
91 +#define ALLOWINT (~0x500)
92 #define MAX_NOINT_IPL 3
93 #else
94 /* portable version */
95 diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
96 index 8623f8d..9a5932e 100644
97 --- a/arch/m68k/kernel/sys_m68k.c
98 +++ b/arch/m68k/kernel/sys_m68k.c
99 @@ -479,9 +479,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
100 goto bad_access;
101 }
102
103 - mem_value = *mem;
104 + /*
105 + * No need to check for EFAULT; we know that the page is
106 + * present and writable.
107 + */
108 + __get_user(mem_value, mem);
109 if (mem_value == oldval)
110 - *mem = newval;
111 + __put_user(newval, mem);
112
113 pte_unmap_unlock(pte, ptl);
114 up_read(&mm->mmap_sem);
115 diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
116 index 84d0639..b77f56b 100644
117 --- a/arch/mips/kernel/kspd.c
118 +++ b/arch/mips/kernel/kspd.c
119 @@ -323,7 +323,7 @@ static void sp_cleanup(void)
120 fdt = files_fdtable(files);
121 for (;;) {
122 unsigned long set;
123 - i = j * __NFDBITS;
124 + i = j * BITS_PER_LONG;
125 if (i >= fdt->max_fds)
126 break;
127 set = fdt->open_fds[j++];
128 diff --git a/arch/powerpc/boot/dts/p1022ds.dtsi b/arch/powerpc/boot/dts/p1022ds.dtsi
129 index 7cdb505..1b0673e 100644
130 --- a/arch/powerpc/boot/dts/p1022ds.dtsi
131 +++ b/arch/powerpc/boot/dts/p1022ds.dtsi
132 @@ -33,22 +33,6 @@
133 */
134
135 &board_lbc {
136 - /*
137 - * This node is used to access the pixis via "indirect" mode,
138 - * which is done by writing the pixis register index to chip
139 - * select 0 and the value to/from chip select 1. Indirect
140 - * mode is the only way to access the pixis when DIU video
141 - * is enabled. Note that this assumes that the first column
142 - * of the 'ranges' property above is the chip select number.
143 - */
144 - board-control@0,0 {
145 - compatible = "fsl,p1022ds-indirect-pixis";
146 - reg = <0x0 0x0 1 /* CS0 */
147 - 0x1 0x0 1>; /* CS1 */
148 - interrupt-parent = <&mpic>;
149 - interrupts = <8 0 0 0>;
150 - };
151 -
152 nor@0,0 {
153 #address-cells = <1>;
154 #size-cells = <1>;
155 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
156 index 9d7f0fb..cae0ed7 100644
157 --- a/arch/powerpc/include/asm/reg.h
158 +++ b/arch/powerpc/include/asm/reg.h
159 @@ -1022,7 +1022,8 @@
160 /* Macros for setting and retrieving special purpose registers */
161 #ifndef __ASSEMBLY__
162 #define mfmsr() ({unsigned long rval; \
163 - asm volatile("mfmsr %0" : "=r" (rval)); rval;})
164 + asm volatile("mfmsr %0" : "=r" (rval) : \
165 + : "memory"); rval;})
166 #ifdef CONFIG_PPC_BOOK3S_64
167 #define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
168 : : "r" (v) : "memory")
169 diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
170 index bf99cfa..6324008 100644
171 --- a/arch/powerpc/kernel/ftrace.c
172 +++ b/arch/powerpc/kernel/ftrace.c
173 @@ -245,9 +245,9 @@ __ftrace_make_nop(struct module *mod,
174
175 /*
176 * On PPC32 the trampoline looks like:
177 - * 0x3d, 0x60, 0x00, 0x00 lis r11,sym@ha
178 - * 0x39, 0x6b, 0x00, 0x00 addi r11,r11,sym@l
179 - * 0x7d, 0x69, 0x03, 0xa6 mtctr r11
180 + * 0x3d, 0x80, 0x00, 0x00 lis r12,sym@ha
181 + * 0x39, 0x8c, 0x00, 0x00 addi r12,r12,sym@l
182 + * 0x7d, 0x89, 0x03, 0xa6 mtctr r12
183 * 0x4e, 0x80, 0x04, 0x20 bctr
184 */
185
186 @@ -262,9 +262,9 @@ __ftrace_make_nop(struct module *mod,
187 pr_devel(" %08x %08x ", jmp[0], jmp[1]);
188
189 /* verify that this is what we expect it to be */
190 - if (((jmp[0] & 0xffff0000) != 0x3d600000) ||
191 - ((jmp[1] & 0xffff0000) != 0x396b0000) ||
192 - (jmp[2] != 0x7d6903a6) ||
193 + if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
194 + ((jmp[1] & 0xffff0000) != 0x398c0000) ||
195 + (jmp[2] != 0x7d8903a6) ||
196 (jmp[3] != 0x4e800420)) {
197 printk(KERN_ERR "Not a trampoline\n");
198 return -EINVAL;
199 diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
200 index f700c81..978330c 100644
201 --- a/arch/powerpc/platforms/85xx/p1022_ds.c
202 +++ b/arch/powerpc/platforms/85xx/p1022_ds.c
203 @@ -27,6 +27,7 @@
204 #include <sysdev/fsl_pci.h>
205 #include <asm/udbg.h>
206 #include <asm/fsl_guts.h>
207 +#include <asm/fsl_lbc.h>
208 #include "smp.h"
209
210 #include "mpc85xx.h"
211 @@ -142,17 +143,73 @@ static void p1022ds_set_gamma_table(enum fsl_diu_monitor_port port,
212 {
213 }
214
215 +struct fsl_law {
216 + u32 lawbar;
217 + u32 reserved1;
218 + u32 lawar;
219 + u32 reserved[5];
220 +};
221 +
222 +#define LAWBAR_MASK 0x00F00000
223 +#define LAWBAR_SHIFT 12
224 +
225 +#define LAWAR_EN 0x80000000
226 +#define LAWAR_TGT_MASK 0x01F00000
227 +#define LAW_TRGT_IF_LBC (0x04 << 20)
228 +
229 +#define LAWAR_MASK (LAWAR_EN | LAWAR_TGT_MASK)
230 +#define LAWAR_MATCH (LAWAR_EN | LAW_TRGT_IF_LBC)
231 +
232 +#define BR_BA 0xFFFF8000
233 +
234 +/*
235 + * Map a BRx value to a physical address
236 + *
237 + * The localbus BRx registers only store the lower 32 bits of the address. To
238 + * obtain the upper four bits, we need to scan the LAW table. The entry which
239 + * maps to the localbus will contain the upper four bits.
240 + */
241 +static phys_addr_t lbc_br_to_phys(const void *ecm, unsigned int count, u32 br)
242 +{
243 +#ifndef CONFIG_PHYS_64BIT
244 + /*
245 + * If we only have 32-bit addressing, then the BRx address *is* the
246 + * physical address.
247 + */
248 + return br & BR_BA;
249 +#else
250 + const struct fsl_law *law = ecm + 0xc08;
251 + unsigned int i;
252 +
253 + for (i = 0; i < count; i++) {
254 + u64 lawbar = in_be32(&law[i].lawbar);
255 + u32 lawar = in_be32(&law[i].lawar);
256 +
257 + if ((lawar & LAWAR_MASK) == LAWAR_MATCH)
258 + /* Extract the upper four bits */
259 + return (br & BR_BA) | ((lawbar & LAWBAR_MASK) << 12);
260 + }
261 +
262 + return 0;
263 +#endif
264 +}
265 +
266 /**
267 * p1022ds_set_monitor_port: switch the output to a different monitor port
268 - *
269 */
270 static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
271 {
272 struct device_node *guts_node;
273 - struct device_node *indirect_node = NULL;
274 + struct device_node *lbc_node = NULL;
275 + struct device_node *law_node = NULL;
276 struct ccsr_guts __iomem *guts;
277 + struct fsl_lbc_regs *lbc = NULL;
278 + void *ecm = NULL;
279 u8 __iomem *lbc_lcs0_ba = NULL;
280 u8 __iomem *lbc_lcs1_ba = NULL;
281 + phys_addr_t cs0_addr, cs1_addr;
282 + const __be32 *iprop;
283 + unsigned int num_laws;
284 u8 b;
285
286 /* Map the global utilities registers. */
287 @@ -168,25 +225,43 @@ static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
288 goto exit;
289 }
290
291 - indirect_node = of_find_compatible_node(NULL, NULL,
292 - "fsl,p1022ds-indirect-pixis");
293 - if (!indirect_node) {
294 - pr_err("p1022ds: missing pixis indirect mode node\n");
295 + lbc_node = of_find_compatible_node(NULL, NULL, "fsl,p1022-elbc");
296 + if (!lbc_node) {
297 + pr_err("p1022ds: missing localbus node\n");
298 goto exit;
299 }
300
301 - lbc_lcs0_ba = of_iomap(indirect_node, 0);
302 - if (!lbc_lcs0_ba) {
303 - pr_err("p1022ds: could not map localbus chip select 0\n");
304 + lbc = of_iomap(lbc_node, 0);
305 + if (!lbc) {
306 + pr_err("p1022ds: could not map localbus node\n");
307 goto exit;
308 }
309
310 - lbc_lcs1_ba = of_iomap(indirect_node, 1);
311 - if (!lbc_lcs1_ba) {
312 - pr_err("p1022ds: could not map localbus chip select 1\n");
313 + law_node = of_find_compatible_node(NULL, NULL, "fsl,ecm-law");
314 + if (!law_node) {
315 + pr_err("p1022ds: missing local access window node\n");
316 goto exit;
317 }
318
319 + ecm = of_iomap(law_node, 0);
320 + if (!ecm) {
321 + pr_err("p1022ds: could not map local access window node\n");
322 + goto exit;
323 + }
324 +
325 + iprop = of_get_property(law_node, "fsl,num-laws", 0);
326 + if (!iprop) {
327 + pr_err("p1022ds: LAW node is missing fsl,num-laws property\n");
328 + goto exit;
329 + }
330 + num_laws = be32_to_cpup(iprop);
331 +
332 + cs0_addr = lbc_br_to_phys(ecm, num_laws, in_be32(&lbc->bank[0].br));
333 + cs1_addr = lbc_br_to_phys(ecm, num_laws, in_be32(&lbc->bank[1].br));
334 +
335 + lbc_lcs0_ba = ioremap(cs0_addr, 1);
336 + lbc_lcs1_ba = ioremap(cs1_addr, 1);
337 +
338 /* Make sure we're in indirect mode first. */
339 if ((in_be32(&guts->pmuxcr) & PMUXCR_ELBCDIU_MASK) !=
340 PMUXCR_ELBCDIU_DIU) {
341 @@ -254,10 +329,15 @@ exit:
342 iounmap(lbc_lcs1_ba);
343 if (lbc_lcs0_ba)
344 iounmap(lbc_lcs0_ba);
345 + if (lbc)
346 + iounmap(lbc);
347 + if (ecm)
348 + iounmap(ecm);
349 if (guts)
350 iounmap(guts);
351
352 - of_node_put(indirect_node);
353 + of_node_put(law_node);
354 + of_node_put(lbc_node);
355 of_node_put(guts_node);
356 }
357
358 diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
359 index 4cb375c..fb50631 100644
360 --- a/arch/powerpc/platforms/pseries/eeh_event.c
361 +++ b/arch/powerpc/platforms/pseries/eeh_event.c
362 @@ -85,8 +85,10 @@ static int eeh_event_handler(void * dummy)
363 set_current_state(TASK_INTERRUPTIBLE); /* Don't add to load average */
364 edev = handle_eeh_events(event);
365
366 - eeh_clear_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING);
367 - pci_dev_put(edev->pdev);
368 + if (edev) {
369 + eeh_clear_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING);
370 + pci_dev_put(edev->pdev);
371 + }
372
373 kfree(event);
374 mutex_unlock(&eeh_event_mutex);
375 diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
376 index 5d09e40..5d211f7 100644
377 --- a/arch/s390/include/asm/mmu_context.h
378 +++ b/arch/s390/include/asm/mmu_context.h
379 @@ -13,7 +13,6 @@
380 #include <asm/uaccess.h>
381 #include <asm/tlbflush.h>
382 #include <asm/ctl_reg.h>
383 -#include <asm-generic/mm_hooks.h>
384
385 static inline int init_new_context(struct task_struct *tsk,
386 struct mm_struct *mm)
387 @@ -93,4 +92,17 @@ static inline void activate_mm(struct mm_struct *prev,
388 switch_mm(prev, next, current);
389 }
390
391 +static inline void arch_dup_mmap(struct mm_struct *oldmm,
392 + struct mm_struct *mm)
393 +{
394 +#ifdef CONFIG_64BIT
395 + if (oldmm->context.asce_limit < mm->context.asce_limit)
396 + crst_table_downgrade(mm, oldmm->context.asce_limit);
397 +#endif
398 +}
399 +
400 +static inline void arch_exit_mmap(struct mm_struct *mm)
401 +{
402 +}
403 +
404 #endif /* __S390_MMU_CONTEXT_H */
405 diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
406 index d499b30..8b6f62e 100644
407 --- a/arch/s390/include/asm/processor.h
408 +++ b/arch/s390/include/asm/processor.h
409 @@ -129,7 +129,9 @@ struct stack_frame {
410 regs->psw.mask = psw_user_bits | PSW_MASK_BA; \
411 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
412 regs->gprs[15] = new_stackp; \
413 + __tlb_flush_mm(current->mm); \
414 crst_table_downgrade(current->mm, 1UL << 31); \
415 + update_mm(current->mm, current); \
416 } while (0)
417
418 /* Forward declaration, a strange C thing */
419 diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
420 index 6e0073e..07c7bf4 100644
421 --- a/arch/s390/kernel/processor.c
422 +++ b/arch/s390/kernel/processor.c
423 @@ -26,12 +26,14 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
424 void __cpuinit cpu_init(void)
425 {
426 struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
427 + struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
428
429 get_cpu_id(id);
430 atomic_inc(&init_mm.mm_count);
431 current->active_mm = &init_mm;
432 BUG_ON(current->mm);
433 enter_lazy_tlb(&init_mm, current);
434 + memset(idle, 0, sizeof(*idle));
435 }
436
437 /*
438 diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
439 index 1f77227..c7b8822 100644
440 --- a/arch/s390/kernel/smp.c
441 +++ b/arch/s390/kernel/smp.c
442 @@ -1034,14 +1034,11 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
443 unsigned int cpu = (unsigned int)(long)hcpu;
444 struct cpu *c = &pcpu_devices[cpu].cpu;
445 struct device *s = &c->dev;
446 - struct s390_idle_data *idle;
447 int err = 0;
448
449 switch (action) {
450 case CPU_ONLINE:
451 case CPU_ONLINE_FROZEN:
452 - idle = &per_cpu(s390_idle, cpu);
453 - memset(idle, 0, sizeof(struct s390_idle_data));
454 err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
455 break;
456 case CPU_DEAD:
457 diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
458 index 4e66860..f2b11ee 100644
459 --- a/arch/s390/mm/fault.c
460 +++ b/arch/s390/mm/fault.c
461 @@ -443,6 +443,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
462 struct pt_regs regs;
463 int access, fault;
464
465 + /* Emulate a uaccess fault from kernel mode. */
466 regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
467 if (!irqs_disabled())
468 regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
469 @@ -452,12 +453,12 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
470 regs.int_parm_long = (uaddr & PAGE_MASK) | 2;
471 access = write ? VM_WRITE : VM_READ;
472 fault = do_exception(&regs, access);
473 - if (unlikely(fault)) {
474 - if (fault & VM_FAULT_OOM)
475 - return -EFAULT;
476 - else if (fault & VM_FAULT_SIGBUS)
477 - do_sigbus(&regs);
478 - }
479 + /*
480 + * Since the fault happened in kernel mode while performing a uaccess
481 + * all we need to do now is emulating a fixup in case "fault" is not
482 + * zero.
483 + * For the calling uaccess functions this results always in -EFAULT.
484 + */
485 return fault ? -EFAULT : 0;
486 }
487
488 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
489 index 2857c48..a64fe53 100644
490 --- a/arch/s390/mm/mmap.c
491 +++ b/arch/s390/mm/mmap.c
492 @@ -105,9 +105,15 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
493
494 int s390_mmap_check(unsigned long addr, unsigned long len)
495 {
496 + int rc;
497 +
498 if (!is_compat_task() &&
499 - len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
500 - return crst_table_upgrade(current->mm, 1UL << 53);
501 + len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) {
502 + rc = crst_table_upgrade(current->mm, 1UL << 53);
503 + if (rc)
504 + return rc;
505 + update_mm(current->mm, current);
506 + }
507 return 0;
508 }
509
510 @@ -127,6 +133,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
511 rc = crst_table_upgrade(mm, 1UL << 53);
512 if (rc)
513 return (unsigned long) rc;
514 + update_mm(mm, current);
515 area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
516 }
517 return area;
518 @@ -149,6 +156,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
519 rc = crst_table_upgrade(mm, 1UL << 53);
520 if (rc)
521 return (unsigned long) rc;
522 + update_mm(mm, current);
523 area = arch_get_unmapped_area_topdown(filp, addr, len,
524 pgoff, flags);
525 }
526 diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
527 index 6e765bf..87f0efd 100644
528 --- a/arch/s390/mm/pgtable.c
529 +++ b/arch/s390/mm/pgtable.c
530 @@ -85,7 +85,6 @@ repeat:
531 crst_table_free(mm, table);
532 if (mm->context.asce_limit < limit)
533 goto repeat;
534 - update_mm(mm, current);
535 return 0;
536 }
537
538 @@ -93,9 +92,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
539 {
540 pgd_t *pgd;
541
542 - if (mm->context.asce_limit <= limit)
543 - return;
544 - __tlb_flush_mm(mm);
545 while (mm->context.asce_limit > limit) {
546 pgd = mm->pgd;
547 switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
548 @@ -118,7 +114,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
549 mm->task_size = mm->context.asce_limit;
550 crst_table_free(mm, (unsigned long *) pgd);
551 }
552 - update_mm(mm, current);
553 }
554 #endif
555
556 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
557 index 61604ae..0d2db0e 100644
558 --- a/arch/x86/kernel/cpu/mcheck/mce.c
559 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
560 @@ -1180,6 +1180,7 @@ void mce_notify_process(void)
561 {
562 unsigned long pfn;
563 struct mce_info *mi = mce_find_info();
564 + int flags = MF_ACTION_REQUIRED;
565
566 if (!mi)
567 mce_panic("Lost physical address for unconsumed uncorrectable error", NULL, NULL);
568 @@ -1194,8 +1195,9 @@ void mce_notify_process(void)
569 * doomed. We still need to mark the page as poisoned and alert any
570 * other users of the page.
571 */
572 - if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0 ||
573 - mi->restartable == 0) {
574 + if (!mi->restartable)
575 + flags |= MF_MUST_KILL;
576 + if (memory_failure(pfn, MCE_VECTOR, flags) < 0) {
577 pr_err("Memory error not recovered");
578 force_sig(SIGBUS, current);
579 }
580 diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
581 index 6512b20..d1fcbc0 100644
582 --- a/drivers/acpi/ac.c
583 +++ b/drivers/acpi/ac.c
584 @@ -292,7 +292,9 @@ static int acpi_ac_add(struct acpi_device *device)
585 ac->charger.properties = ac_props;
586 ac->charger.num_properties = ARRAY_SIZE(ac_props);
587 ac->charger.get_property = get_ac_property;
588 - power_supply_register(&ac->device->dev, &ac->charger);
589 + result = power_supply_register(&ac->device->dev, &ac->charger);
590 + if (result)
591 + goto end;
592
593 printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
594 acpi_device_name(device), acpi_device_bid(device),
595 diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
596 index 6686b1e..00a7836 100644
597 --- a/drivers/acpi/apei/apei-base.c
598 +++ b/drivers/acpi/apei/apei-base.c
599 @@ -586,6 +586,11 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
600 }
601 *access_bit_width = 1UL << (access_size_code + 2);
602
603 + /* Fixup common BIOS bug */
604 + if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 &&
605 + *access_bit_width < 32)
606 + *access_bit_width = 32;
607 +
608 if ((bit_width + bit_offset) > *access_bit_width) {
609 pr_warning(FW_BUG APEI_PFX
610 "Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n",
611 diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
612 index 3085f9b..f7eff25 100644
613 --- a/drivers/base/power/main.c
614 +++ b/drivers/base/power/main.c
615 @@ -979,8 +979,16 @@ static int dpm_suspend_late(pm_message_t state)
616 int dpm_suspend_end(pm_message_t state)
617 {
618 int error = dpm_suspend_late(state);
619 + if (error)
620 + return error;
621
622 - return error ? : dpm_suspend_noirq(state);
623 + error = dpm_suspend_noirq(state);
624 + if (error) {
625 + dpm_resume_early(state);
626 + return error;
627 + }
628 +
629 + return 0;
630 }
631 EXPORT_SYMBOL_GPL(dpm_suspend_end);
632
633 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
634 index ad7c732..08427ab 100644
635 --- a/drivers/char/tpm/tpm.c
636 +++ b/drivers/char/tpm/tpm.c
637 @@ -827,10 +827,10 @@ EXPORT_SYMBOL_GPL(tpm_pcr_extend);
638 int tpm_do_selftest(struct tpm_chip *chip)
639 {
640 int rc;
641 - u8 digest[TPM_DIGEST_SIZE];
642 unsigned int loops;
643 unsigned int delay_msec = 1000;
644 unsigned long duration;
645 + struct tpm_cmd_t cmd;
646
647 duration = tpm_calc_ordinal_duration(chip,
648 TPM_ORD_CONTINUE_SELFTEST);
649 @@ -845,7 +845,15 @@ int tpm_do_selftest(struct tpm_chip *chip)
650 return rc;
651
652 do {
653 - rc = __tpm_pcr_read(chip, 0, digest);
654 + /* Attempt to read a PCR value */
655 + cmd.header.in = pcrread_header;
656 + cmd.params.pcrread_in.pcr_idx = cpu_to_be32(0);
657 + rc = tpm_transmit(chip, (u8 *) &cmd, READ_PCR_RESULT_SIZE);
658 +
659 + if (rc < TPM_HEADER_SIZE)
660 + return -EFAULT;
661 +
662 + rc = be32_to_cpu(cmd.header.out.return_code);
663 if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
664 dev_info(chip->dev,
665 "TPM is disabled/deactivated (0x%X)\n", rc);
666 diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc b/drivers/gpu/drm/nouveau/nva3_copy.fuc
667 index abc3662..219850d 100644
668 --- a/drivers/gpu/drm/nouveau/nva3_copy.fuc
669 +++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc
670 @@ -119,9 +119,9 @@ dispatch_dma:
671 // mthd 0x030c-0x0340, various stuff
672 .b16 0xc3 14
673 .b32 #ctx_src_address_high ~0x000000ff
674 -.b32 #ctx_src_address_low ~0xfffffff0
675 +.b32 #ctx_src_address_low ~0xffffffff
676 .b32 #ctx_dst_address_high ~0x000000ff
677 -.b32 #ctx_dst_address_low ~0xfffffff0
678 +.b32 #ctx_dst_address_low ~0xffffffff
679 .b32 #ctx_src_pitch ~0x0007ffff
680 .b32 #ctx_dst_pitch ~0x0007ffff
681 .b32 #ctx_xcnt ~0x0000ffff
682 diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
683 index 1f33fbd..37d6de3 100644
684 --- a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
685 +++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
686 @@ -1,37 +1,72 @@
687 -uint32_t nva3_pcopy_data[] = {
688 +u32 nva3_pcopy_data[] = {
689 +/* 0x0000: ctx_object */
690 0x00000000,
691 +/* 0x0004: ctx_dma */
692 +/* 0x0004: ctx_dma_query */
693 0x00000000,
694 +/* 0x0008: ctx_dma_src */
695 0x00000000,
696 +/* 0x000c: ctx_dma_dst */
697 0x00000000,
698 +/* 0x0010: ctx_query_address_high */
699 0x00000000,
700 +/* 0x0014: ctx_query_address_low */
701 0x00000000,
702 +/* 0x0018: ctx_query_counter */
703 0x00000000,
704 +/* 0x001c: ctx_src_address_high */
705 0x00000000,
706 +/* 0x0020: ctx_src_address_low */
707 0x00000000,
708 +/* 0x0024: ctx_src_pitch */
709 0x00000000,
710 +/* 0x0028: ctx_src_tile_mode */
711 0x00000000,
712 +/* 0x002c: ctx_src_xsize */
713 0x00000000,
714 +/* 0x0030: ctx_src_ysize */
715 0x00000000,
716 +/* 0x0034: ctx_src_zsize */
717 0x00000000,
718 +/* 0x0038: ctx_src_zoff */
719 0x00000000,
720 +/* 0x003c: ctx_src_xoff */
721 0x00000000,
722 +/* 0x0040: ctx_src_yoff */
723 0x00000000,
724 +/* 0x0044: ctx_src_cpp */
725 0x00000000,
726 +/* 0x0048: ctx_dst_address_high */
727 0x00000000,
728 +/* 0x004c: ctx_dst_address_low */
729 0x00000000,
730 +/* 0x0050: ctx_dst_pitch */
731 0x00000000,
732 +/* 0x0054: ctx_dst_tile_mode */
733 0x00000000,
734 +/* 0x0058: ctx_dst_xsize */
735 0x00000000,
736 +/* 0x005c: ctx_dst_ysize */
737 0x00000000,
738 +/* 0x0060: ctx_dst_zsize */
739 0x00000000,
740 +/* 0x0064: ctx_dst_zoff */
741 0x00000000,
742 +/* 0x0068: ctx_dst_xoff */
743 0x00000000,
744 +/* 0x006c: ctx_dst_yoff */
745 0x00000000,
746 +/* 0x0070: ctx_dst_cpp */
747 0x00000000,
748 +/* 0x0074: ctx_format */
749 0x00000000,
750 +/* 0x0078: ctx_swz_const0 */
751 0x00000000,
752 +/* 0x007c: ctx_swz_const1 */
753 0x00000000,
754 +/* 0x0080: ctx_xcnt */
755 0x00000000,
756 +/* 0x0084: ctx_ycnt */
757 0x00000000,
758 0x00000000,
759 0x00000000,
760 @@ -63,6 +98,7 @@ uint32_t nva3_pcopy_data[] = {
761 0x00000000,
762 0x00000000,
763 0x00000000,
764 +/* 0x0100: dispatch_table */
765 0x00010000,
766 0x00000000,
767 0x00000000,
768 @@ -73,6 +109,7 @@ uint32_t nva3_pcopy_data[] = {
769 0x00010162,
770 0x00000000,
771 0x00030060,
772 +/* 0x0128: dispatch_dma */
773 0x00010170,
774 0x00000000,
775 0x00010170,
776 @@ -118,11 +155,11 @@ uint32_t nva3_pcopy_data[] = {
777 0x0000001c,
778 0xffffff00,
779 0x00000020,
780 - 0x0000000f,
781 + 0x00000000,
782 0x00000048,
783 0xffffff00,
784 0x0000004c,
785 - 0x0000000f,
786 + 0x00000000,
787 0x00000024,
788 0xfff80000,
789 0x00000050,
790 @@ -146,7 +183,8 @@ uint32_t nva3_pcopy_data[] = {
791 0x00000800,
792 };
793
794 -uint32_t nva3_pcopy_code[] = {
795 +u32 nva3_pcopy_code[] = {
796 +/* 0x0000: main */
797 0x04fe04bd,
798 0x3517f000,
799 0xf10010fe,
800 @@ -158,23 +196,31 @@ uint32_t nva3_pcopy_code[] = {
801 0x17f11031,
802 0x27f01200,
803 0x0012d003,
804 +/* 0x002f: spin */
805 0xf40031f4,
806 0x0ef40028,
807 +/* 0x0035: ih */
808 0x8001cffd,
809 0xf40812c4,
810 0x21f4060b,
811 +/* 0x0041: ih_no_chsw */
812 0x0412c472,
813 0xf4060bf4,
814 +/* 0x004a: ih_no_cmd */
815 0x11c4c321,
816 0x4001d00c,
817 +/* 0x0052: swctx */
818 0x47f101f8,
819 0x4bfe7700,
820 0x0007fe00,
821 0xf00204b9,
822 0x01f40643,
823 0x0604fa09,
824 +/* 0x006b: swctx_load */
825 0xfa060ef4,
826 +/* 0x006e: swctx_done */
827 0x03f80504,
828 +/* 0x0072: chsw */
829 0x27f100f8,
830 0x23cf1400,
831 0x1e3fc800,
832 @@ -183,18 +229,22 @@ uint32_t nva3_pcopy_code[] = {
833 0x1e3af052,
834 0xf00023d0,
835 0x24d00147,
836 +/* 0x0093: chsw_no_unload */
837 0xcf00f880,
838 0x3dc84023,
839 0x220bf41e,
840 0xf40131f4,
841 0x57f05221,
842 0x0367f004,
843 +/* 0x00a8: chsw_load_ctx_dma */
844 0xa07856bc,
845 0xb6018068,
846 0x87d00884,
847 0x0162b600,
848 +/* 0x00bb: chsw_finish_load */
849 0xf0f018f4,
850 0x23d00237,
851 +/* 0x00c3: dispatch */
852 0xf100f880,
853 0xcf190037,
854 0x33cf4032,
855 @@ -202,6 +252,7 @@ uint32_t nva3_pcopy_code[] = {
856 0x1024b607,
857 0x010057f1,
858 0x74bd64bd,
859 +/* 0x00dc: dispatch_loop */
860 0x58005658,
861 0x50b60157,
862 0x0446b804,
863 @@ -211,6 +262,7 @@ uint32_t nva3_pcopy_code[] = {
864 0xb60276bb,
865 0x57bb0374,
866 0xdf0ef400,
867 +/* 0x0100: dispatch_valid_mthd */
868 0xb60246bb,
869 0x45bb0344,
870 0x01459800,
871 @@ -220,31 +272,41 @@ uint32_t nva3_pcopy_code[] = {
872 0xb0014658,
873 0x1bf40064,
874 0x00538009,
875 +/* 0x0127: dispatch_cmd */
876 0xf4300ef4,
877 0x55f90132,
878 0xf40c01f4,
879 +/* 0x0132: dispatch_invalid_bitfield */
880 0x25f0250e,
881 +/* 0x0135: dispatch_illegal_mthd */
882 0x0125f002,
883 +/* 0x0138: dispatch_error */
884 0x100047f1,
885 0xd00042d0,
886 0x27f04043,
887 0x0002d040,
888 +/* 0x0148: hostirq_wait */
889 0xf08002cf,
890 0x24b04024,
891 0xf71bf400,
892 +/* 0x0154: dispatch_done */
893 0x1d0027f1,
894 0xd00137f0,
895 0x00f80023,
896 +/* 0x0160: cmd_nop */
897 +/* 0x0162: cmd_pm_trigger */
898 0x27f100f8,
899 0x34bd2200,
900 0xd00233f0,
901 0x00f80023,
902 +/* 0x0170: cmd_dma */
903 0x012842b7,
904 0xf00145b6,
905 0x43801e39,
906 0x0040b701,
907 0x0644b606,
908 0xf80043d0,
909 +/* 0x0189: cmd_exec_set_format */
910 0xf030f400,
911 0xb00001b0,
912 0x01b00101,
913 @@ -256,20 +318,26 @@ uint32_t nva3_pcopy_code[] = {
914 0x70b63847,
915 0x0232f401,
916 0x94bd84bd,
917 +/* 0x01b4: ncomp_loop */
918 0xb60f4ac4,
919 0xb4bd0445,
920 +/* 0x01bc: bpc_loop */
921 0xf404a430,
922 0xa5ff0f18,
923 0x00cbbbc0,
924 0xf40231f4,
925 +/* 0x01ce: cmp_c0 */
926 0x1bf4220e,
927 0x10c7f00c,
928 0xf400cbbb,
929 +/* 0x01da: cmp_c1 */
930 0xa430160e,
931 0x0c18f406,
932 0xbb14c7f0,
933 0x0ef400cb,
934 +/* 0x01e9: cmp_zero */
935 0x80c7f107,
936 +/* 0x01ed: bpc_next */
937 0x01c83800,
938 0xb60180b6,
939 0xb5b801b0,
940 @@ -280,6 +348,7 @@ uint32_t nva3_pcopy_code[] = {
941 0x98110680,
942 0x68fd2008,
943 0x0502f400,
944 +/* 0x0216: dst_xcnt */
945 0x75fd64bd,
946 0x1c078000,
947 0xf10078fd,
948 @@ -304,6 +373,7 @@ uint32_t nva3_pcopy_code[] = {
949 0x980056d0,
950 0x56d01f06,
951 0x1030f440,
952 +/* 0x0276: cmd_exec_set_surface_tiled */
953 0x579800f8,
954 0x6879c70a,
955 0xb66478c7,
956 @@ -311,9 +381,11 @@ uint32_t nva3_pcopy_code[] = {
957 0x0e76b060,
958 0xf0091bf4,
959 0x0ef40477,
960 +/* 0x0291: xtile64 */
961 0x027cf00f,
962 0xfd1170b6,
963 0x77f00947,
964 +/* 0x029d: xtileok */
965 0x0f5a9806,
966 0xfd115b98,
967 0xb7f000ab,
968 @@ -371,6 +443,7 @@ uint32_t nva3_pcopy_code[] = {
969 0x67d00600,
970 0x0060b700,
971 0x0068d004,
972 +/* 0x0382: cmd_exec_set_surface_linear */
973 0x6cf000f8,
974 0x0260b702,
975 0x0864b602,
976 @@ -381,13 +454,16 @@ uint32_t nva3_pcopy_code[] = {
977 0xb70067d0,
978 0x98040060,
979 0x67d00957,
980 +/* 0x03ab: cmd_exec_wait */
981 0xf900f800,
982 0xf110f900,
983 0xb6080007,
984 +/* 0x03b6: loop */
985 0x01cf0604,
986 0x0114f000,
987 0xfcfa1bf4,
988 0xf800fc10,
989 +/* 0x03c5: cmd_exec_query */
990 0x0d34c800,
991 0xf5701bf4,
992 0xf103ab21,
993 @@ -417,6 +493,7 @@ uint32_t nva3_pcopy_code[] = {
994 0x47f10153,
995 0x44b60800,
996 0x0045d006,
997 +/* 0x0438: query_counter */
998 0x03ab21f5,
999 0x080c47f1,
1000 0x980644b6,
1001 @@ -439,11 +516,13 @@ uint32_t nva3_pcopy_code[] = {
1002 0x47f10153,
1003 0x44b60800,
1004 0x0045d006,
1005 +/* 0x0492: cmd_exec */
1006 0x21f500f8,
1007 0x3fc803ab,
1008 0x0e0bf400,
1009 0x018921f5,
1010 0x020047f1,
1011 +/* 0x04a7: cmd_exec_no_format */
1012 0xf11e0ef4,
1013 0xb6081067,
1014 0x77f00664,
1015 @@ -451,19 +530,24 @@ uint32_t nva3_pcopy_code[] = {
1016 0x981c0780,
1017 0x67d02007,
1018 0x4067d000,
1019 +/* 0x04c2: cmd_exec_init_src_surface */
1020 0x32f444bd,
1021 0xc854bd02,
1022 0x0bf4043f,
1023 0x8221f50a,
1024 0x0a0ef403,
1025 +/* 0x04d4: src_tiled */
1026 0x027621f5,
1027 +/* 0x04db: cmd_exec_init_dst_surface */
1028 0xf40749f0,
1029 0x57f00231,
1030 0x083fc82c,
1031 0xf50a0bf4,
1032 0xf4038221,
1033 +/* 0x04ee: dst_tiled */
1034 0x21f50a0e,
1035 0x49f00276,
1036 +/* 0x04f5: cmd_exec_kick */
1037 0x0057f108,
1038 0x0654b608,
1039 0xd0210698,
1040 @@ -473,6 +557,8 @@ uint32_t nva3_pcopy_code[] = {
1041 0xc80054d0,
1042 0x0bf40c3f,
1043 0xc521f507,
1044 +/* 0x0519: cmd_exec_done */
1045 +/* 0x051b: cmd_wrcache_flush */
1046 0xf100f803,
1047 0xbd220027,
1048 0x0133f034,
1049 diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
1050 index a8d1745..cd879f3 100644
1051 --- a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
1052 +++ b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
1053 @@ -1,34 +1,65 @@
1054 -uint32_t nvc0_pcopy_data[] = {
1055 +u32 nvc0_pcopy_data[] = {
1056 +/* 0x0000: ctx_object */
1057 0x00000000,
1058 +/* 0x0004: ctx_query_address_high */
1059 0x00000000,
1060 +/* 0x0008: ctx_query_address_low */
1061 0x00000000,
1062 +/* 0x000c: ctx_query_counter */
1063 0x00000000,
1064 +/* 0x0010: ctx_src_address_high */
1065 0x00000000,
1066 +/* 0x0014: ctx_src_address_low */
1067 0x00000000,
1068 +/* 0x0018: ctx_src_pitch */
1069 0x00000000,
1070 +/* 0x001c: ctx_src_tile_mode */
1071 0x00000000,
1072 +/* 0x0020: ctx_src_xsize */
1073 0x00000000,
1074 +/* 0x0024: ctx_src_ysize */
1075 0x00000000,
1076 +/* 0x0028: ctx_src_zsize */
1077 0x00000000,
1078 +/* 0x002c: ctx_src_zoff */
1079 0x00000000,
1080 +/* 0x0030: ctx_src_xoff */
1081 0x00000000,
1082 +/* 0x0034: ctx_src_yoff */
1083 0x00000000,
1084 +/* 0x0038: ctx_src_cpp */
1085 0x00000000,
1086 +/* 0x003c: ctx_dst_address_high */
1087 0x00000000,
1088 +/* 0x0040: ctx_dst_address_low */
1089 0x00000000,
1090 +/* 0x0044: ctx_dst_pitch */
1091 0x00000000,
1092 +/* 0x0048: ctx_dst_tile_mode */
1093 0x00000000,
1094 +/* 0x004c: ctx_dst_xsize */
1095 0x00000000,
1096 +/* 0x0050: ctx_dst_ysize */
1097 0x00000000,
1098 +/* 0x0054: ctx_dst_zsize */
1099 0x00000000,
1100 +/* 0x0058: ctx_dst_zoff */
1101 0x00000000,
1102 +/* 0x005c: ctx_dst_xoff */
1103 0x00000000,
1104 +/* 0x0060: ctx_dst_yoff */
1105 0x00000000,
1106 +/* 0x0064: ctx_dst_cpp */
1107 0x00000000,
1108 +/* 0x0068: ctx_format */
1109 0x00000000,
1110 +/* 0x006c: ctx_swz_const0 */
1111 0x00000000,
1112 +/* 0x0070: ctx_swz_const1 */
1113 0x00000000,
1114 +/* 0x0074: ctx_xcnt */
1115 0x00000000,
1116 +/* 0x0078: ctx_ycnt */
1117 0x00000000,
1118 0x00000000,
1119 0x00000000,
1120 @@ -63,6 +94,7 @@ uint32_t nvc0_pcopy_data[] = {
1121 0x00000000,
1122 0x00000000,
1123 0x00000000,
1124 +/* 0x0100: dispatch_table */
1125 0x00010000,
1126 0x00000000,
1127 0x00000000,
1128 @@ -111,11 +143,11 @@ uint32_t nvc0_pcopy_data[] = {
1129 0x00000010,
1130 0xffffff00,
1131 0x00000014,
1132 - 0x0000000f,
1133 + 0x00000000,
1134 0x0000003c,
1135 0xffffff00,
1136 0x00000040,
1137 - 0x0000000f,
1138 + 0x00000000,
1139 0x00000018,
1140 0xfff80000,
1141 0x00000044,
1142 @@ -139,7 +171,8 @@ uint32_t nvc0_pcopy_data[] = {
1143 0x00000800,
1144 };
1145
1146 -uint32_t nvc0_pcopy_code[] = {
1147 +u32 nvc0_pcopy_code[] = {
1148 +/* 0x0000: main */
1149 0x04fe04bd,
1150 0x3517f000,
1151 0xf10010fe,
1152 @@ -151,15 +184,20 @@ uint32_t nvc0_pcopy_code[] = {
1153 0x17f11031,
1154 0x27f01200,
1155 0x0012d003,
1156 +/* 0x002f: spin */
1157 0xf40031f4,
1158 0x0ef40028,
1159 +/* 0x0035: ih */
1160 0x8001cffd,
1161 0xf40812c4,
1162 0x21f4060b,
1163 +/* 0x0041: ih_no_chsw */
1164 0x0412c4ca,
1165 0xf5070bf4,
1166 +/* 0x004b: ih_no_cmd */
1167 0xc4010221,
1168 0x01d00c11,
1169 +/* 0x0053: swctx */
1170 0xf101f840,
1171 0xfe770047,
1172 0x47f1004b,
1173 @@ -188,8 +226,11 @@ uint32_t nvc0_pcopy_code[] = {
1174 0xf00204b9,
1175 0x01f40643,
1176 0x0604fa09,
1177 +/* 0x00c3: swctx_load */
1178 0xfa060ef4,
1179 +/* 0x00c6: swctx_done */
1180 0x03f80504,
1181 +/* 0x00ca: chsw */
1182 0x27f100f8,
1183 0x23cf1400,
1184 0x1e3fc800,
1185 @@ -198,18 +239,22 @@ uint32_t nvc0_pcopy_code[] = {
1186 0x1e3af053,
1187 0xf00023d0,
1188 0x24d00147,
1189 +/* 0x00eb: chsw_no_unload */
1190 0xcf00f880,
1191 0x3dc84023,
1192 0x090bf41e,
1193 0xf40131f4,
1194 +/* 0x00fa: chsw_finish_load */
1195 0x37f05321,
1196 0x8023d002,
1197 +/* 0x0102: dispatch */
1198 0x37f100f8,
1199 0x32cf1900,
1200 0x0033cf40,
1201 0x07ff24e4,
1202 0xf11024b6,
1203 0xbd010057,
1204 +/* 0x011b: dispatch_loop */
1205 0x5874bd64,
1206 0x57580056,
1207 0x0450b601,
1208 @@ -219,6 +264,7 @@ uint32_t nvc0_pcopy_code[] = {
1209 0xbb0f08f4,
1210 0x74b60276,
1211 0x0057bb03,
1212 +/* 0x013f: dispatch_valid_mthd */
1213 0xbbdf0ef4,
1214 0x44b60246,
1215 0x0045bb03,
1216 @@ -229,24 +275,33 @@ uint32_t nvc0_pcopy_code[] = {
1217 0x64b00146,
1218 0x091bf400,
1219 0xf4005380,
1220 +/* 0x0166: dispatch_cmd */
1221 0x32f4300e,
1222 0xf455f901,
1223 0x0ef40c01,
1224 +/* 0x0171: dispatch_invalid_bitfield */
1225 0x0225f025,
1226 +/* 0x0174: dispatch_illegal_mthd */
1227 +/* 0x0177: dispatch_error */
1228 0xf10125f0,
1229 0xd0100047,
1230 0x43d00042,
1231 0x4027f040,
1232 +/* 0x0187: hostirq_wait */
1233 0xcf0002d0,
1234 0x24f08002,
1235 0x0024b040,
1236 +/* 0x0193: dispatch_done */
1237 0xf1f71bf4,
1238 0xf01d0027,
1239 0x23d00137,
1240 +/* 0x019f: cmd_nop */
1241 0xf800f800,
1242 +/* 0x01a1: cmd_pm_trigger */
1243 0x0027f100,
1244 0xf034bd22,
1245 0x23d00233,
1246 +/* 0x01af: cmd_exec_set_format */
1247 0xf400f800,
1248 0x01b0f030,
1249 0x0101b000,
1250 @@ -258,20 +313,26 @@ uint32_t nvc0_pcopy_code[] = {
1251 0x3847c701,
1252 0xf40170b6,
1253 0x84bd0232,
1254 +/* 0x01da: ncomp_loop */
1255 0x4ac494bd,
1256 0x0445b60f,
1257 +/* 0x01e2: bpc_loop */
1258 0xa430b4bd,
1259 0x0f18f404,
1260 0xbbc0a5ff,
1261 0x31f400cb,
1262 0x220ef402,
1263 +/* 0x01f4: cmp_c0 */
1264 0xf00c1bf4,
1265 0xcbbb10c7,
1266 0x160ef400,
1267 +/* 0x0200: cmp_c1 */
1268 0xf406a430,
1269 0xc7f00c18,
1270 0x00cbbb14,
1271 +/* 0x020f: cmp_zero */
1272 0xf1070ef4,
1273 +/* 0x0213: bpc_next */
1274 0x380080c7,
1275 0x80b601c8,
1276 0x01b0b601,
1277 @@ -283,6 +344,7 @@ uint32_t nvc0_pcopy_code[] = {
1278 0x1d08980e,
1279 0xf40068fd,
1280 0x64bd0502,
1281 +/* 0x023c: dst_xcnt */
1282 0x800075fd,
1283 0x78fd1907,
1284 0x1057f100,
1285 @@ -307,15 +369,18 @@ uint32_t nvc0_pcopy_code[] = {
1286 0x1c069800,
1287 0xf44056d0,
1288 0x00f81030,
1289 +/* 0x029c: cmd_exec_set_surface_tiled */
1290 0xc7075798,
1291 0x78c76879,
1292 0x0380b664,
1293 0xb06077c7,
1294 0x1bf40e76,
1295 0x0477f009,
1296 +/* 0x02b7: xtile64 */
1297 0xf00f0ef4,
1298 0x70b6027c,
1299 0x0947fd11,
1300 +/* 0x02c3: xtileok */
1301 0x980677f0,
1302 0x5b980c5a,
1303 0x00abfd0e,
1304 @@ -374,6 +439,7 @@ uint32_t nvc0_pcopy_code[] = {
1305 0xb70067d0,
1306 0xd0040060,
1307 0x00f80068,
1308 +/* 0x03a8: cmd_exec_set_surface_linear */
1309 0xb7026cf0,
1310 0xb6020260,
1311 0x57980864,
1312 @@ -384,12 +450,15 @@ uint32_t nvc0_pcopy_code[] = {
1313 0x0060b700,
1314 0x06579804,
1315 0xf80067d0,
1316 +/* 0x03d1: cmd_exec_wait */
1317 0xf900f900,
1318 0x0007f110,
1319 0x0604b608,
1320 +/* 0x03dc: loop */
1321 0xf00001cf,
1322 0x1bf40114,
1323 0xfc10fcfa,
1324 +/* 0x03eb: cmd_exec_query */
1325 0xc800f800,
1326 0x1bf40d34,
1327 0xd121f570,
1328 @@ -419,6 +488,7 @@ uint32_t nvc0_pcopy_code[] = {
1329 0x0153f026,
1330 0x080047f1,
1331 0xd00644b6,
1332 +/* 0x045e: query_counter */
1333 0x21f50045,
1334 0x47f103d1,
1335 0x44b6080c,
1336 @@ -442,11 +512,13 @@ uint32_t nvc0_pcopy_code[] = {
1337 0x080047f1,
1338 0xd00644b6,
1339 0x00f80045,
1340 +/* 0x04b8: cmd_exec */
1341 0x03d121f5,
1342 0xf4003fc8,
1343 0x21f50e0b,
1344 0x47f101af,
1345 0x0ef40200,
1346 +/* 0x04cd: cmd_exec_no_format */
1347 0x1067f11e,
1348 0x0664b608,
1349 0x800177f0,
1350 @@ -454,18 +526,23 @@ uint32_t nvc0_pcopy_code[] = {
1351 0x1d079819,
1352 0xd00067d0,
1353 0x44bd4067,
1354 +/* 0x04e8: cmd_exec_init_src_surface */
1355 0xbd0232f4,
1356 0x043fc854,
1357 0xf50a0bf4,
1358 0xf403a821,
1359 +/* 0x04fa: src_tiled */
1360 0x21f50a0e,
1361 0x49f0029c,
1362 +/* 0x0501: cmd_exec_init_dst_surface */
1363 0x0231f407,
1364 0xc82c57f0,
1365 0x0bf4083f,
1366 0xa821f50a,
1367 0x0a0ef403,
1368 +/* 0x0514: dst_tiled */
1369 0x029c21f5,
1370 +/* 0x051b: cmd_exec_kick */
1371 0xf10849f0,
1372 0xb6080057,
1373 0x06980654,
1374 @@ -475,7 +552,9 @@ uint32_t nvc0_pcopy_code[] = {
1375 0x54d00546,
1376 0x0c3fc800,
1377 0xf5070bf4,
1378 +/* 0x053f: cmd_exec_done */
1379 0xf803eb21,
1380 +/* 0x0541: cmd_wrcache_flush */
1381 0x0027f100,
1382 0xf034bd22,
1383 0x23d00133,
1384 diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
1385 index c57d856..886b41f 100644
1386 --- a/drivers/gpu/drm/radeon/atombios_dp.c
1387 +++ b/drivers/gpu/drm/radeon/atombios_dp.c
1388 @@ -22,6 +22,7 @@
1389 *
1390 * Authors: Dave Airlie
1391 * Alex Deucher
1392 + * Jerome Glisse
1393 */
1394 #include "drmP.h"
1395 #include "radeon_drm.h"
1396 @@ -637,7 +638,6 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
1397 ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
1398 link_status, DP_LINK_STATUS_SIZE, 100);
1399 if (ret <= 0) {
1400 - DRM_ERROR("displayport link status failed\n");
1401 return false;
1402 }
1403
1404 @@ -816,8 +816,10 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
1405 else
1406 mdelay(dp_info->rd_interval * 4);
1407
1408 - if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
1409 + if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
1410 + DRM_ERROR("displayport link status failed\n");
1411 break;
1412 + }
1413
1414 if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
1415 clock_recovery = true;
1416 @@ -879,8 +881,10 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
1417 else
1418 mdelay(dp_info->rd_interval * 4);
1419
1420 - if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
1421 + if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
1422 + DRM_ERROR("displayport link status failed\n");
1423 break;
1424 + }
1425
1426 if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
1427 channel_eq = true;
1428 diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
1429 index 2d39f99..a3ae788 100644
1430 --- a/drivers/gpu/drm/radeon/atombios_encoders.c
1431 +++ b/drivers/gpu/drm/radeon/atombios_encoders.c
1432 @@ -1392,10 +1392,18 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1433 case DRM_MODE_DPMS_ON:
1434 /* some early dce3.2 boards have a bug in their transmitter control table */
1435 if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) ||
1436 - ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
1437 + ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
1438 + if (ASIC_IS_DCE6(rdev)) {
1439 + /* It seems we need to call ATOM_ENCODER_CMD_SETUP again
1440 + * before reenabling encoder on DPMS ON, otherwise we never
1441 + * get picture
1442 + */
1443 + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
1444 + }
1445 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1446 - else
1447 + } else {
1448 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1449 + }
1450 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
1451 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
1452 atombios_set_edp_panel_power(connector,
1453 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
1454 index 3c2e7a0..3fb7ca9 100644
1455 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
1456 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
1457 @@ -64,14 +64,33 @@ void radeon_connector_hotplug(struct drm_connector *connector)
1458
1459 /* just deal with DP (not eDP) here. */
1460 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
1461 - int saved_dpms = connector->dpms;
1462 -
1463 - /* Only turn off the display it it's physically disconnected */
1464 - if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
1465 - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1466 - else if (radeon_dp_needs_link_train(radeon_connector))
1467 - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1468 - connector->dpms = saved_dpms;
1469 + struct radeon_connector_atom_dig *dig_connector =
1470 + radeon_connector->con_priv;
1471 +
1472 + /* if existing sink type was not DP no need to retrain */
1473 + if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT)
1474 + return;
1475 +
1476 + /* first get sink type as it may be reset after (un)plug */
1477 + dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
1478 + /* don't do anything if sink is not display port, i.e.,
1479 + * passive dp->(dvi|hdmi) adaptor
1480 + */
1481 + if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
1482 + int saved_dpms = connector->dpms;
1483 + /* Only turn off the display if it's physically disconnected */
1484 + if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
1485 + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1486 + } else if (radeon_dp_needs_link_train(radeon_connector)) {
1487 + /* set it to OFF so that drm_helper_connector_dpms()
1488 + * won't return immediately since the current state
1489 + * is ON at this point.
1490 + */
1491 + connector->dpms = DRM_MODE_DPMS_OFF;
1492 + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1493 + }
1494 + connector->dpms = saved_dpms;
1495 + }
1496 }
1497 }
1498
1499 diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
1500 index 2418cf6..cf723c4 100644
1501 --- a/drivers/gpu/drm/radeon/radeon_cs.c
1502 +++ b/drivers/gpu/drm/radeon/radeon_cs.c
1503 @@ -377,7 +377,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
1504 if (r) {
1505 DRM_ERROR("Failed to schedule IB !\n");
1506 }
1507 - return 0;
1508 + return r;
1509 }
1510
1511 static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
1512 diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
1513 index 42acc64..711e95a 100644
1514 --- a/drivers/gpu/drm/radeon/radeon_cursor.c
1515 +++ b/drivers/gpu/drm/radeon/radeon_cursor.c
1516 @@ -262,8 +262,14 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
1517 if (!(cursor_end & 0x7f))
1518 w--;
1519 }
1520 - if (w <= 0)
1521 + if (w <= 0) {
1522 w = 1;
1523 + cursor_end = x - xorigin + w;
1524 + if (!(cursor_end & 0x7f)) {
1525 + x--;
1526 + WARN_ON_ONCE(x < 0);
1527 + }
1528 + }
1529 }
1530 }
1531
1532 diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
1533 index df6a4db..80c6e8b 100644
1534 --- a/drivers/gpu/drm/radeon/radeon_object.c
1535 +++ b/drivers/gpu/drm/radeon/radeon_object.c
1536 @@ -136,7 +136,6 @@ int radeon_bo_create(struct radeon_device *rdev,
1537 acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
1538 sizeof(struct radeon_bo));
1539
1540 -retry:
1541 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
1542 if (bo == NULL)
1543 return -ENOMEM;
1544 @@ -150,6 +149,8 @@ retry:
1545 bo->surface_reg = -1;
1546 INIT_LIST_HEAD(&bo->list);
1547 INIT_LIST_HEAD(&bo->va);
1548 +
1549 +retry:
1550 radeon_ttm_placement_from_domain(bo, domain);
1551 /* Kernel allocation are uninterruptible */
1552 mutex_lock(&rdev->vram_mutex);
1553 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
1554 index dfe7d37..57ed244 100644
1555 --- a/drivers/iommu/amd_iommu.c
1556 +++ b/drivers/iommu/amd_iommu.c
1557 @@ -2254,6 +2254,18 @@ static int device_change_notifier(struct notifier_block *nb,
1558
1559 iommu_init_device(dev);
1560
1561 + /*
1562 + * dev_data is still NULL and
1563 + * got initialized in iommu_init_device
1564 + */
1565 + dev_data = get_dev_data(dev);
1566 +
1567 + if (iommu_pass_through || dev_data->iommu_v2) {
1568 + dev_data->passthrough = true;
1569 + attach_device(dev, pt_domain);
1570 + break;
1571 + }
1572 +
1573 domain = domain_for_device(dev);
1574
1575 /* allocate a protection domain if a device is added */
1576 @@ -2271,10 +2283,7 @@ static int device_change_notifier(struct notifier_block *nb,
1577
1578 dev_data = get_dev_data(dev);
1579
1580 - if (!dev_data->passthrough)
1581 - dev->archdata.dma_ops = &amd_iommu_dma_ops;
1582 - else
1583 - dev->archdata.dma_ops = &nommu_dma_ops;
1584 + dev->archdata.dma_ops = &amd_iommu_dma_ops;
1585
1586 break;
1587 case BUS_NOTIFY_DEL_DEVICE:
1588 diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
1589 index 036fe9b..a1f1bc8 100644
1590 --- a/drivers/iommu/amd_iommu_v2.c
1591 +++ b/drivers/iommu/amd_iommu_v2.c
1592 @@ -681,6 +681,8 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
1593
1594 atomic_set(&pasid_state->count, 1);
1595 init_waitqueue_head(&pasid_state->wq);
1596 + spin_lock_init(&pasid_state->lock);
1597 +
1598 pasid_state->task = task;
1599 pasid_state->mm = get_task_mm(task);
1600 pasid_state->device_state = dev_state;
1601 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
1602 index e0a0ebe..1555f0b 100644
1603 --- a/drivers/md/dm-thin.c
1604 +++ b/drivers/md/dm-thin.c
1605 @@ -19,7 +19,7 @@
1606 /*
1607 * Tunable constants
1608 */
1609 -#define ENDIO_HOOK_POOL_SIZE 10240
1610 +#define ENDIO_HOOK_POOL_SIZE 1024
1611 #define DEFERRED_SET_SIZE 64
1612 #define MAPPING_POOL_SIZE 1024
1613 #define PRISON_CELLS 1024
1614 @@ -855,7 +855,7 @@ static void process_prepared_mapping(struct new_mapping *m)
1615
1616 if (m->err) {
1617 cell_error(m->cell);
1618 - return;
1619 + goto out;
1620 }
1621
1622 /*
1623 @@ -867,7 +867,7 @@ static void process_prepared_mapping(struct new_mapping *m)
1624 if (r) {
1625 DMERR("dm_thin_insert_block() failed");
1626 cell_error(m->cell);
1627 - return;
1628 + goto out;
1629 }
1630
1631 /*
1632 @@ -882,6 +882,7 @@ static void process_prepared_mapping(struct new_mapping *m)
1633 } else
1634 cell_defer(tc, m->cell, m->data_block);
1635
1636 +out:
1637 list_del(&m->list);
1638 mempool_free(m, tc->pool->mapping_pool);
1639 }
1640 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
1641 index 69ef0be..504da71 100644
1642 --- a/drivers/mmc/host/sdhci-pci.c
1643 +++ b/drivers/mmc/host/sdhci-pci.c
1644 @@ -157,6 +157,7 @@ static const struct sdhci_pci_fixes sdhci_ene_714 = {
1645 static const struct sdhci_pci_fixes sdhci_cafe = {
1646 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
1647 SDHCI_QUIRK_NO_BUSY_IRQ |
1648 + SDHCI_QUIRK_BROKEN_CARD_DETECTION |
1649 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
1650 };
1651
1652 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
1653 index ccefdeb..3c403aa 100644
1654 --- a/drivers/mmc/host/sdhci.c
1655 +++ b/drivers/mmc/host/sdhci.c
1656 @@ -27,6 +27,7 @@
1657
1658 #include <linux/mmc/mmc.h>
1659 #include <linux/mmc/host.h>
1660 +#include <linux/mmc/card.h>
1661
1662 #include "sdhci.h"
1663
1664 @@ -1245,6 +1246,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1665 struct sdhci_host *host;
1666 bool present;
1667 unsigned long flags;
1668 + u32 tuning_opcode;
1669
1670 host = mmc_priv(mmc);
1671
1672 @@ -1292,8 +1294,12 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1673 */
1674 if ((host->flags & SDHCI_NEEDS_RETUNING) &&
1675 !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
1676 + /* eMMC uses cmd21 while sd and sdio use cmd19 */
1677 + tuning_opcode = mmc->card->type == MMC_TYPE_MMC ?
1678 + MMC_SEND_TUNING_BLOCK_HS200 :
1679 + MMC_SEND_TUNING_BLOCK;
1680 spin_unlock_irqrestore(&host->lock, flags);
1681 - sdhci_execute_tuning(mmc, mrq->cmd->opcode);
1682 + sdhci_execute_tuning(mmc, tuning_opcode);
1683 spin_lock_irqsave(&host->lock, flags);
1684
1685 /* Restore original mmc_request structure */
1686 diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
1687 index 8a3054b..5de74e7 100644
1688 --- a/drivers/net/caif/caif_serial.c
1689 +++ b/drivers/net/caif/caif_serial.c
1690 @@ -325,6 +325,9 @@ static int ldisc_open(struct tty_struct *tty)
1691
1692 sprintf(name, "cf%s", tty->name);
1693 dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
1694 + if (!dev)
1695 + return -ENOMEM;
1696 +
1697 ser = netdev_priv(dev);
1698 ser->tty = tty_kref_get(tty);
1699 ser->dev = dev;
1700 diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
1701 index 1ef0c92..65fe632 100644
1702 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
1703 +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
1704 @@ -267,7 +267,6 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
1705 dev_warn(&pdev->dev, "stop mac failed\n");
1706 atl1c_set_aspm(hw, false);
1707 netif_carrier_off(netdev);
1708 - netif_stop_queue(netdev);
1709 atl1c_phy_reset(hw);
1710 atl1c_phy_init(&adapter->hw);
1711 } else {
1712 diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
1713 index 8297e28..b8ade57 100644
1714 --- a/drivers/net/ethernet/broadcom/bnx2.c
1715 +++ b/drivers/net/ethernet/broadcom/bnx2.c
1716 @@ -5372,7 +5372,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
1717 int k, last;
1718
1719 if (skb == NULL) {
1720 - j++;
1721 + j = NEXT_TX_BD(j);
1722 continue;
1723 }
1724
1725 @@ -5384,8 +5384,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
1726 tx_buf->skb = NULL;
1727
1728 last = tx_buf->nr_frags;
1729 - j++;
1730 - for (k = 0; k < last; k++, j++) {
1731 + j = NEXT_TX_BD(j);
1732 + for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
1733 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
1734 dma_unmap_page(&bp->pdev->dev,
1735 dma_unmap_addr(tx_buf, mapping),
1736 diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
1737 index 1a1b29f..689d2a1 100644
1738 --- a/drivers/net/ethernet/broadcom/tg3.c
1739 +++ b/drivers/net/ethernet/broadcom/tg3.c
1740 @@ -298,6 +298,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
1741 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
1742 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
1743 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
1744 + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
1745 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
1746 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
1747 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
1748 @@ -8948,8 +8949,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1749 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
1750 tg3_flag(tp, 57765_PLUS)) {
1751 val = tr32(TG3_RDMA_RSRVCTRL_REG);
1752 - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1753 - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
1754 + if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
1755 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
1756 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
1757 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
1758 @@ -12255,10 +12255,12 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
1759 {
1760 struct tg3 *tp = netdev_priv(dev);
1761
1762 - if (!tp->hw_stats)
1763 + spin_lock_bh(&tp->lock);
1764 + if (!tp->hw_stats) {
1765 + spin_unlock_bh(&tp->lock);
1766 return &tp->net_stats_prev;
1767 + }
1768
1769 - spin_lock_bh(&tp->lock);
1770 tg3_get_nstats(tp, stats);
1771 spin_unlock_bh(&tp->lock);
1772
1773 diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
1774 index e7bed53..24381e1 100644
1775 --- a/drivers/net/ethernet/freescale/gianfar.c
1776 +++ b/drivers/net/ethernet/freescale/gianfar.c
1777 @@ -2065,10 +2065,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1778 return NETDEV_TX_OK;
1779 }
1780
1781 - /* Steal sock reference for processing TX time stamps */
1782 - swap(skb_new->sk, skb->sk);
1783 - swap(skb_new->destructor, skb->destructor);
1784 - kfree_skb(skb);
1785 + if (skb->sk)
1786 + skb_set_owner_w(skb_new, skb->sk);
1787 + consume_skb(skb);
1788 skb = skb_new;
1789 }
1790
1791 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
1792 index 161e045..a73bbe7 100644
1793 --- a/drivers/net/ethernet/realtek/r8169.c
1794 +++ b/drivers/net/ethernet/realtek/r8169.c
1795 @@ -5000,7 +5000,6 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
1796 {
1797 rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
1798 tp->cur_tx = tp->dirty_tx = 0;
1799 - netdev_reset_queue(tp->dev);
1800 }
1801
1802 static void rtl_reset_work(struct rtl8169_private *tp)
1803 @@ -5155,8 +5154,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
1804
1805 txd->opts2 = cpu_to_le32(opts[1]);
1806
1807 - netdev_sent_queue(dev, skb->len);
1808 -
1809 skb_tx_timestamp(skb);
1810
1811 wmb();
1812 @@ -5253,16 +5250,9 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
1813 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
1814 }
1815
1816 -struct rtl_txc {
1817 - int packets;
1818 - int bytes;
1819 -};
1820 -
1821 static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
1822 {
1823 - struct rtl8169_stats *tx_stats = &tp->tx_stats;
1824 unsigned int dirty_tx, tx_left;
1825 - struct rtl_txc txc = { 0, 0 };
1826
1827 dirty_tx = tp->dirty_tx;
1828 smp_rmb();
1829 @@ -5281,24 +5271,17 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
1830 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
1831 tp->TxDescArray + entry);
1832 if (status & LastFrag) {
1833 - struct sk_buff *skb = tx_skb->skb;
1834 -
1835 - txc.packets++;
1836 - txc.bytes += skb->len;
1837 - dev_kfree_skb(skb);
1838 + u64_stats_update_begin(&tp->tx_stats.syncp);
1839 + tp->tx_stats.packets++;
1840 + tp->tx_stats.bytes += tx_skb->skb->len;
1841 + u64_stats_update_end(&tp->tx_stats.syncp);
1842 + dev_kfree_skb(tx_skb->skb);
1843 tx_skb->skb = NULL;
1844 }
1845 dirty_tx++;
1846 tx_left--;
1847 }
1848
1849 - u64_stats_update_begin(&tx_stats->syncp);
1850 - tx_stats->packets += txc.packets;
1851 - tx_stats->bytes += txc.bytes;
1852 - u64_stats_update_end(&tx_stats->syncp);
1853 -
1854 - netdev_completed_queue(dev, txc.packets, txc.bytes);
1855 -
1856 if (tp->dirty_tx != dirty_tx) {
1857 tp->dirty_tx = dirty_tx;
1858 /* Sync with rtl8169_start_xmit:
1859 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
1860 index bb8c72c..8f13420 100644
1861 --- a/drivers/net/tun.c
1862 +++ b/drivers/net/tun.c
1863 @@ -358,6 +358,8 @@ static void tun_free_netdev(struct net_device *dev)
1864 {
1865 struct tun_struct *tun = netdev_priv(dev);
1866
1867 + BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED, &tun->socket.flags));
1868 +
1869 sk_release_kernel(tun->socket.sk);
1870 }
1871
1872 @@ -1115,6 +1117,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1873 tun->flags = flags;
1874 tun->txflt.count = 0;
1875 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
1876 + set_bit(SOCK_EXTERNALLY_ALLOCATED, &tun->socket.flags);
1877
1878 err = -ENOMEM;
1879 sk = sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
1880 @@ -1252,10 +1255,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1881 int vnet_hdr_sz;
1882 int ret;
1883
1884 - if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
1885 + if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) {
1886 if (copy_from_user(&ifr, argp, ifreq_len))
1887 return -EFAULT;
1888 -
1889 + } else {
1890 + memset(&ifr, 0, sizeof(ifr));
1891 + }
1892 if (cmd == TUNGETFEATURES) {
1893 /* Currently this just means: "what IFF flags are valid?".
1894 * This is needed because we never checked for invalid flags on
1895 diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
1896 index df2a2cf..7a6ccd6 100644
1897 --- a/drivers/net/usb/kaweth.c
1898 +++ b/drivers/net/usb/kaweth.c
1899 @@ -1302,7 +1302,7 @@ static int kaweth_internal_control_msg(struct usb_device *usb_dev,
1900 int retv;
1901 int length = 0; /* shut up GCC */
1902
1903 - urb = usb_alloc_urb(0, GFP_NOIO);
1904 + urb = usb_alloc_urb(0, GFP_ATOMIC);
1905 if (!urb)
1906 return -ENOMEM;
1907
1908 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
1909 index bef3f24..8be535f 100644
1910 --- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
1911 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
1912 @@ -191,6 +191,7 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
1913 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
1914
1915 IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n",
1916 + sta->addr,
1917 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
1918 "static" :
1919 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
1920 diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
1921 index baf6919..03560a8 100644
1922 --- a/drivers/net/wireless/mwifiex/cfg80211.c
1923 +++ b/drivers/net/wireless/mwifiex/cfg80211.c
1924 @@ -544,9 +544,9 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
1925
1926 /*
1927 * Bit 0 in tx_htinfo indicates that current Tx rate is 11n rate. Valid
1928 - * MCS index values for us are 0 to 7.
1929 + * MCS index values for us are 0 to 15.
1930 */
1931 - if ((priv->tx_htinfo & BIT(0)) && (priv->tx_rate < 8)) {
1932 + if ((priv->tx_htinfo & BIT(0)) && (priv->tx_rate < 16)) {
1933 sinfo->txrate.mcs = priv->tx_rate;
1934 sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
1935 /* 40MHz rate */
1936 diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
1937 index 5601302..f388b65 100644
1938 --- a/drivers/net/wireless/rt2x00/rt2800usb.c
1939 +++ b/drivers/net/wireless/rt2x00/rt2800usb.c
1940 @@ -1137,6 +1137,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
1941 #ifdef CONFIG_RT2800USB_RT33XX
1942 /* Belkin */
1943 { USB_DEVICE(0x050d, 0x945b) },
1944 + /* D-Link */
1945 + { USB_DEVICE(0x2001, 0x3c17) },
1946 /* Panasonic */
1947 { USB_DEVICE(0x083a, 0xb511) },
1948 /* Philips */
1949 @@ -1237,7 +1239,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
1950 /* D-Link */
1951 { USB_DEVICE(0x07d1, 0x3c0b) },
1952 { USB_DEVICE(0x07d1, 0x3c17) },
1953 - { USB_DEVICE(0x2001, 0x3c17) },
1954 /* Encore */
1955 { USB_DEVICE(0x203d, 0x14a1) },
1956 /* Gemtek */
1957 diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
1958 index 28fc5fb..46f7917 100644
1959 --- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
1960 +++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
1961 @@ -3344,21 +3344,21 @@ void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw)
1962 switch (rtlhal->macphymode) {
1963 case DUALMAC_SINGLEPHY:
1964 rtlphy->rf_type = RF_2T2R;
1965 - rtlhal->version |= CHIP_92D_SINGLEPHY;
1966 + rtlhal->version |= RF_TYPE_2T2R;
1967 rtlhal->bandset = BAND_ON_BOTH;
1968 rtlhal->current_bandtype = BAND_ON_2_4G;
1969 break;
1970
1971 case SINGLEMAC_SINGLEPHY:
1972 rtlphy->rf_type = RF_2T2R;
1973 - rtlhal->version |= CHIP_92D_SINGLEPHY;
1974 + rtlhal->version |= RF_TYPE_2T2R;
1975 rtlhal->bandset = BAND_ON_BOTH;
1976 rtlhal->current_bandtype = BAND_ON_2_4G;
1977 break;
1978
1979 case DUALMAC_DUALPHY:
1980 rtlphy->rf_type = RF_1T1R;
1981 - rtlhal->version &= (~CHIP_92D_SINGLEPHY);
1982 + rtlhal->version &= RF_TYPE_1T1R;
1983 /* Now we let MAC0 run on 5G band. */
1984 if (rtlhal->interfaceindex == 0) {
1985 rtlhal->bandset = BAND_ON_5G;
1986 diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
1987 index a6049d7..aa970fc 100644
1988 --- a/drivers/net/wireless/rtlwifi/usb.c
1989 +++ b/drivers/net/wireless/rtlwifi/usb.c
1990 @@ -131,15 +131,19 @@ static u32 _usb_read_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len)
1991 u8 request;
1992 u16 wvalue;
1993 u16 index;
1994 - __le32 *data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
1995 + __le32 *data;
1996 + unsigned long flags;
1997
1998 + spin_lock_irqsave(&rtlpriv->locks.usb_lock, flags);
1999 + if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
2000 + rtlpriv->usb_data_index = 0;
2001 + data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
2002 + spin_unlock_irqrestore(&rtlpriv->locks.usb_lock, flags);
2003 request = REALTEK_USB_VENQT_CMD_REQ;
2004 index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
2005
2006 wvalue = (u16)addr;
2007 _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
2008 - if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
2009 - rtlpriv->usb_data_index = 0;
2010 return le32_to_cpu(*data);
2011 }
2012
2013 @@ -951,6 +955,10 @@ int __devinit rtl_usb_probe(struct usb_interface *intf,
2014 GFP_KERNEL);
2015 if (!rtlpriv->usb_data)
2016 return -ENOMEM;
2017 +
2018 + /* this spin lock must be initialized early */
2019 + spin_lock_init(&rtlpriv->locks.usb_lock);
2020 +
2021 rtlpriv->usb_data_index = 0;
2022 init_completion(&rtlpriv->firmware_loading_complete);
2023 SET_IEEE80211_DEV(hw, &intf->dev);
2024 diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
2025 index 28ebc69..717d3ba 100644
2026 --- a/drivers/net/wireless/rtlwifi/wifi.h
2027 +++ b/drivers/net/wireless/rtlwifi/wifi.h
2028 @@ -1555,6 +1555,7 @@ struct rtl_locks {
2029 spinlock_t rf_ps_lock;
2030 spinlock_t rf_lock;
2031 spinlock_t waitq_lock;
2032 + spinlock_t usb_lock;
2033
2034 /*Dual mac*/
2035 spinlock_t cck_and_rw_pagea_lock;
2036 diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
2037 index f859216..951b6cf 100644
2038 --- a/drivers/s390/net/qeth_l3_main.c
2039 +++ b/drivers/s390/net/qeth_l3_main.c
2040 @@ -1818,6 +1818,8 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
2041 QETH_CARD_TEXT(card, 4, "frvaddr4");
2042
2043 netdev = __vlan_find_dev_deep(card->dev, vid);
2044 + if (!netdev)
2045 + return;
2046 in_dev = in_dev_get(netdev);
2047 if (!in_dev)
2048 return;
2049 @@ -1846,6 +1848,8 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
2050 QETH_CARD_TEXT(card, 4, "frvaddr6");
2051
2052 netdev = __vlan_find_dev_deep(card->dev, vid);
2053 + if (!netdev)
2054 + return;
2055 in6_dev = in6_dev_get(netdev);
2056 if (!in6_dev)
2057 return;
2058 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
2059 index a3a056a..b48c24f 100644
2060 --- a/drivers/scsi/hosts.c
2061 +++ b/drivers/scsi/hosts.c
2062 @@ -290,6 +290,7 @@ static void scsi_host_dev_release(struct device *dev)
2063 struct Scsi_Host *shost = dev_to_shost(dev);
2064 struct device *parent = dev->parent;
2065 struct request_queue *q;
2066 + void *queuedata;
2067
2068 scsi_proc_hostdir_rm(shost->hostt);
2069
2070 @@ -299,9 +300,9 @@ static void scsi_host_dev_release(struct device *dev)
2071 destroy_workqueue(shost->work_q);
2072 q = shost->uspace_req_q;
2073 if (q) {
2074 - kfree(q->queuedata);
2075 - q->queuedata = NULL;
2076 - scsi_free_queue(q);
2077 + queuedata = q->queuedata;
2078 + blk_cleanup_queue(q);
2079 + kfree(queuedata);
2080 }
2081
2082 scsi_destroy_command_freelist(shost);
2083 diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
2084 index caa0525..101b28e 100644
2085 --- a/drivers/scsi/libsas/sas_expander.c
2086 +++ b/drivers/scsi/libsas/sas_expander.c
2087 @@ -868,7 +868,7 @@ static struct domain_device *sas_ex_discover_end_dev(
2088 }
2089
2090 /* See if this phy is part of a wide port */
2091 -static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
2092 +static bool sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
2093 {
2094 struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id];
2095 int i;
2096 @@ -884,11 +884,11 @@ static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
2097 sas_port_add_phy(ephy->port, phy->phy);
2098 phy->port = ephy->port;
2099 phy->phy_state = PHY_DEVICE_DISCOVERED;
2100 - return 0;
2101 + return true;
2102 }
2103 }
2104
2105 - return -ENODEV;
2106 + return false;
2107 }
2108
2109 static struct domain_device *sas_ex_discover_expander(
2110 @@ -1030,8 +1030,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
2111 return res;
2112 }
2113
2114 - res = sas_ex_join_wide_port(dev, phy_id);
2115 - if (!res) {
2116 + if (sas_ex_join_wide_port(dev, phy_id)) {
2117 SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
2118 phy_id, SAS_ADDR(ex_phy->attached_sas_addr));
2119 return res;
2120 @@ -1077,8 +1076,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
2121 if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) ==
2122 SAS_ADDR(child->sas_addr)) {
2123 ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED;
2124 - res = sas_ex_join_wide_port(dev, i);
2125 - if (!res)
2126 + if (sas_ex_join_wide_port(dev, i))
2127 SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
2128 i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr));
2129
2130 @@ -1943,32 +1941,20 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
2131 {
2132 struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id];
2133 struct domain_device *child;
2134 - bool found = false;
2135 - int res, i;
2136 + int res;
2137
2138 SAS_DPRINTK("ex %016llx phy%d new device attached\n",
2139 SAS_ADDR(dev->sas_addr), phy_id);
2140 res = sas_ex_phy_discover(dev, phy_id);
2141 if (res)
2142 - goto out;
2143 - /* to support the wide port inserted */
2144 - for (i = 0; i < dev->ex_dev.num_phys; i++) {
2145 - struct ex_phy *ex_phy_temp = &dev->ex_dev.ex_phy[i];
2146 - if (i == phy_id)
2147 - continue;
2148 - if (SAS_ADDR(ex_phy_temp->attached_sas_addr) ==
2149 - SAS_ADDR(ex_phy->attached_sas_addr)) {
2150 - found = true;
2151 - break;
2152 - }
2153 - }
2154 - if (found) {
2155 - sas_ex_join_wide_port(dev, phy_id);
2156 + return res;
2157 +
2158 + if (sas_ex_join_wide_port(dev, phy_id))
2159 return 0;
2160 - }
2161 +
2162 res = sas_ex_discover_devices(dev, phy_id);
2163 - if (!res)
2164 - goto out;
2165 + if (res)
2166 + return res;
2167 list_for_each_entry(child, &dev->ex_dev.children, siblings) {
2168 if (SAS_ADDR(child->sas_addr) ==
2169 SAS_ADDR(ex_phy->attached_sas_addr)) {
2170 @@ -1978,7 +1964,6 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
2171 break;
2172 }
2173 }
2174 -out:
2175 return res;
2176 }
2177
2178 @@ -2109,9 +2094,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
2179 struct domain_device *dev = NULL;
2180
2181 res = sas_find_bcast_dev(port_dev, &dev);
2182 - if (res)
2183 - goto out;
2184 - if (dev) {
2185 + while (res == 0 && dev) {
2186 struct expander_device *ex = &dev->ex_dev;
2187 int i = 0, phy_id;
2188
2189 @@ -2123,8 +2106,10 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
2190 res = sas_rediscover(dev, phy_id);
2191 i = phy_id + 1;
2192 } while (i < ex->num_phys);
2193 +
2194 + dev = NULL;
2195 + res = sas_find_bcast_dev(port_dev, &dev);
2196 }
2197 -out:
2198 return res;
2199 }
2200
2201 diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
2202 index 386f0c5..cc8dc8c 100644
2203 --- a/drivers/scsi/scsi_error.c
2204 +++ b/drivers/scsi/scsi_error.c
2205 @@ -1687,6 +1687,20 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
2206 * requests are started.
2207 */
2208 scsi_run_host_queues(shost);
2209 +
2210 + /*
2211 + * if eh is active and host_eh_scheduled is pending we need to re-run
2212 + * recovery. we do this check after scsi_run_host_queues() to allow
2213 + * everything pent up since the last eh run a chance to make forward
2214 + * progress before we sync again. Either we'll immediately re-run
2215 + * recovery or scsi_device_unbusy() will wake us again when these
2216 + * pending commands complete.
2217 + */
2218 + spin_lock_irqsave(shost->host_lock, flags);
2219 + if (shost->host_eh_scheduled)
2220 + if (scsi_host_set_state(shost, SHOST_RECOVERY))
2221 + WARN_ON(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY));
2222 + spin_unlock_irqrestore(shost->host_lock, flags);
2223 }
2224
2225 /**
2226 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
2227 index 4037fd5..1929146 100644
2228 --- a/drivers/scsi/scsi_lib.c
2229 +++ b/drivers/scsi/scsi_lib.c
2230 @@ -406,10 +406,6 @@ static void scsi_run_queue(struct request_queue *q)
2231 LIST_HEAD(starved_list);
2232 unsigned long flags;
2233
2234 - /* if the device is dead, sdev will be NULL, so no queue to run */
2235 - if (!sdev)
2236 - return;
2237 -
2238 shost = sdev->host;
2239 if (scsi_target(sdev)->single_lun)
2240 scsi_single_lun_run(sdev);
2241 @@ -483,15 +479,26 @@ void scsi_requeue_run_queue(struct work_struct *work)
2242 */
2243 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
2244 {
2245 + struct scsi_device *sdev = cmd->device;
2246 struct request *req = cmd->request;
2247 unsigned long flags;
2248
2249 + /*
2250 + * We need to hold a reference on the device to avoid the queue being
2251 + * killed after the unlock and before scsi_run_queue is invoked which
2252 + * may happen because scsi_unprep_request() puts the command which
2253 + * releases its reference on the device.
2254 + */
2255 + get_device(&sdev->sdev_gendev);
2256 +
2257 spin_lock_irqsave(q->queue_lock, flags);
2258 scsi_unprep_request(req);
2259 blk_requeue_request(q, req);
2260 spin_unlock_irqrestore(q->queue_lock, flags);
2261
2262 scsi_run_queue(q);
2263 +
2264 + put_device(&sdev->sdev_gendev);
2265 }
2266
2267 void scsi_next_command(struct scsi_cmnd *cmd)
2268 @@ -1370,16 +1377,16 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
2269 * may be changed after request stacking drivers call the function,
2270 * regardless of taking lock or not.
2271 *
2272 - * When scsi can't dispatch I/Os anymore and needs to kill I/Os
2273 - * (e.g. !sdev), scsi needs to return 'not busy'.
2274 - * Otherwise, request stacking drivers may hold requests forever.
2275 + * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
2276 + * needs to return 'not busy'. Otherwise, request stacking drivers
2277 + * may hold requests forever.
2278 */
2279 static int scsi_lld_busy(struct request_queue *q)
2280 {
2281 struct scsi_device *sdev = q->queuedata;
2282 struct Scsi_Host *shost;
2283
2284 - if (!sdev)
2285 + if (blk_queue_dead(q))
2286 return 0;
2287
2288 shost = sdev->host;
2289 @@ -1490,12 +1497,6 @@ static void scsi_request_fn(struct request_queue *q)
2290 struct scsi_cmnd *cmd;
2291 struct request *req;
2292
2293 - if (!sdev) {
2294 - while ((req = blk_peek_request(q)) != NULL)
2295 - scsi_kill_request(req, q);
2296 - return;
2297 - }
2298 -
2299 if(!get_device(&sdev->sdev_gendev))
2300 /* We must be tearing the block queue down already */
2301 return;
2302 @@ -1697,20 +1698,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
2303 return q;
2304 }
2305
2306 -void scsi_free_queue(struct request_queue *q)
2307 -{
2308 - unsigned long flags;
2309 -
2310 - WARN_ON(q->queuedata);
2311 -
2312 - /* cause scsi_request_fn() to kill all non-finished requests */
2313 - spin_lock_irqsave(q->queue_lock, flags);
2314 - q->request_fn(q);
2315 - spin_unlock_irqrestore(q->queue_lock, flags);
2316 -
2317 - blk_cleanup_queue(q);
2318 -}
2319 -
2320 /*
2321 * Function: scsi_block_requests()
2322 *
2323 diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
2324 index be4fa6d..fd9f57f 100644
2325 --- a/drivers/scsi/scsi_priv.h
2326 +++ b/drivers/scsi/scsi_priv.h
2327 @@ -84,7 +84,6 @@ extern void scsi_next_command(struct scsi_cmnd *cmd);
2328 extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
2329 extern void scsi_run_host_queues(struct Scsi_Host *shost);
2330 extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
2331 -extern void scsi_free_queue(struct request_queue *q);
2332 extern int scsi_init_queue(void);
2333 extern void scsi_exit_queue(void);
2334 struct request_queue;
2335 diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
2336 index 01b0374..8906557 100644
2337 --- a/drivers/scsi/scsi_scan.c
2338 +++ b/drivers/scsi/scsi_scan.c
2339 @@ -1714,6 +1714,9 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
2340 {
2341 struct scsi_device *sdev;
2342 shost_for_each_device(sdev, shost) {
2343 + /* target removed before the device could be added */
2344 + if (sdev->sdev_state == SDEV_DEL)
2345 + continue;
2346 if (!scsi_host_scan_allowed(shost) ||
2347 scsi_sysfs_add_sdev(sdev) != 0)
2348 __scsi_remove_device(sdev);
2349 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
2350 index 04c2a27..bb7c482 100644
2351 --- a/drivers/scsi/scsi_sysfs.c
2352 +++ b/drivers/scsi/scsi_sysfs.c
2353 @@ -971,11 +971,8 @@ void __scsi_remove_device(struct scsi_device *sdev)
2354 sdev->host->hostt->slave_destroy(sdev);
2355 transport_destroy_device(dev);
2356
2357 - /* cause the request function to reject all I/O requests */
2358 - sdev->request_queue->queuedata = NULL;
2359 -
2360 /* Freeing the queue signals to block that we're done */
2361 - scsi_free_queue(sdev->request_queue);
2362 + blk_cleanup_queue(sdev->request_queue);
2363 put_device(dev);
2364 }
2365
2366 @@ -1000,7 +997,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
2367 struct scsi_device *sdev;
2368
2369 spin_lock_irqsave(shost->host_lock, flags);
2370 - starget->reap_ref++;
2371 restart:
2372 list_for_each_entry(sdev, &shost->__devices, siblings) {
2373 if (sdev->channel != starget->channel ||
2374 @@ -1014,14 +1010,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
2375 goto restart;
2376 }
2377 spin_unlock_irqrestore(shost->host_lock, flags);
2378 - scsi_target_reap(starget);
2379 -}
2380 -
2381 -static int __remove_child (struct device * dev, void * data)
2382 -{
2383 - if (scsi_is_target_device(dev))
2384 - __scsi_remove_target(to_scsi_target(dev));
2385 - return 0;
2386 }
2387
2388 /**
2389 @@ -1034,14 +1022,34 @@ static int __remove_child (struct device * dev, void * data)
2390 */
2391 void scsi_remove_target(struct device *dev)
2392 {
2393 - if (scsi_is_target_device(dev)) {
2394 - __scsi_remove_target(to_scsi_target(dev));
2395 - return;
2396 + struct Scsi_Host *shost = dev_to_shost(dev->parent);
2397 + struct scsi_target *starget, *found;
2398 + unsigned long flags;
2399 +
2400 + restart:
2401 + found = NULL;
2402 + spin_lock_irqsave(shost->host_lock, flags);
2403 + list_for_each_entry(starget, &shost->__targets, siblings) {
2404 + if (starget->state == STARGET_DEL)
2405 + continue;
2406 + if (starget->dev.parent == dev || &starget->dev == dev) {
2407 + found = starget;
2408 + found->reap_ref++;
2409 + break;
2410 + }
2411 }
2412 + spin_unlock_irqrestore(shost->host_lock, flags);
2413
2414 - get_device(dev);
2415 - device_for_each_child(dev, NULL, __remove_child);
2416 - put_device(dev);
2417 + if (found) {
2418 + __scsi_remove_target(found);
2419 + scsi_target_reap(found);
2420 + /* in the case where @dev has multiple starget children,
2421 + * continue removing.
2422 + *
2423 + * FIXME: does such a case exist?
2424 + */
2425 + goto restart;
2426 + }
2427 }
2428 EXPORT_SYMBOL(scsi_remove_target);
2429
2430 diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
2431 index 400ae21..469eb28 100644
2432 --- a/drivers/spi/spi-pl022.c
2433 +++ b/drivers/spi/spi-pl022.c
2434 @@ -489,6 +489,11 @@ static void giveback(struct pl022 *pl022)
2435 pl022->cur_transfer = NULL;
2436 pl022->cur_chip = NULL;
2437 spi_finalize_current_message(pl022->master);
2438 +
2439 + /* disable the SPI/SSP operation */
2440 + writew((readw(SSP_CR1(pl022->virtbase)) &
2441 + (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
2442 +
2443 }
2444
2445 /**
2446 diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
2447 index 917461c..175b3c9 100644
2448 --- a/drivers/staging/zsmalloc/zsmalloc-main.c
2449 +++ b/drivers/staging/zsmalloc/zsmalloc-main.c
2450 @@ -426,12 +426,6 @@ static struct page *find_get_zspage(struct size_class *class)
2451 }
2452
2453
2454 -/*
2455 - * If this becomes a separate module, register zs_init() with
2456 - * module_init(), zs_exit with module_exit(), and remove zs_initialized
2457 -*/
2458 -static int zs_initialized;
2459 -
2460 static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
2461 void *pcpu)
2462 {
2463 @@ -490,7 +484,7 @@ fail:
2464
2465 struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
2466 {
2467 - int i, error, ovhd_size;
2468 + int i, ovhd_size;
2469 struct zs_pool *pool;
2470
2471 if (!name)
2472 @@ -517,28 +511,9 @@ struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
2473
2474 }
2475
2476 - /*
2477 - * If this becomes a separate module, register zs_init with
2478 - * module_init, and remove this block
2479 - */
2480 - if (!zs_initialized) {
2481 - error = zs_init();
2482 - if (error)
2483 - goto cleanup;
2484 - zs_initialized = 1;
2485 - }
2486 -
2487 pool->flags = flags;
2488 pool->name = name;
2489
2490 - error = 0; /* Success */
2491 -
2492 -cleanup:
2493 - if (error) {
2494 - zs_destroy_pool(pool);
2495 - pool = NULL;
2496 - }
2497 -
2498 return pool;
2499 }
2500 EXPORT_SYMBOL_GPL(zs_create_pool);
2501 @@ -749,3 +724,9 @@ u64 zs_get_total_size_bytes(struct zs_pool *pool)
2502 return npages << PAGE_SHIFT;
2503 }
2504 EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);
2505 +
2506 +module_init(zs_init);
2507 +module_exit(zs_exit);
2508 +
2509 +MODULE_LICENSE("Dual BSD/GPL");
2510 +MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
2511 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
2512 index 8b1d5e6..e326d17 100644
2513 --- a/drivers/target/iscsi/iscsi_target.c
2514 +++ b/drivers/target/iscsi/iscsi_target.c
2515 @@ -427,19 +427,8 @@ int iscsit_reset_np_thread(
2516
2517 int iscsit_del_np_comm(struct iscsi_np *np)
2518 {
2519 - if (!np->np_socket)
2520 - return 0;
2521 -
2522 - /*
2523 - * Some network transports allocate their own struct sock->file,
2524 - * see if we need to free any additional allocated resources.
2525 - */
2526 - if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
2527 - kfree(np->np_socket->file);
2528 - np->np_socket->file = NULL;
2529 - }
2530 -
2531 - sock_release(np->np_socket);
2532 + if (np->np_socket)
2533 + sock_release(np->np_socket);
2534 return 0;
2535 }
2536
2537 @@ -4094,13 +4083,8 @@ int iscsit_close_connection(
2538 kfree(conn->conn_ops);
2539 conn->conn_ops = NULL;
2540
2541 - if (conn->sock) {
2542 - if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
2543 - kfree(conn->sock->file);
2544 - conn->sock->file = NULL;
2545 - }
2546 + if (conn->sock)
2547 sock_release(conn->sock);
2548 - }
2549 conn->thread_set = NULL;
2550
2551 pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
2552 diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
2553 index 2aaee7e..d1c4bc2 100644
2554 --- a/drivers/target/iscsi/iscsi_target_core.h
2555 +++ b/drivers/target/iscsi/iscsi_target_core.h
2556 @@ -224,7 +224,6 @@ enum iscsi_timer_flags_table {
2557 /* Used for struct iscsi_np->np_flags */
2558 enum np_flags_table {
2559 NPF_IP_NETWORK = 0x00,
2560 - NPF_SCTP_STRUCT_FILE = 0x01 /* Bugfix */
2561 };
2562
2563 /* Used for struct iscsi_np->np_thread_state */
2564 @@ -511,7 +510,6 @@ struct iscsi_conn {
2565 u16 local_port;
2566 int net_size;
2567 u32 auth_id;
2568 -#define CONNFLAG_SCTP_STRUCT_FILE 0x01
2569 u32 conn_flags;
2570 /* Used for iscsi_tx_login_rsp() */
2571 u32 login_itt;
2572 diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
2573 index a3656c9..ae30424 100644
2574 --- a/drivers/target/iscsi/iscsi_target_login.c
2575 +++ b/drivers/target/iscsi/iscsi_target_login.c
2576 @@ -795,22 +795,6 @@ int iscsi_target_setup_login_socket(
2577 }
2578 np->np_socket = sock;
2579 /*
2580 - * The SCTP stack needs struct socket->file.
2581 - */
2582 - if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
2583 - (np->np_network_transport == ISCSI_SCTP_UDP)) {
2584 - if (!sock->file) {
2585 - sock->file = kzalloc(sizeof(struct file), GFP_KERNEL);
2586 - if (!sock->file) {
2587 - pr_err("Unable to allocate struct"
2588 - " file for SCTP\n");
2589 - ret = -ENOMEM;
2590 - goto fail;
2591 - }
2592 - np->np_flags |= NPF_SCTP_STRUCT_FILE;
2593 - }
2594 - }
2595 - /*
2596 * Setup the np->np_sockaddr from the passed sockaddr setup
2597 * in iscsi_target_configfs.c code..
2598 */
2599 @@ -869,21 +853,15 @@ int iscsi_target_setup_login_socket(
2600
2601 fail:
2602 np->np_socket = NULL;
2603 - if (sock) {
2604 - if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
2605 - kfree(sock->file);
2606 - sock->file = NULL;
2607 - }
2608 -
2609 + if (sock)
2610 sock_release(sock);
2611 - }
2612 return ret;
2613 }
2614
2615 static int __iscsi_target_login_thread(struct iscsi_np *np)
2616 {
2617 u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0;
2618 - int err, ret = 0, set_sctp_conn_flag, stop;
2619 + int err, ret = 0, stop;
2620 struct iscsi_conn *conn = NULL;
2621 struct iscsi_login *login;
2622 struct iscsi_portal_group *tpg = NULL;
2623 @@ -894,7 +872,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
2624 struct sockaddr_in6 sock_in6;
2625
2626 flush_signals(current);
2627 - set_sctp_conn_flag = 0;
2628 sock = np->np_socket;
2629
2630 spin_lock_bh(&np->np_thread_lock);
2631 @@ -917,35 +894,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
2632 spin_unlock_bh(&np->np_thread_lock);
2633 goto out;
2634 }
2635 - /*
2636 - * The SCTP stack needs struct socket->file.
2637 - */
2638 - if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
2639 - (np->np_network_transport == ISCSI_SCTP_UDP)) {
2640 - if (!new_sock->file) {
2641 - new_sock->file = kzalloc(
2642 - sizeof(struct file), GFP_KERNEL);
2643 - if (!new_sock->file) {
2644 - pr_err("Unable to allocate struct"
2645 - " file for SCTP\n");
2646 - sock_release(new_sock);
2647 - /* Get another socket */
2648 - return 1;
2649 - }
2650 - set_sctp_conn_flag = 1;
2651 - }
2652 - }
2653 -
2654 iscsi_start_login_thread_timer(np);
2655
2656 conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
2657 if (!conn) {
2658 pr_err("Could not allocate memory for"
2659 " new connection\n");
2660 - if (set_sctp_conn_flag) {
2661 - kfree(new_sock->file);
2662 - new_sock->file = NULL;
2663 - }
2664 sock_release(new_sock);
2665 /* Get another socket */
2666 return 1;
2667 @@ -955,9 +909,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
2668 conn->conn_state = TARG_CONN_STATE_FREE;
2669 conn->sock = new_sock;
2670
2671 - if (set_sctp_conn_flag)
2672 - conn->conn_flags |= CONNFLAG_SCTP_STRUCT_FILE;
2673 -
2674 pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
2675 conn->conn_state = TARG_CONN_STATE_XPT_UP;
2676
2677 @@ -1205,13 +1156,8 @@ old_sess_out:
2678 iscsi_release_param_list(conn->param_list);
2679 conn->param_list = NULL;
2680 }
2681 - if (conn->sock) {
2682 - if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
2683 - kfree(conn->sock->file);
2684 - conn->sock->file = NULL;
2685 - }
2686 + if (conn->sock)
2687 sock_release(conn->sock);
2688 - }
2689 kfree(conn);
2690
2691 if (tpg) {
2692 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
2693 index 443704f..0686d61 100644
2694 --- a/drivers/target/target_core_transport.c
2695 +++ b/drivers/target/target_core_transport.c
2696 @@ -1976,6 +1976,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
2697 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
2698 case TCM_UNKNOWN_MODE_PAGE:
2699 case TCM_WRITE_PROTECTED:
2700 + case TCM_ADDRESS_OUT_OF_RANGE:
2701 case TCM_CHECK_CONDITION_ABORT_CMD:
2702 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
2703 case TCM_CHECK_CONDITION_NOT_READY:
2704 @@ -4656,6 +4657,15 @@ int transport_send_check_condition_and_sense(
2705 /* WRITE PROTECTED */
2706 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
2707 break;
2708 + case TCM_ADDRESS_OUT_OF_RANGE:
2709 + /* CURRENT ERROR */
2710 + buffer[offset] = 0x70;
2711 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
2712 + /* ILLEGAL REQUEST */
2713 + buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2714 + /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
2715 + buffer[offset+SPC_ASC_KEY_OFFSET] = 0x21;
2716 + break;
2717 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
2718 /* CURRENT ERROR */
2719 buffer[offset] = 0x70;
2720 diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
2721 index 4e57772..404413b 100644
2722 --- a/drivers/usb/core/devio.c
2723 +++ b/drivers/usb/core/devio.c
2724 @@ -1615,10 +1615,14 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
2725 void __user *addr = as->userurb;
2726 unsigned int i;
2727
2728 - if (as->userbuffer && urb->actual_length)
2729 - if (copy_to_user(as->userbuffer, urb->transfer_buffer,
2730 - urb->actual_length))
2731 + if (as->userbuffer && urb->actual_length) {
2732 + if (urb->number_of_packets > 0) /* Isochronous */
2733 + i = urb->transfer_buffer_length;
2734 + else /* Non-Isoc */
2735 + i = urb->actual_length;
2736 + if (copy_to_user(as->userbuffer, urb->transfer_buffer, i))
2737 return -EFAULT;
2738 + }
2739 if (put_user(as->status, &userurb->status))
2740 return -EFAULT;
2741 if (put_user(urb->actual_length, &userurb->actual_length))
2742 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
2743 index 1fc8f12..347bb05 100644
2744 --- a/drivers/usb/early/ehci-dbgp.c
2745 +++ b/drivers/usb/early/ehci-dbgp.c
2746 @@ -450,7 +450,7 @@ static int dbgp_ehci_startup(void)
2747 writel(FLAG_CF, &ehci_regs->configured_flag);
2748
2749 /* Wait until the controller is no longer halted */
2750 - loop = 10;
2751 + loop = 1000;
2752 do {
2753 status = readl(&ehci_regs->status);
2754 if (!(status & STS_HALT))
2755 diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
2756 index 29c854b..4e1f0aa 100644
2757 --- a/drivers/usb/gadget/u_ether.c
2758 +++ b/drivers/usb/gadget/u_ether.c
2759 @@ -796,12 +796,6 @@ int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
2760
2761 SET_ETHTOOL_OPS(net, &ops);
2762
2763 - /* two kinds of host-initiated state changes:
2764 - * - iff DATA transfer is active, carrier is "on"
2765 - * - tx queueing enabled if open *and* carrier is "on"
2766 - */
2767 - netif_carrier_off(net);
2768 -
2769 dev->gadget = g;
2770 SET_NETDEV_DEV(net, &g->dev);
2771 SET_NETDEV_DEVTYPE(net, &gadget_type);
2772 @@ -815,6 +809,12 @@ int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
2773 INFO(dev, "HOST MAC %pM\n", dev->host_mac);
2774
2775 the_dev = dev;
2776 +
2777 + /* two kinds of host-initiated state changes:
2778 + * - iff DATA transfer is active, carrier is "on"
2779 + * - tx queueing enabled if open *and* carrier is "on"
2780 + */
2781 + netif_carrier_off(net);
2782 }
2783
2784 return status;
2785 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2786 index 49484b3..ae1a4b8 100644
2787 --- a/drivers/usb/serial/option.c
2788 +++ b/drivers/usb/serial/option.c
2789 @@ -936,6 +936,8 @@ static const struct usb_device_id option_ids[] = {
2790 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
2791 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
2792 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2793 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
2794 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2795 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
2796 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2797 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
2798 diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
2799 index 8ec8a6e..f98ba40 100644
2800 --- a/drivers/usb/storage/uas.c
2801 +++ b/drivers/usb/storage/uas.c
2802 @@ -58,9 +58,6 @@ enum {
2803 SUBMIT_DATA_OUT_URB = (1 << 5),
2804 ALLOC_CMD_URB = (1 << 6),
2805 SUBMIT_CMD_URB = (1 << 7),
2806 - COMPLETED_DATA_IN = (1 << 8),
2807 - COMPLETED_DATA_OUT = (1 << 9),
2808 - DATA_COMPLETES_CMD = (1 << 10),
2809 };
2810
2811 /* Overrides scsi_pointer */
2812 @@ -114,7 +111,6 @@ static void uas_sense(struct urb *urb, struct scsi_cmnd *cmnd)
2813 {
2814 struct sense_iu *sense_iu = urb->transfer_buffer;
2815 struct scsi_device *sdev = cmnd->device;
2816 - struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
2817
2818 if (urb->actual_length > 16) {
2819 unsigned len = be16_to_cpup(&sense_iu->len);
2820 @@ -132,15 +128,13 @@ static void uas_sense(struct urb *urb, struct scsi_cmnd *cmnd)
2821 }
2822
2823 cmnd->result = sense_iu->status;
2824 - if (!(cmdinfo->state & DATA_COMPLETES_CMD))
2825 - cmnd->scsi_done(cmnd);
2826 + cmnd->scsi_done(cmnd);
2827 }
2828
2829 static void uas_sense_old(struct urb *urb, struct scsi_cmnd *cmnd)
2830 {
2831 struct sense_iu_old *sense_iu = urb->transfer_buffer;
2832 struct scsi_device *sdev = cmnd->device;
2833 - struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
2834
2835 if (urb->actual_length > 8) {
2836 unsigned len = be16_to_cpup(&sense_iu->len) - 2;
2837 @@ -158,8 +152,7 @@ static void uas_sense_old(struct urb *urb, struct scsi_cmnd *cmnd)
2838 }
2839
2840 cmnd->result = sense_iu->status;
2841 - if (!(cmdinfo->state & DATA_COMPLETES_CMD))
2842 - cmnd->scsi_done(cmnd);
2843 + cmnd->scsi_done(cmnd);
2844 }
2845
2846 static void uas_xfer_data(struct urb *urb, struct scsi_cmnd *cmnd,
2847 @@ -184,7 +177,6 @@ static void uas_stat_cmplt(struct urb *urb)
2848 struct Scsi_Host *shost = urb->context;
2849 struct uas_dev_info *devinfo = (void *)shost->hostdata[0];
2850 struct scsi_cmnd *cmnd;
2851 - struct uas_cmd_info *cmdinfo;
2852 u16 tag;
2853 int ret;
2854
2855 @@ -210,32 +202,12 @@ static void uas_stat_cmplt(struct urb *urb)
2856 dev_err(&urb->dev->dev, "failed submit status urb\n");
2857 return;
2858 }
2859 - cmdinfo = (void *)&cmnd->SCp;
2860
2861 switch (iu->iu_id) {
2862 case IU_ID_STATUS:
2863 if (devinfo->cmnd == cmnd)
2864 devinfo->cmnd = NULL;
2865
2866 - if (!(cmdinfo->state & COMPLETED_DATA_IN) &&
2867 - cmdinfo->data_in_urb) {
2868 - if (devinfo->use_streams) {
2869 - cmdinfo->state |= DATA_COMPLETES_CMD;
2870 - usb_unlink_urb(cmdinfo->data_in_urb);
2871 - } else {
2872 - usb_free_urb(cmdinfo->data_in_urb);
2873 - }
2874 - }
2875 - if (!(cmdinfo->state & COMPLETED_DATA_OUT) &&
2876 - cmdinfo->data_out_urb) {
2877 - if (devinfo->use_streams) {
2878 - cmdinfo->state |= DATA_COMPLETES_CMD;
2879 - usb_unlink_urb(cmdinfo->data_in_urb);
2880 - } else {
2881 - usb_free_urb(cmdinfo->data_out_urb);
2882 - }
2883 - }
2884 -
2885 if (urb->actual_length < 16)
2886 devinfo->uas_sense_old = 1;
2887 if (devinfo->uas_sense_old)
2888 @@ -264,59 +236,27 @@ static void uas_stat_cmplt(struct urb *urb)
2889 dev_err(&urb->dev->dev, "failed submit status urb\n");
2890 }
2891
2892 -static void uas_data_out_cmplt(struct urb *urb)
2893 -{
2894 - struct scsi_cmnd *cmnd = urb->context;
2895 - struct scsi_data_buffer *sdb = scsi_out(cmnd);
2896 - struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
2897 -
2898 - cmdinfo->state |= COMPLETED_DATA_OUT;
2899 -
2900 - sdb->resid = sdb->length - urb->actual_length;
2901 - usb_free_urb(urb);
2902 -
2903 - if (cmdinfo->state & DATA_COMPLETES_CMD)
2904 - cmnd->scsi_done(cmnd);
2905 -}
2906 -
2907 -static void uas_data_in_cmplt(struct urb *urb)
2908 +static void uas_data_cmplt(struct urb *urb)
2909 {
2910 - struct scsi_cmnd *cmnd = urb->context;
2911 - struct scsi_data_buffer *sdb = scsi_in(cmnd);
2912 - struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
2913 -
2914 - cmdinfo->state |= COMPLETED_DATA_IN;
2915 -
2916 + struct scsi_data_buffer *sdb = urb->context;
2917 sdb->resid = sdb->length - urb->actual_length;
2918 usb_free_urb(urb);
2919 -
2920 - if (cmdinfo->state & DATA_COMPLETES_CMD)
2921 - cmnd->scsi_done(cmnd);
2922 }
2923
2924 static struct urb *uas_alloc_data_urb(struct uas_dev_info *devinfo, gfp_t gfp,
2925 - unsigned int pipe, struct scsi_cmnd *cmnd,
2926 - enum dma_data_direction dir)
2927 + unsigned int pipe, u16 stream_id,
2928 + struct scsi_data_buffer *sdb,
2929 + enum dma_data_direction dir)
2930 {
2931 - struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
2932 struct usb_device *udev = devinfo->udev;
2933 struct urb *urb = usb_alloc_urb(0, gfp);
2934 - struct scsi_data_buffer *sdb;
2935 - usb_complete_t complete_fn;
2936 - u16 stream_id = cmdinfo->stream;
2937
2938 if (!urb)
2939 goto out;
2940 - if (dir == DMA_FROM_DEVICE) {
2941 - sdb = scsi_in(cmnd);
2942 - complete_fn = uas_data_in_cmplt;
2943 - } else {
2944 - sdb = scsi_out(cmnd);
2945 - complete_fn = uas_data_out_cmplt;
2946 - }
2947 - usb_fill_bulk_urb(urb, udev, pipe, NULL, sdb->length,
2948 - complete_fn, cmnd);
2949 - urb->stream_id = stream_id;
2950 + usb_fill_bulk_urb(urb, udev, pipe, NULL, sdb->length, uas_data_cmplt,
2951 + sdb);
2952 + if (devinfo->use_streams)
2953 + urb->stream_id = stream_id;
2954 urb->num_sgs = udev->bus->sg_tablesize ? sdb->table.nents : 0;
2955 urb->sg = sdb->table.sgl;
2956 out:
2957 @@ -418,8 +358,8 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
2958
2959 if (cmdinfo->state & ALLOC_DATA_IN_URB) {
2960 cmdinfo->data_in_urb = uas_alloc_data_urb(devinfo, gfp,
2961 - devinfo->data_in_pipe, cmnd,
2962 - DMA_FROM_DEVICE);
2963 + devinfo->data_in_pipe, cmdinfo->stream,
2964 + scsi_in(cmnd), DMA_FROM_DEVICE);
2965 if (!cmdinfo->data_in_urb)
2966 return SCSI_MLQUEUE_DEVICE_BUSY;
2967 cmdinfo->state &= ~ALLOC_DATA_IN_URB;
2968 @@ -436,8 +376,8 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
2969
2970 if (cmdinfo->state & ALLOC_DATA_OUT_URB) {
2971 cmdinfo->data_out_urb = uas_alloc_data_urb(devinfo, gfp,
2972 - devinfo->data_out_pipe, cmnd,
2973 - DMA_TO_DEVICE);
2974 + devinfo->data_out_pipe, cmdinfo->stream,
2975 + scsi_out(cmnd), DMA_TO_DEVICE);
2976 if (!cmdinfo->data_out_urb)
2977 return SCSI_MLQUEUE_DEVICE_BUSY;
2978 cmdinfo->state &= ~ALLOC_DATA_OUT_URB;
2979 diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
2980 index 4270414..58b7d14 100644
2981 --- a/fs/btrfs/async-thread.c
2982 +++ b/fs/btrfs/async-thread.c
2983 @@ -206,10 +206,17 @@ static noinline void run_ordered_completions(struct btrfs_workers *workers,
2984
2985 work->ordered_func(work);
2986
2987 - /* now take the lock again and call the freeing code */
2988 + /* now take the lock again and drop our item from the list */
2989 spin_lock(&workers->order_lock);
2990 list_del(&work->order_list);
2991 + spin_unlock(&workers->order_lock);
2992 +
2993 + /*
2994 + * we don't want to call the ordered free functions
2995 + * with the lock held though
2996 + */
2997 work->ordered_free(work);
2998 + spin_lock(&workers->order_lock);
2999 }
3000
3001 spin_unlock(&workers->order_lock);
3002 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
3003 index 87ce8af..65a78e9 100644
3004 --- a/fs/cifs/connect.c
3005 +++ b/fs/cifs/connect.c
3006 @@ -238,8 +238,8 @@ static const match_table_t cifs_mount_option_tokens = {
3007 enum {
3008 Opt_sec_krb5, Opt_sec_krb5i, Opt_sec_krb5p,
3009 Opt_sec_ntlmsspi, Opt_sec_ntlmssp,
3010 - Opt_ntlm, Opt_sec_ntlmi, Opt_sec_ntlmv2i,
3011 - Opt_sec_nontlm, Opt_sec_lanman,
3012 + Opt_ntlm, Opt_sec_ntlmi, Opt_sec_ntlmv2,
3013 + Opt_sec_ntlmv2i, Opt_sec_lanman,
3014 Opt_sec_none,
3015
3016 Opt_sec_err
3017 @@ -253,8 +253,9 @@ static const match_table_t cifs_secflavor_tokens = {
3018 { Opt_sec_ntlmssp, "ntlmssp" },
3019 { Opt_ntlm, "ntlm" },
3020 { Opt_sec_ntlmi, "ntlmi" },
3021 + { Opt_sec_ntlmv2, "nontlm" },
3022 + { Opt_sec_ntlmv2, "ntlmv2" },
3023 { Opt_sec_ntlmv2i, "ntlmv2i" },
3024 - { Opt_sec_nontlm, "nontlm" },
3025 { Opt_sec_lanman, "lanman" },
3026 { Opt_sec_none, "none" },
3027
3028 @@ -1163,7 +1164,7 @@ static int cifs_parse_security_flavors(char *value,
3029 case Opt_sec_ntlmi:
3030 vol->secFlg |= CIFSSEC_MAY_NTLM | CIFSSEC_MUST_SIGN;
3031 break;
3032 - case Opt_sec_nontlm:
3033 + case Opt_sec_ntlmv2:
3034 vol->secFlg |= CIFSSEC_MAY_NTLMV2;
3035 break;
3036 case Opt_sec_ntlmv2i:
3037 diff --git a/fs/exec.c b/fs/exec.c
3038 index 29e5f84..126e01c 100644
3039 --- a/fs/exec.c
3040 +++ b/fs/exec.c
3041 @@ -1024,7 +1024,7 @@ static void flush_old_files(struct files_struct * files)
3042 unsigned long set, i;
3043
3044 j++;
3045 - i = j * __NFDBITS;
3046 + i = j * BITS_PER_LONG;
3047 fdt = files_fdtable(files);
3048 if (i >= fdt->max_fds)
3049 break;
3050 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
3051 index 8da837b..df76291 100644
3052 --- a/fs/ext4/balloc.c
3053 +++ b/fs/ext4/balloc.c
3054 @@ -584,7 +584,8 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
3055 if (bitmap_bh == NULL)
3056 continue;
3057
3058 - x = ext4_count_free(bitmap_bh, sb->s_blocksize);
3059 + x = ext4_count_free(bitmap_bh->b_data,
3060 + EXT4_BLOCKS_PER_GROUP(sb) / 8);
3061 printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
3062 i, ext4_free_group_clusters(sb, gdp), x);
3063 bitmap_count += x;
3064 diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
3065 index fa3af81..bbde5d5 100644
3066 --- a/fs/ext4/bitmap.c
3067 +++ b/fs/ext4/bitmap.c
3068 @@ -11,21 +11,15 @@
3069 #include <linux/jbd2.h>
3070 #include "ext4.h"
3071
3072 -#ifdef EXT4FS_DEBUG
3073 -
3074 static const int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};
3075
3076 -unsigned int ext4_count_free(struct buffer_head *map, unsigned int numchars)
3077 +unsigned int ext4_count_free(char *bitmap, unsigned int numchars)
3078 {
3079 unsigned int i, sum = 0;
3080
3081 - if (!map)
3082 - return 0;
3083 for (i = 0; i < numchars; i++)
3084 - sum += nibblemap[map->b_data[i] & 0xf] +
3085 - nibblemap[(map->b_data[i] >> 4) & 0xf];
3086 + sum += nibblemap[bitmap[i] & 0xf] +
3087 + nibblemap[(bitmap[i] >> 4) & 0xf];
3088 return sum;
3089 }
3090
3091 -#endif /* EXT4FS_DEBUG */
3092 -
3093 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
3094 index 0e01e90..47d1c8c 100644
3095 --- a/fs/ext4/ext4.h
3096 +++ b/fs/ext4/ext4.h
3097 @@ -1140,8 +1140,7 @@ struct ext4_sb_info {
3098 unsigned long s_desc_per_block; /* Number of group descriptors per block */
3099 ext4_group_t s_groups_count; /* Number of groups in the fs */
3100 ext4_group_t s_blockfile_groups;/* Groups acceptable for non-extent files */
3101 - unsigned long s_overhead_last; /* Last calculated overhead */
3102 - unsigned long s_blocks_last; /* Last seen block count */
3103 + unsigned long s_overhead; /* # of fs overhead clusters */
3104 unsigned int s_cluster_ratio; /* Number of blocks per cluster */
3105 unsigned int s_cluster_bits; /* log2 of s_cluster_ratio */
3106 loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */
3107 @@ -1783,7 +1782,7 @@ struct mmpd_data {
3108 # define NORET_AND noreturn,
3109
3110 /* bitmap.c */
3111 -extern unsigned int ext4_count_free(struct buffer_head *, unsigned);
3112 +extern unsigned int ext4_count_free(char *bitmap, unsigned numchars);
3113
3114 /* balloc.c */
3115 extern unsigned int ext4_block_group(struct super_block *sb,
3116 @@ -1950,6 +1949,7 @@ extern int ext4_group_extend(struct super_block *sb,
3117 extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
3118
3119 /* super.c */
3120 +extern int ext4_calculate_overhead(struct super_block *sb);
3121 extern void *ext4_kvmalloc(size_t size, gfp_t flags);
3122 extern void *ext4_kvzalloc(size_t size, gfp_t flags);
3123 extern void ext4_kvfree(void *ptr);
3124 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
3125 index abcdeab..6b7daa4 100644
3126 --- a/fs/ext4/extents.c
3127 +++ b/fs/ext4/extents.c
3128 @@ -2503,10 +2503,10 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
3129 {
3130 struct super_block *sb = inode->i_sb;
3131 int depth = ext_depth(inode);
3132 - struct ext4_ext_path *path;
3133 + struct ext4_ext_path *path = NULL;
3134 ext4_fsblk_t partial_cluster = 0;
3135 handle_t *handle;
3136 - int i, err;
3137 + int i = 0, err;
3138
3139 ext_debug("truncate since %u to %u\n", start, end);
3140
3141 @@ -2539,8 +2539,12 @@ again:
3142 }
3143 depth = ext_depth(inode);
3144 ex = path[depth].p_ext;
3145 - if (!ex)
3146 + if (!ex) {
3147 + ext4_ext_drop_refs(path);
3148 + kfree(path);
3149 + path = NULL;
3150 goto cont;
3151 + }
3152
3153 ee_block = le32_to_cpu(ex->ee_block);
3154
3155 @@ -2570,8 +2574,6 @@ again:
3156 if (err < 0)
3157 goto out;
3158 }
3159 - ext4_ext_drop_refs(path);
3160 - kfree(path);
3161 }
3162 cont:
3163
3164 @@ -2580,19 +2582,27 @@ cont:
3165 * after i_size and walking into the tree depth-wise.
3166 */
3167 depth = ext_depth(inode);
3168 - path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
3169 - if (path == NULL) {
3170 - ext4_journal_stop(handle);
3171 - return -ENOMEM;
3172 - }
3173 - path[0].p_depth = depth;
3174 - path[0].p_hdr = ext_inode_hdr(inode);
3175 + if (path) {
3176 + int k = i = depth;
3177 + while (--k > 0)
3178 + path[k].p_block =
3179 + le16_to_cpu(path[k].p_hdr->eh_entries)+1;
3180 + } else {
3181 + path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
3182 + GFP_NOFS);
3183 + if (path == NULL) {
3184 + ext4_journal_stop(handle);
3185 + return -ENOMEM;
3186 + }
3187 + path[0].p_depth = depth;
3188 + path[0].p_hdr = ext_inode_hdr(inode);
3189
3190 - if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
3191 - err = -EIO;
3192 - goto out;
3193 + if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
3194 + err = -EIO;
3195 + goto out;
3196 + }
3197 }
3198 - i = err = 0;
3199 + err = 0;
3200
3201 while (i >= 0 && err == 0) {
3202 if (i == depth) {
3203 @@ -2706,8 +2716,10 @@ cont:
3204 out:
3205 ext4_ext_drop_refs(path);
3206 kfree(path);
3207 - if (err == -EAGAIN)
3208 + if (err == -EAGAIN) {
3209 + path = NULL;
3210 goto again;
3211 + }
3212 ext4_journal_stop(handle);
3213
3214 return err;
3215 diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
3216 index 8900f8b..0ee374d 100644
3217 --- a/fs/ext4/ialloc.c
3218 +++ b/fs/ext4/ialloc.c
3219 @@ -1013,7 +1013,8 @@ unsigned long ext4_count_free_inodes(struct super_block *sb)
3220 if (!bitmap_bh)
3221 continue;
3222
3223 - x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
3224 + x = ext4_count_free(bitmap_bh->b_data,
3225 + EXT4_INODES_PER_GROUP(sb) / 8);
3226 printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
3227 (unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
3228 bitmap_count += x;
3229 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3230 index c77b0bd..55a654d 100644
3231 --- a/fs/ext4/inode.c
3232 +++ b/fs/ext4/inode.c
3233 @@ -279,6 +279,15 @@ void ext4_da_update_reserve_space(struct inode *inode,
3234 used = ei->i_reserved_data_blocks;
3235 }
3236
3237 + if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
3238 + ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
3239 + "with only %d reserved metadata blocks\n", __func__,
3240 + inode->i_ino, ei->i_allocated_meta_blocks,
3241 + ei->i_reserved_meta_blocks);
3242 + WARN_ON(1);
3243 + ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
3244 + }
3245 +
3246 /* Update per-inode reservations */
3247 ei->i_reserved_data_blocks -= used;
3248 ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
3249 @@ -1104,6 +1113,17 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
3250 struct ext4_inode_info *ei = EXT4_I(inode);
3251 unsigned int md_needed;
3252 int ret;
3253 + ext4_lblk_t save_last_lblock;
3254 + int save_len;
3255 +
3256 + /*
3257 + * We will charge metadata quota at writeout time; this saves
3258 + * us from metadata over-estimation, though we may go over by
3259 + * a small amount in the end. Here we just reserve for data.
3260 + */
3261 + ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
3262 + if (ret)
3263 + return ret;
3264
3265 /*
3266 * recalculate the amount of metadata blocks to reserve
3267 @@ -1112,32 +1132,31 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
3268 */
3269 repeat:
3270 spin_lock(&ei->i_block_reservation_lock);
3271 + /*
3272 + * ext4_calc_metadata_amount() has side effects, which we have
3273 + * to be prepared undo if we fail to claim space.
3274 + */
3275 + save_len = ei->i_da_metadata_calc_len;
3276 + save_last_lblock = ei->i_da_metadata_calc_last_lblock;
3277 md_needed = EXT4_NUM_B2C(sbi,
3278 ext4_calc_metadata_amount(inode, lblock));
3279 trace_ext4_da_reserve_space(inode, md_needed);
3280 - spin_unlock(&ei->i_block_reservation_lock);
3281
3282 /*
3283 - * We will charge metadata quota at writeout time; this saves
3284 - * us from metadata over-estimation, though we may go over by
3285 - * a small amount in the end. Here we just reserve for data.
3286 - */
3287 - ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
3288 - if (ret)
3289 - return ret;
3290 - /*
3291 * We do still charge estimated metadata to the sb though;
3292 * we cannot afford to run out of free blocks.
3293 */
3294 if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
3295 - dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
3296 + ei->i_da_metadata_calc_len = save_len;
3297 + ei->i_da_metadata_calc_last_lblock = save_last_lblock;
3298 + spin_unlock(&ei->i_block_reservation_lock);
3299 if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
3300 yield();
3301 goto repeat;
3302 }
3303 + dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
3304 return -ENOSPC;
3305 }
3306 - spin_lock(&ei->i_block_reservation_lock);
3307 ei->i_reserved_data_blocks++;
3308 ei->i_reserved_meta_blocks += md_needed;
3309 spin_unlock(&ei->i_block_reservation_lock);
3310 diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
3311 index 53589ff..3407a62 100644
3312 --- a/fs/ext4/resize.c
3313 +++ b/fs/ext4/resize.c
3314 @@ -1141,7 +1141,7 @@ static void ext4_update_super(struct super_block *sb,
3315 struct ext4_new_group_data *group_data = flex_gd->groups;
3316 struct ext4_sb_info *sbi = EXT4_SB(sb);
3317 struct ext4_super_block *es = sbi->s_es;
3318 - int i;
3319 + int i, ret;
3320
3321 BUG_ON(flex_gd->count == 0 || group_data == NULL);
3322 /*
3323 @@ -1216,6 +1216,11 @@ static void ext4_update_super(struct super_block *sb,
3324 &sbi->s_flex_groups[flex_group].free_inodes);
3325 }
3326
3327 + /*
3328 + * Update the fs overhead information
3329 + */
3330 + ext4_calculate_overhead(sb);
3331 +
3332 if (test_opt(sb, DEBUG))
3333 printk(KERN_DEBUG "EXT4-fs: added group %u:"
3334 "%llu blocks(%llu free %llu reserved)\n", flex_gd->count,
3335 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
3336 index a68703a..7fe3869 100644
3337 --- a/fs/ext4/super.c
3338 +++ b/fs/ext4/super.c
3339 @@ -2944,6 +2944,114 @@ static void ext4_destroy_lazyinit_thread(void)
3340 kthread_stop(ext4_lazyinit_task);
3341 }
3342
3343 +/*
3344 + * Note: calculating the overhead so we can be compatible with
3345 + * historical BSD practice is quite difficult in the face of
3346 + * clusters/bigalloc. This is because multiple metadata blocks from
3347 + * different block group can end up in the same allocation cluster.
3348 + * Calculating the exact overhead in the face of clustered allocation
3349 + * requires either O(all block bitmaps) in memory or O(number of block
3350 + * groups**2) in time. We will still calculate the superblock for
3351 + * older file systems --- and if we come across with a bigalloc file
3352 + * system with zero in s_overhead_clusters the estimate will be close to
3353 + * correct especially for very large cluster sizes --- but for newer
3354 + * file systems, it's better to calculate this figure once at mkfs
3355 + * time, and store it in the superblock. If the superblock value is
3356 + * present (even for non-bigalloc file systems), we will use it.
3357 + */
3358 +static int count_overhead(struct super_block *sb, ext4_group_t grp,
3359 + char *buf)
3360 +{
3361 + struct ext4_sb_info *sbi = EXT4_SB(sb);
3362 + struct ext4_group_desc *gdp;
3363 + ext4_fsblk_t first_block, last_block, b;
3364 + ext4_group_t i, ngroups = ext4_get_groups_count(sb);
3365 + int s, j, count = 0;
3366 +
3367 + first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
3368 + (grp * EXT4_BLOCKS_PER_GROUP(sb));
3369 + last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
3370 + for (i = 0; i < ngroups; i++) {
3371 + gdp = ext4_get_group_desc(sb, i, NULL);
3372 + b = ext4_block_bitmap(sb, gdp);
3373 + if (b >= first_block && b <= last_block) {
3374 + ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
3375 + count++;
3376 + }
3377 + b = ext4_inode_bitmap(sb, gdp);
3378 + if (b >= first_block && b <= last_block) {
3379 + ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
3380 + count++;
3381 + }
3382 + b = ext4_inode_table(sb, gdp);
3383 + if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
3384 + for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
3385 + int c = EXT4_B2C(sbi, b - first_block);
3386 + ext4_set_bit(c, buf);
3387 + count++;
3388 + }
3389 + if (i != grp)
3390 + continue;
3391 + s = 0;
3392 + if (ext4_bg_has_super(sb, grp)) {
3393 + ext4_set_bit(s++, buf);
3394 + count++;
3395 + }
3396 + for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
3397 + ext4_set_bit(EXT4_B2C(sbi, s++), buf);
3398 + count++;
3399 + }
3400 + }
3401 + if (!count)
3402 + return 0;
3403 + return EXT4_CLUSTERS_PER_GROUP(sb) -
3404 + ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
3405 +}
3406 +
3407 +/*
3408 + * Compute the overhead and stash it in sbi->s_overhead
3409 + */
3410 +int ext4_calculate_overhead(struct super_block *sb)
3411 +{
3412 + struct ext4_sb_info *sbi = EXT4_SB(sb);
3413 + struct ext4_super_block *es = sbi->s_es;
3414 + ext4_group_t i, ngroups = ext4_get_groups_count(sb);
3415 + ext4_fsblk_t overhead = 0;
3416 + char *buf = (char *) get_zeroed_page(GFP_KERNEL);
3417 +
3418 + memset(buf, 0, PAGE_SIZE);
3419 + if (!buf)
3420 + return -ENOMEM;
3421 +
3422 + /*
3423 + * Compute the overhead (FS structures). This is constant
3424 + * for a given filesystem unless the number of block groups
3425 + * changes so we cache the previous value until it does.
3426 + */
3427 +
3428 + /*
3429 + * All of the blocks before first_data_block are overhead
3430 + */
3431 + overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
3432 +
3433 + /*
3434 + * Add the overhead found in each block group
3435 + */
3436 + for (i = 0; i < ngroups; i++) {
3437 + int blks;
3438 +
3439 + blks = count_overhead(sb, i, buf);
3440 + overhead += blks;
3441 + if (blks)
3442 + memset(buf, 0, PAGE_SIZE);
3443 + cond_resched();
3444 + }
3445 + sbi->s_overhead = overhead;
3446 + smp_wmb();
3447 + free_page((unsigned long) buf);
3448 + return 0;
3449 +}
3450 +
3451 static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3452 {
3453 char *orig_data = kstrdup(data, GFP_KERNEL);
3454 @@ -3559,6 +3667,18 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3455
3456 no_journal:
3457 /*
3458 + * Get the # of file system overhead blocks from the
3459 + * superblock if present.
3460 + */
3461 + if (es->s_overhead_clusters)
3462 + sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
3463 + else {
3464 + ret = ext4_calculate_overhead(sb);
3465 + if (ret)
3466 + goto failed_mount_wq;
3467 + }
3468 +
3469 + /*
3470 * The maximum number of concurrent works can be high and
3471 * concurrency isn't really necessary. Limit it to 1.
3472 */
3473 @@ -4421,67 +4541,21 @@ restore_opts:
3474 return err;
3475 }
3476
3477 -/*
3478 - * Note: calculating the overhead so we can be compatible with
3479 - * historical BSD practice is quite difficult in the face of
3480 - * clusters/bigalloc. This is because multiple metadata blocks from
3481 - * different block group can end up in the same allocation cluster.
3482 - * Calculating the exact overhead in the face of clustered allocation
3483 - * requires either O(all block bitmaps) in memory or O(number of block
3484 - * groups**2) in time. We will still calculate the superblock for
3485 - * older file systems --- and if we come across with a bigalloc file
3486 - * system with zero in s_overhead_clusters the estimate will be close to
3487 - * correct especially for very large cluster sizes --- but for newer
3488 - * file systems, it's better to calculate this figure once at mkfs
3489 - * time, and store it in the superblock. If the superblock value is
3490 - * present (even for non-bigalloc file systems), we will use it.
3491 - */
3492 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
3493 {
3494 struct super_block *sb = dentry->d_sb;
3495 struct ext4_sb_info *sbi = EXT4_SB(sb);
3496 struct ext4_super_block *es = sbi->s_es;
3497 - struct ext4_group_desc *gdp;
3498 + ext4_fsblk_t overhead = 0;
3499 u64 fsid;
3500 s64 bfree;
3501
3502 - if (test_opt(sb, MINIX_DF)) {
3503 - sbi->s_overhead_last = 0;
3504 - } else if (es->s_overhead_clusters) {
3505 - sbi->s_overhead_last = le32_to_cpu(es->s_overhead_clusters);
3506 - } else if (sbi->s_blocks_last != ext4_blocks_count(es)) {
3507 - ext4_group_t i, ngroups = ext4_get_groups_count(sb);
3508 - ext4_fsblk_t overhead = 0;
3509 -
3510 - /*
3511 - * Compute the overhead (FS structures). This is constant
3512 - * for a given filesystem unless the number of block groups
3513 - * changes so we cache the previous value until it does.
3514 - */
3515 -
3516 - /*
3517 - * All of the blocks before first_data_block are
3518 - * overhead
3519 - */
3520 - overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
3521 -
3522 - /*
3523 - * Add the overhead found in each block group
3524 - */
3525 - for (i = 0; i < ngroups; i++) {
3526 - gdp = ext4_get_group_desc(sb, i, NULL);
3527 - overhead += ext4_num_overhead_clusters(sb, i, gdp);
3528 - cond_resched();
3529 - }
3530 - sbi->s_overhead_last = overhead;
3531 - smp_wmb();
3532 - sbi->s_blocks_last = ext4_blocks_count(es);
3533 - }
3534 + if (!test_opt(sb, MINIX_DF))
3535 + overhead = sbi->s_overhead;
3536
3537 buf->f_type = EXT4_SUPER_MAGIC;
3538 buf->f_bsize = sb->s_blocksize;
3539 - buf->f_blocks = (ext4_blocks_count(es) -
3540 - EXT4_C2B(sbi, sbi->s_overhead_last));
3541 + buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, sbi->s_overhead);
3542 bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
3543 percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
3544 /* prevent underflow in case that few free space is available */
3545 diff --git a/fs/locks.c b/fs/locks.c
3546 index 6a64f15..fcc50ab 100644
3547 --- a/fs/locks.c
3548 +++ b/fs/locks.c
3549 @@ -308,7 +308,7 @@ static int flock_make_lock(struct file *filp, struct file_lock **lock,
3550 return 0;
3551 }
3552
3553 -static int assign_type(struct file_lock *fl, int type)
3554 +static int assign_type(struct file_lock *fl, long type)
3555 {
3556 switch (type) {
3557 case F_RDLCK:
3558 @@ -445,7 +445,7 @@ static const struct lock_manager_operations lease_manager_ops = {
3559 /*
3560 * Initialize a lease, use the default lock manager operations
3561 */
3562 -static int lease_init(struct file *filp, int type, struct file_lock *fl)
3563 +static int lease_init(struct file *filp, long type, struct file_lock *fl)
3564 {
3565 if (assign_type(fl, type) != 0)
3566 return -EINVAL;
3567 @@ -463,7 +463,7 @@ static int lease_init(struct file *filp, int type, struct file_lock *fl)
3568 }
3569
3570 /* Allocate a file_lock initialised to this type of lease */
3571 -static struct file_lock *lease_alloc(struct file *filp, int type)
3572 +static struct file_lock *lease_alloc(struct file *filp, long type)
3573 {
3574 struct file_lock *fl = locks_alloc_lock();
3575 int error = -ENOMEM;
3576 diff --git a/fs/nfs/file.c b/fs/nfs/file.c
3577 index aa9b709..f0f439d 100644
3578 --- a/fs/nfs/file.c
3579 +++ b/fs/nfs/file.c
3580 @@ -451,8 +451,11 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
3581
3582 dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
3583
3584 - /* Only do I/O if gfp is a superset of GFP_KERNEL */
3585 - if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) {
3586 + /* Only do I/O if gfp is a superset of GFP_KERNEL, and we're not
3587 + * doing this memory reclaim for a fs-related allocation.
3588 + */
3589 + if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL &&
3590 + !(current->flags & PF_FSTRANS)) {
3591 int how = FLUSH_SYNC;
3592
3593 /* Don't let kswapd deadlock waiting for OOM RPC calls */
3594 diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
3595 index 93aa3a4..929ba01 100644
3596 --- a/fs/nfs/idmap.c
3597 +++ b/fs/nfs/idmap.c
3598 @@ -205,12 +205,18 @@ static int nfs_idmap_init_keyring(void)
3599 if (ret < 0)
3600 goto failed_put_key;
3601
3602 + ret = register_key_type(&key_type_id_resolver_legacy);
3603 + if (ret < 0)
3604 + goto failed_reg_legacy;
3605 +
3606 set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
3607 cred->thread_keyring = keyring;
3608 cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
3609 id_resolver_cache = cred;
3610 return 0;
3611
3612 +failed_reg_legacy:
3613 + unregister_key_type(&key_type_id_resolver);
3614 failed_put_key:
3615 key_put(keyring);
3616 failed_put_cred:
3617 @@ -222,6 +228,7 @@ static void nfs_idmap_quit_keyring(void)
3618 {
3619 key_revoke(id_resolver_cache->thread_keyring);
3620 unregister_key_type(&key_type_id_resolver);
3621 + unregister_key_type(&key_type_id_resolver_legacy);
3622 put_cred(id_resolver_cache);
3623 }
3624
3625 @@ -385,7 +392,7 @@ static const struct rpc_pipe_ops idmap_upcall_ops = {
3626 };
3627
3628 static struct key_type key_type_id_resolver_legacy = {
3629 - .name = "id_resolver",
3630 + .name = "id_legacy",
3631 .instantiate = user_instantiate,
3632 .match = user_match,
3633 .revoke = user_revoke,
3634 @@ -658,6 +665,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
3635 if (ret < 0)
3636 goto out2;
3637
3638 + BUG_ON(idmap->idmap_key_cons != NULL);
3639 idmap->idmap_key_cons = cons;
3640
3641 ret = rpc_queue_upcall(idmap->idmap_pipe, msg);
3642 @@ -671,8 +679,7 @@ out2:
3643 out1:
3644 kfree(msg);
3645 out0:
3646 - key_revoke(cons->key);
3647 - key_revoke(cons->authkey);
3648 + complete_request_key(cons, ret);
3649 return ret;
3650 }
3651
3652 @@ -706,11 +713,18 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
3653 {
3654 struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
3655 struct idmap *idmap = (struct idmap *)rpci->private;
3656 - struct key_construction *cons = idmap->idmap_key_cons;
3657 + struct key_construction *cons;
3658 struct idmap_msg im;
3659 size_t namelen_in;
3660 int ret;
3661
3662 + /* If instantiation is successful, anyone waiting for key construction
3663 + * will have been woken up and someone else may now have used
3664 + * idmap_key_cons - so after this point we may no longer touch it.
3665 + */
3666 + cons = ACCESS_ONCE(idmap->idmap_key_cons);
3667 + idmap->idmap_key_cons = NULL;
3668 +
3669 if (mlen != sizeof(im)) {
3670 ret = -ENOSPC;
3671 goto out;
3672 @@ -723,7 +737,7 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
3673
3674 if (!(im.im_status & IDMAP_STATUS_SUCCESS)) {
3675 ret = mlen;
3676 - complete_request_key(idmap->idmap_key_cons, -ENOKEY);
3677 + complete_request_key(cons, -ENOKEY);
3678 goto out_incomplete;
3679 }
3680
3681 @@ -740,7 +754,7 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
3682 }
3683
3684 out:
3685 - complete_request_key(idmap->idmap_key_cons, ret);
3686 + complete_request_key(cons, ret);
3687 out_incomplete:
3688 return ret;
3689 }
3690 diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
3691 index 42ac1bf..1afe74c 100644
3692 --- a/fs/nfs/objlayout/objio_osd.c
3693 +++ b/fs/nfs/objlayout/objio_osd.c
3694 @@ -487,8 +487,16 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
3695 struct objio_state *objios = priv;
3696 struct nfs_write_data *wdata = objios->oir.rpcdata;
3697 pgoff_t index = offset / PAGE_SIZE;
3698 - struct page *page = find_get_page(wdata->inode->i_mapping, index);
3699 + struct page *page;
3700 + loff_t i_size = i_size_read(wdata->inode);
3701
3702 + if (offset >= i_size) {
3703 + *uptodate = true;
3704 + dprintk("%s: g_zero_page index=0x%lx\n", __func__, index);
3705 + return ZERO_PAGE(0);
3706 + }
3707 +
3708 + page = find_get_page(wdata->inode->i_mapping, index);
3709 if (!page) {
3710 page = find_or_create_page(wdata->inode->i_mapping,
3711 index, GFP_NOFS);
3712 @@ -509,8 +517,10 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
3713
3714 static void __r4w_put_page(void *priv, struct page *page)
3715 {
3716 - dprintk("%s: index=0x%lx\n", __func__, page->index);
3717 - page_cache_release(page);
3718 + dprintk("%s: index=0x%lx\n", __func__,
3719 + (page == ZERO_PAGE(0)) ? -1UL : page->index);
3720 + if (ZERO_PAGE(0) != page)
3721 + page_cache_release(page);
3722 return;
3723 }
3724
3725 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
3726 index 74c00bc..283d15e 100644
3727 --- a/fs/nfsd/nfs4xdr.c
3728 +++ b/fs/nfsd/nfs4xdr.c
3729 @@ -2233,7 +2233,7 @@ out_acl:
3730 if (bmval0 & FATTR4_WORD0_CASE_INSENSITIVE) {
3731 if ((buflen -= 4) < 0)
3732 goto out_resource;
3733 - WRITE32(1);
3734 + WRITE32(0);
3735 }
3736 if (bmval0 & FATTR4_WORD0_CASE_PRESERVING) {
3737 if ((buflen -= 4) < 0)
3738 diff --git a/fs/select.c b/fs/select.c
3739 index 17d33d0..0baa0a3 100644
3740 --- a/fs/select.c
3741 +++ b/fs/select.c
3742 @@ -345,8 +345,8 @@ static int max_select_fd(unsigned long n, fd_set_bits *fds)
3743 struct fdtable *fdt;
3744
3745 /* handle last in-complete long-word first */
3746 - set = ~(~0UL << (n & (__NFDBITS-1)));
3747 - n /= __NFDBITS;
3748 + set = ~(~0UL << (n & (BITS_PER_LONG-1)));
3749 + n /= BITS_PER_LONG;
3750 fdt = files_fdtable(current->files);
3751 open_fds = fdt->open_fds + n;
3752 max = 0;
3753 @@ -373,7 +373,7 @@ get_max:
3754 max++;
3755 set >>= 1;
3756 } while (set);
3757 - max += n * __NFDBITS;
3758 + max += n * BITS_PER_LONG;
3759 }
3760
3761 return max;
3762 @@ -435,11 +435,11 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
3763 in = *inp++; out = *outp++; ex = *exp++;
3764 all_bits = in | out | ex;
3765 if (all_bits == 0) {
3766 - i += __NFDBITS;
3767 + i += BITS_PER_LONG;
3768 continue;
3769 }
3770
3771 - for (j = 0; j < __NFDBITS; ++j, ++i, bit <<= 1) {
3772 + for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
3773 int fput_needed;
3774 if (i >= n)
3775 break;
3776 diff --git a/fs/udf/super.c b/fs/udf/super.c
3777 index 8d86a87..e660ffd 100644
3778 --- a/fs/udf/super.c
3779 +++ b/fs/udf/super.c
3780 @@ -1283,7 +1283,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
3781 BUG_ON(ident != TAG_IDENT_LVD);
3782 lvd = (struct logicalVolDesc *)bh->b_data;
3783 table_len = le32_to_cpu(lvd->mapTableLength);
3784 - if (sizeof(*lvd) + table_len > sb->s_blocksize) {
3785 + if (table_len > sb->s_blocksize - sizeof(*lvd)) {
3786 udf_err(sb, "error loading logical volume descriptor: "
3787 "Partition table too long (%u > %lu)\n", table_len,
3788 sb->s_blocksize - sizeof(*lvd));
3789 diff --git a/include/linux/cpu.h b/include/linux/cpu.h
3790 index ee28844..78ed62f 100644
3791 --- a/include/linux/cpu.h
3792 +++ b/include/linux/cpu.h
3793 @@ -75,8 +75,9 @@ enum {
3794 /* migration should happen before other stuff but after perf */
3795 CPU_PRI_PERF = 20,
3796 CPU_PRI_MIGRATION = 10,
3797 - /* prepare workqueues for other notifiers */
3798 - CPU_PRI_WORKQUEUE = 5,
3799 + /* bring up workqueues before normal notifiers and down after */
3800 + CPU_PRI_WORKQUEUE_UP = 5,
3801 + CPU_PRI_WORKQUEUE_DOWN = -5,
3802 };
3803
3804 #define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
3805 diff --git a/include/linux/mm.h b/include/linux/mm.h
3806 index 74aa71b..441a564 100644
3807 --- a/include/linux/mm.h
3808 +++ b/include/linux/mm.h
3809 @@ -1595,6 +1595,7 @@ void vmemmap_populate_print_last(void);
3810 enum mf_flags {
3811 MF_COUNT_INCREASED = 1 << 0,
3812 MF_ACTION_REQUIRED = 1 << 1,
3813 + MF_MUST_KILL = 1 << 2,
3814 };
3815 extern int memory_failure(unsigned long pfn, int trapno, int flags);
3816 extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
3817 diff --git a/include/linux/net.h b/include/linux/net.h
3818 index be60c7f..95fea14 100644
3819 --- a/include/linux/net.h
3820 +++ b/include/linux/net.h
3821 @@ -72,6 +72,7 @@ struct net;
3822 #define SOCK_NOSPACE 2
3823 #define SOCK_PASSCRED 3
3824 #define SOCK_PASSSEC 4
3825 +#define SOCK_EXTERNALLY_ALLOCATED 5
3826
3827 #ifndef ARCH_HAS_SOCKET_TYPES
3828 /**
3829 diff --git a/include/linux/posix_types.h b/include/linux/posix_types.h
3830 index f04c98c..988f76e 100644
3831 --- a/include/linux/posix_types.h
3832 +++ b/include/linux/posix_types.h
3833 @@ -15,26 +15,14 @@
3834 */
3835
3836 /*
3837 - * Those macros may have been defined in <gnu/types.h>. But we always
3838 - * use the ones here.
3839 + * This macro may have been defined in <gnu/types.h>. But we always
3840 + * use the one here.
3841 */
3842 -#undef __NFDBITS
3843 -#define __NFDBITS (8 * sizeof(unsigned long))
3844 -
3845 #undef __FD_SETSIZE
3846 #define __FD_SETSIZE 1024
3847
3848 -#undef __FDSET_LONGS
3849 -#define __FDSET_LONGS (__FD_SETSIZE/__NFDBITS)
3850 -
3851 -#undef __FDELT
3852 -#define __FDELT(d) ((d) / __NFDBITS)
3853 -
3854 -#undef __FDMASK
3855 -#define __FDMASK(d) (1UL << ((d) % __NFDBITS))
3856 -
3857 typedef struct {
3858 - unsigned long fds_bits [__FDSET_LONGS];
3859 + unsigned long fds_bits[__FD_SETSIZE / (8 * sizeof(long))];
3860 } __kernel_fd_set;
3861
3862 /* Type of a signal handler. */
3863 diff --git a/include/linux/time.h b/include/linux/time.h
3864 index 33a92ea..8da5129 100644
3865 --- a/include/linux/time.h
3866 +++ b/include/linux/time.h
3867 @@ -258,14 +258,6 @@ static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
3868
3869 #endif /* __KERNEL__ */
3870
3871 -#define NFDBITS __NFDBITS
3872 -
3873 -#define FD_SETSIZE __FD_SETSIZE
3874 -#define FD_SET(fd,fdsetp) __FD_SET(fd,fdsetp)
3875 -#define FD_CLR(fd,fdsetp) __FD_CLR(fd,fdsetp)
3876 -#define FD_ISSET(fd,fdsetp) __FD_ISSET(fd,fdsetp)
3877 -#define FD_ZERO(fdsetp) __FD_ZERO(fdsetp)
3878 -
3879 /*
3880 * Names of the interval timers, and structure
3881 * defining a timer setting:
3882 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
3883 index aaccc5f..3ad5b33 100644
3884 --- a/include/target/target_core_base.h
3885 +++ b/include/target/target_core_base.h
3886 @@ -229,6 +229,7 @@ enum tcm_sense_reason_table {
3887 TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e,
3888 TCM_CHECK_CONDITION_NOT_READY = 0x0f,
3889 TCM_RESERVATION_CONFLICT = 0x10,
3890 + TCM_ADDRESS_OUT_OF_RANGE = 0x11,
3891 };
3892
3893 enum target_sc_flags_table {
3894 diff --git a/kernel/exit.c b/kernel/exit.c
3895 index 9d81012..bfbd856 100644
3896 --- a/kernel/exit.c
3897 +++ b/kernel/exit.c
3898 @@ -471,7 +471,7 @@ static void close_files(struct files_struct * files)
3899 rcu_read_unlock();
3900 for (;;) {
3901 unsigned long set;
3902 - i = j * __NFDBITS;
3903 + i = j * BITS_PER_LONG;
3904 if (i >= fdt->max_fds)
3905 break;
3906 set = fdt->open_fds[j++];
3907 diff --git a/kernel/futex.c b/kernel/futex.c
3908 index e2b0fb9..3717e7b 100644
3909 --- a/kernel/futex.c
3910 +++ b/kernel/futex.c
3911 @@ -2231,11 +2231,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
3912 * @uaddr2: the pi futex we will take prior to returning to user-space
3913 *
3914 * The caller will wait on uaddr and will be requeued by futex_requeue() to
3915 - * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and
3916 - * complete the acquisition of the rt_mutex prior to returning to userspace.
3917 - * This ensures the rt_mutex maintains an owner when it has waiters; without
3918 - * one, the pi logic wouldn't know which task to boost/deboost, if there was a
3919 - * need to.
3920 + * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake
3921 + * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
3922 + * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
3923 + * without one, the pi logic would not know which task to boost/deboost, if
3924 + * there was a need to.
3925 *
3926 * We call schedule in futex_wait_queue_me() when we enqueue and return there
3927 * via the following:
3928 @@ -2272,6 +2272,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
3929 struct futex_q q = futex_q_init;
3930 int res, ret;
3931
3932 + if (uaddr == uaddr2)
3933 + return -EINVAL;
3934 +
3935 if (!bitset)
3936 return -EINVAL;
3937
3938 @@ -2343,7 +2346,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
3939 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
3940 * the pi_state.
3941 */
3942 - WARN_ON(!&q.pi_state);
3943 + WARN_ON(!q.pi_state);
3944 pi_mutex = &q.pi_state->pi_mutex;
3945 ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
3946 debug_rt_mutex_free_waiter(&rt_waiter);
3947 @@ -2370,7 +2373,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
3948 * fault, unlock the rt_mutex and return the fault to userspace.
3949 */
3950 if (ret == -EFAULT) {
3951 - if (rt_mutex_owner(pi_mutex) == current)
3952 + if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
3953 rt_mutex_unlock(pi_mutex);
3954 } else if (ret == -EINTR) {
3955 /*
3956 diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
3957 index e09dfbf..52a1817 100644
3958 --- a/kernel/power/hibernate.c
3959 +++ b/kernel/power/hibernate.c
3960 @@ -352,6 +352,7 @@ int hibernation_snapshot(int platform_mode)
3961 }
3962
3963 suspend_console();
3964 + ftrace_stop();
3965 pm_restrict_gfp_mask();
3966
3967 error = dpm_suspend(PMSG_FREEZE);
3968 @@ -377,6 +378,7 @@ int hibernation_snapshot(int platform_mode)
3969 if (error || !in_suspend)
3970 pm_restore_gfp_mask();
3971
3972 + ftrace_start();
3973 resume_console();
3974 dpm_complete(msg);
3975
3976 @@ -479,6 +481,7 @@ int hibernation_restore(int platform_mode)
3977
3978 pm_prepare_console();
3979 suspend_console();
3980 + ftrace_stop();
3981 pm_restrict_gfp_mask();
3982 error = dpm_suspend_start(PMSG_QUIESCE);
3983 if (!error) {
3984 @@ -486,6 +489,7 @@ int hibernation_restore(int platform_mode)
3985 dpm_resume_end(PMSG_RECOVER);
3986 }
3987 pm_restore_gfp_mask();
3988 + ftrace_start();
3989 resume_console();
3990 pm_restore_console();
3991 return error;
3992 @@ -512,6 +516,7 @@ int hibernation_platform_enter(void)
3993
3994 entering_platform_hibernation = true;
3995 suspend_console();
3996 + ftrace_stop();
3997 error = dpm_suspend_start(PMSG_HIBERNATE);
3998 if (error) {
3999 if (hibernation_ops->recover)
4000 @@ -555,6 +560,7 @@ int hibernation_platform_enter(void)
4001 Resume_devices:
4002 entering_platform_hibernation = false;
4003 dpm_resume_end(PMSG_RESTORE);
4004 + ftrace_start();
4005 resume_console();
4006
4007 Close:
4008 diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
4009 index 396d262..c8b7446 100644
4010 --- a/kernel/power/suspend.c
4011 +++ b/kernel/power/suspend.c
4012 @@ -24,6 +24,7 @@
4013 #include <linux/export.h>
4014 #include <linux/suspend.h>
4015 #include <linux/syscore_ops.h>
4016 +#include <linux/ftrace.h>
4017 #include <trace/events/power.h>
4018
4019 #include "power.h"
4020 @@ -212,6 +213,7 @@ int suspend_devices_and_enter(suspend_state_t state)
4021 goto Close;
4022 }
4023 suspend_console();
4024 + ftrace_stop();
4025 suspend_test_start();
4026 error = dpm_suspend_start(PMSG_SUSPEND);
4027 if (error) {
4028 @@ -231,6 +233,7 @@ int suspend_devices_and_enter(suspend_state_t state)
4029 suspend_test_start();
4030 dpm_resume_end(PMSG_RESUME);
4031 suspend_test_finish("resume devices");
4032 + ftrace_start();
4033 resume_console();
4034 Close:
4035 if (suspend_ops->end)
4036 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
4037 index 7da267c..bfe3f8a 100644
4038 --- a/kernel/workqueue.c
4039 +++ b/kernel/workqueue.c
4040 @@ -3582,6 +3582,41 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
4041 return notifier_from_errno(0);
4042 }
4043
4044 +/*
4045 + * Workqueues should be brought up before normal priority CPU notifiers.
4046 + * This will be registered high priority CPU notifier.
4047 + */
4048 +static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
4049 + unsigned long action,
4050 + void *hcpu)
4051 +{
4052 + switch (action & ~CPU_TASKS_FROZEN) {
4053 + case CPU_UP_PREPARE:
4054 + case CPU_UP_CANCELED:
4055 + case CPU_DOWN_FAILED:
4056 + case CPU_ONLINE:
4057 + return workqueue_cpu_callback(nfb, action, hcpu);
4058 + }
4059 + return NOTIFY_OK;
4060 +}
4061 +
4062 +/*
4063 + * Workqueues should be brought down after normal priority CPU notifiers.
4064 + * This will be registered as low priority CPU notifier.
4065 + */
4066 +static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
4067 + unsigned long action,
4068 + void *hcpu)
4069 +{
4070 + switch (action & ~CPU_TASKS_FROZEN) {
4071 + case CPU_DOWN_PREPARE:
4072 + case CPU_DYING:
4073 + case CPU_POST_DEAD:
4074 + return workqueue_cpu_callback(nfb, action, hcpu);
4075 + }
4076 + return NOTIFY_OK;
4077 +}
4078 +
4079 #ifdef CONFIG_SMP
4080
4081 struct work_for_cpu {
4082 @@ -3775,7 +3810,8 @@ static int __init init_workqueues(void)
4083 unsigned int cpu;
4084 int i;
4085
4086 - cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
4087 + cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
4088 + cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
4089
4090 /* initialize gcwqs */
4091 for_each_gcwq_cpu(cpu) {
4092 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
4093 index 97cc273..0de20d7 100644
4094 --- a/mm/memory-failure.c
4095 +++ b/mm/memory-failure.c
4096 @@ -345,14 +345,14 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
4097 * Also when FAIL is set do a force kill because something went
4098 * wrong earlier.
4099 */
4100 -static void kill_procs(struct list_head *to_kill, int doit, int trapno,
4101 +static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
4102 int fail, struct page *page, unsigned long pfn,
4103 int flags)
4104 {
4105 struct to_kill *tk, *next;
4106
4107 list_for_each_entry_safe (tk, next, to_kill, nd) {
4108 - if (doit) {
4109 + if (forcekill) {
4110 /*
4111 * In case something went wrong with munmapping
4112 * make sure the process doesn't catch the
4113 @@ -858,7 +858,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
4114 struct address_space *mapping;
4115 LIST_HEAD(tokill);
4116 int ret;
4117 - int kill = 1;
4118 + int kill = 1, forcekill;
4119 struct page *hpage = compound_head(p);
4120 struct page *ppage;
4121
4122 @@ -888,7 +888,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
4123 * be called inside page lock (it's recommended but not enforced).
4124 */
4125 mapping = page_mapping(hpage);
4126 - if (!PageDirty(hpage) && mapping &&
4127 + if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
4128 mapping_cap_writeback_dirty(mapping)) {
4129 if (page_mkclean(hpage)) {
4130 SetPageDirty(hpage);
4131 @@ -965,12 +965,14 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
4132 * Now that the dirty bit has been propagated to the
4133 * struct page and all unmaps done we can decide if
4134 * killing is needed or not. Only kill when the page
4135 - * was dirty, otherwise the tokill list is merely
4136 + * was dirty or the process is not restartable,
4137 + * otherwise the tokill list is merely
4138 * freed. When there was a problem unmapping earlier
4139 * use a more force-full uncatchable kill to prevent
4140 * any accesses to the poisoned memory.
4141 */
4142 - kill_procs(&tokill, !!PageDirty(ppage), trapno,
4143 + forcekill = PageDirty(ppage) || (flags & MF_MUST_KILL);
4144 + kill_procs(&tokill, forcekill, trapno,
4145 ret != SWAP_SUCCESS, p, pfn, flags);
4146
4147 return ret;
4148 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
4149 index efea35b..cf4a49c 100644
4150 --- a/net/8021q/vlan.c
4151 +++ b/net/8021q/vlan.c
4152 @@ -403,6 +403,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
4153 break;
4154
4155 case NETDEV_DOWN:
4156 + if (dev->features & NETIF_F_HW_VLAN_FILTER)
4157 + vlan_vid_del(dev, 0);
4158 +
4159 /* Put all VLANs for this dev in the down state too. */
4160 for (i = 0; i < VLAN_N_VID; i++) {
4161 vlandev = vlan_group_get_device(grp, i);
4162 diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
4163 index aa6f716..7bf4c21 100644
4164 --- a/net/caif/caif_dev.c
4165 +++ b/net/caif/caif_dev.c
4166 @@ -562,9 +562,9 @@ static int __init caif_device_init(void)
4167
4168 static void __exit caif_device_exit(void)
4169 {
4170 - unregister_pernet_subsys(&caif_net_ops);
4171 unregister_netdevice_notifier(&caif_device_notifier);
4172 dev_remove_pack(&caif_packet_type);
4173 + unregister_pernet_subsys(&caif_net_ops);
4174 }
4175
4176 module_init(caif_device_init);
4177 diff --git a/net/compat.c b/net/compat.c
4178 index e055708..ae6d67a 100644
4179 --- a/net/compat.c
4180 +++ b/net/compat.c
4181 @@ -221,6 +221,8 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
4182 {
4183 struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
4184 struct compat_cmsghdr cmhdr;
4185 + struct compat_timeval ctv;
4186 + struct compat_timespec cts[3];
4187 int cmlen;
4188
4189 if (cm == NULL || kmsg->msg_controllen < sizeof(*cm)) {
4190 @@ -229,8 +231,6 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
4191 }
4192
4193 if (!COMPAT_USE_64BIT_TIME) {
4194 - struct compat_timeval ctv;
4195 - struct compat_timespec cts[3];
4196 if (level == SOL_SOCKET && type == SCM_TIMESTAMP) {
4197 struct timeval *tv = (struct timeval *)data;
4198 ctv.tv_sec = tv->tv_sec;
4199 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
4200 index 90430b7..b8052ba 100644
4201 --- a/net/core/rtnetlink.c
4202 +++ b/net/core/rtnetlink.c
4203 @@ -671,6 +671,12 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
4204 }
4205 }
4206
4207 +static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
4208 +{
4209 + return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
4210 + (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
4211 +}
4212 +
4213 static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
4214 const struct ifinfomsg *ifm)
4215 {
4216 @@ -679,7 +685,7 @@ static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
4217 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
4218 if (ifm->ifi_change)
4219 flags = (flags & ifm->ifi_change) |
4220 - (dev->flags & ~ifm->ifi_change);
4221 + (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
4222
4223 return flags;
4224 }
4225 diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
4226 index c48adc5..667c1d4 100644
4227 --- a/net/ipv4/cipso_ipv4.c
4228 +++ b/net/ipv4/cipso_ipv4.c
4229 @@ -1725,8 +1725,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
4230 case CIPSO_V4_TAG_LOCAL:
4231 /* This is a non-standard tag that we only allow for
4232 * local connections, so if the incoming interface is
4233 - * not the loopback device drop the packet. */
4234 - if (!(skb->dev->flags & IFF_LOOPBACK)) {
4235 + * not the loopback device drop the packet. Further,
4236 + * there is no legitimate reason for setting this from
4237 + * userspace so reject it if skb is NULL. */
4238 + if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
4239 err_offset = opt_iter;
4240 goto validate_return_locked;
4241 }
4242 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
4243 index 6589e11..d6feb1e 100644
4244 --- a/net/ipv4/tcp.c
4245 +++ b/net/ipv4/tcp.c
4246 @@ -2408,7 +2408,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
4247 /* Cap the max timeout in ms TCP will retry/retrans
4248 * before giving up and aborting (ETIMEDOUT) a connection.
4249 */
4250 - icsk->icsk_user_timeout = msecs_to_jiffies(val);
4251 + if (val < 0)
4252 + err = -EINVAL;
4253 + else
4254 + icsk->icsk_user_timeout = msecs_to_jiffies(val);
4255 break;
4256 default:
4257 err = -ENOPROTOOPT;
4258 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
4259 index 257b617..56a9c8d 100644
4260 --- a/net/ipv4/tcp_input.c
4261 +++ b/net/ipv4/tcp_input.c
4262 @@ -5441,7 +5441,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
4263 if (tp->copied_seq == tp->rcv_nxt &&
4264 len - tcp_header_len <= tp->ucopy.len) {
4265 #ifdef CONFIG_NET_DMA
4266 - if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
4267 + if (tp->ucopy.task == current &&
4268 + sock_owned_by_user(sk) &&
4269 + tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
4270 copied_early = 1;
4271 eaten = 1;
4272 }
4273 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
4274 index d132b98..25be683 100644
4275 --- a/net/mac80211/mlme.c
4276 +++ b/net/mac80211/mlme.c
4277 @@ -1813,7 +1813,8 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
4278 if (status_code != WLAN_STATUS_SUCCESS) {
4279 printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n",
4280 sdata->name, mgmt->sa, status_code);
4281 - goto out;
4282 + ieee80211_destroy_auth_data(sdata, false);
4283 + return RX_MGMT_CFG80211_RX_AUTH;
4284 }
4285
4286 switch (ifmgd->auth_data->algorithm) {
4287 @@ -1835,7 +1836,6 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
4288 }
4289
4290 printk(KERN_DEBUG "%s: authenticated\n", sdata->name);
4291 - out:
4292 ifmgd->auth_data->done = true;
4293 ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
4294 run_again(ifmgd, ifmgd->auth_data->timeout);
4295 diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
4296 index ebd2296..992acaa 100644
4297 --- a/net/sched/sch_netem.c
4298 +++ b/net/sched/sch_netem.c
4299 @@ -329,29 +329,22 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
4300 return PSCHED_NS2TICKS(ticks);
4301 }
4302
4303 -static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
4304 +static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
4305 {
4306 struct sk_buff_head *list = &sch->q;
4307 psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
4308 - struct sk_buff *skb;
4309 -
4310 - if (likely(skb_queue_len(list) < sch->limit)) {
4311 - skb = skb_peek_tail(list);
4312 - /* Optimize for add at tail */
4313 - if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
4314 - return qdisc_enqueue_tail(nskb, sch);
4315 + struct sk_buff *skb = skb_peek_tail(list);
4316
4317 - skb_queue_reverse_walk(list, skb) {
4318 - if (tnext >= netem_skb_cb(skb)->time_to_send)
4319 - break;
4320 - }
4321 + /* Optimize for add at tail */
4322 + if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
4323 + return __skb_queue_tail(list, nskb);
4324
4325 - __skb_queue_after(list, skb, nskb);
4326 - sch->qstats.backlog += qdisc_pkt_len(nskb);
4327 - return NET_XMIT_SUCCESS;
4328 + skb_queue_reverse_walk(list, skb) {
4329 + if (tnext >= netem_skb_cb(skb)->time_to_send)
4330 + break;
4331 }
4332
4333 - return qdisc_reshape_fail(nskb, sch);
4334 + __skb_queue_after(list, skb, nskb);
4335 }
4336
4337 /*
4338 @@ -366,7 +359,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
4339 /* We don't fill cb now as skb_unshare() may invalidate it */
4340 struct netem_skb_cb *cb;
4341 struct sk_buff *skb2;
4342 - int ret;
4343 int count = 1;
4344
4345 /* Random duplication */
4346 @@ -414,6 +406,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
4347 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
4348 }
4349
4350 + if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
4351 + return qdisc_reshape_fail(skb, sch);
4352 +
4353 + sch->qstats.backlog += qdisc_pkt_len(skb);
4354 +
4355 cb = netem_skb_cb(skb);
4356 if (q->gap == 0 || /* not doing reordering */
4357 q->counter < q->gap - 1 || /* inside last reordering gap */
4358 @@ -445,7 +442,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
4359
4360 cb->time_to_send = now + delay;
4361 ++q->counter;
4362 - ret = tfifo_enqueue(skb, sch);
4363 + tfifo_enqueue(skb, sch);
4364 } else {
4365 /*
4366 * Do re-ordering by putting one out of N packets at the front
4367 @@ -455,16 +452,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
4368 q->counter = 0;
4369
4370 __skb_queue_head(&sch->q, skb);
4371 - sch->qstats.backlog += qdisc_pkt_len(skb);
4372 sch->qstats.requeues++;
4373 - ret = NET_XMIT_SUCCESS;
4374 - }
4375 -
4376 - if (ret != NET_XMIT_SUCCESS) {
4377 - if (net_xmit_drop_count(ret)) {
4378 - sch->qstats.drops++;
4379 - return ret;
4380 - }
4381 }
4382
4383 return NET_XMIT_SUCCESS;
4384 diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
4385 index d7eea99..c6a5867 100644
4386 --- a/net/sched/sch_sfb.c
4387 +++ b/net/sched/sch_sfb.c
4388 @@ -570,6 +570,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
4389
4390 sch->qstats.backlog = q->qdisc->qstats.backlog;
4391 opts = nla_nest_start(skb, TCA_OPTIONS);
4392 + if (opts == NULL)
4393 + goto nla_put_failure;
4394 NLA_PUT(skb, TCA_SFB_PARMS, sizeof(opt), &opt);
4395 return nla_nest_end(skb, opts);
4396
4397 diff --git a/net/sctp/input.c b/net/sctp/input.c
4398 index 80f71af..be772c0 100644
4399 --- a/net/sctp/input.c
4400 +++ b/net/sctp/input.c
4401 @@ -736,15 +736,12 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
4402
4403 epb = &ep->base;
4404
4405 - if (hlist_unhashed(&epb->node))
4406 - return;
4407 -
4408 epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
4409
4410 head = &sctp_ep_hashtable[epb->hashent];
4411
4412 sctp_write_lock(&head->lock);
4413 - __hlist_del(&epb->node);
4414 + hlist_del_init(&epb->node);
4415 sctp_write_unlock(&head->lock);
4416 }
4417
4418 @@ -825,7 +822,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
4419 head = &sctp_assoc_hashtable[epb->hashent];
4420
4421 sctp_write_lock(&head->lock);
4422 - __hlist_del(&epb->node);
4423 + hlist_del_init(&epb->node);
4424 sctp_write_unlock(&head->lock);
4425 }
4426
4427 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
4428 index 92ba71d..dba20d6 100644
4429 --- a/net/sctp/socket.c
4430 +++ b/net/sctp/socket.c
4431 @@ -1231,8 +1231,14 @@ out_free:
4432 SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
4433 " kaddrs: %p err: %d\n",
4434 asoc, kaddrs, err);
4435 - if (asoc)
4436 + if (asoc) {
4437 + /* sctp_primitive_ASSOCIATE may have added this association
4438 + * To the hash table, try to unhash it, just in case, its a noop
4439 + * if it wasn't hashed so we're safe
4440 + */
4441 + sctp_unhash_established(asoc);
4442 sctp_association_free(asoc);
4443 + }
4444 return err;
4445 }
4446
4447 @@ -1942,8 +1948,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
4448 goto out_unlock;
4449
4450 out_free:
4451 - if (new_asoc)
4452 + if (new_asoc) {
4453 + sctp_unhash_established(asoc);
4454 sctp_association_free(asoc);
4455 + }
4456 out_unlock:
4457 sctp_release_sock(sk);
4458
4459 diff --git a/net/socket.c b/net/socket.c
4460 index 851edcd..573b261 100644
4461 --- a/net/socket.c
4462 +++ b/net/socket.c
4463 @@ -522,6 +522,9 @@ void sock_release(struct socket *sock)
4464 if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
4465 printk(KERN_ERR "sock_release: fasync list not empty!\n");
4466
4467 + if (test_bit(SOCK_EXTERNALLY_ALLOCATED, &sock->flags))
4468 + return;
4469 +
4470 percpu_sub(sockets_in_use, 1);
4471 if (!sock->file) {
4472 iput(SOCK_INODE(sock));
4473 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
4474 index 994cfea..eda32ae 100644
4475 --- a/net/sunrpc/sched.c
4476 +++ b/net/sunrpc/sched.c
4477 @@ -790,7 +790,9 @@ void rpc_execute(struct rpc_task *task)
4478
4479 static void rpc_async_schedule(struct work_struct *work)
4480 {
4481 + current->flags |= PF_FSTRANS;
4482 __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
4483 + current->flags &= ~PF_FSTRANS;
4484 }
4485
4486 /**
4487 diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
4488 index b446e10..06cdbff 100644
4489 --- a/net/sunrpc/xprtrdma/transport.c
4490 +++ b/net/sunrpc/xprtrdma/transport.c
4491 @@ -200,6 +200,7 @@ xprt_rdma_connect_worker(struct work_struct *work)
4492 int rc = 0;
4493
4494 if (!xprt->shutdown) {
4495 + current->flags |= PF_FSTRANS;
4496 xprt_clear_connected(xprt);
4497
4498 dprintk("RPC: %s: %sconnect\n", __func__,
4499 @@ -212,10 +213,10 @@ xprt_rdma_connect_worker(struct work_struct *work)
4500
4501 out:
4502 xprt_wake_pending_tasks(xprt, rc);
4503 -
4504 out_clear:
4505 dprintk("RPC: %s: exit\n", __func__);
4506 xprt_clear_connecting(xprt);
4507 + current->flags &= ~PF_FSTRANS;
4508 }
4509
4510 /*
4511 diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
4512 index 890b03f..b88c6bf 100644
4513 --- a/net/sunrpc/xprtsock.c
4514 +++ b/net/sunrpc/xprtsock.c
4515 @@ -1895,6 +1895,8 @@ static void xs_local_setup_socket(struct work_struct *work)
4516 if (xprt->shutdown)
4517 goto out;
4518
4519 + current->flags |= PF_FSTRANS;
4520 +
4521 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
4522 status = __sock_create(xprt->xprt_net, AF_LOCAL,
4523 SOCK_STREAM, 0, &sock, 1);
4524 @@ -1928,6 +1930,7 @@ static void xs_local_setup_socket(struct work_struct *work)
4525 out:
4526 xprt_clear_connecting(xprt);
4527 xprt_wake_pending_tasks(xprt, status);
4528 + current->flags &= ~PF_FSTRANS;
4529 }
4530
4531 static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
4532 @@ -1970,6 +1973,8 @@ static void xs_udp_setup_socket(struct work_struct *work)
4533 if (xprt->shutdown)
4534 goto out;
4535
4536 + current->flags |= PF_FSTRANS;
4537 +
4538 /* Start by resetting any existing state */
4539 xs_reset_transport(transport);
4540 sock = xs_create_sock(xprt, transport,
4541 @@ -1988,6 +1993,7 @@ static void xs_udp_setup_socket(struct work_struct *work)
4542 out:
4543 xprt_clear_connecting(xprt);
4544 xprt_wake_pending_tasks(xprt, status);
4545 + current->flags &= ~PF_FSTRANS;
4546 }
4547
4548 /*
4549 @@ -2113,6 +2119,8 @@ static void xs_tcp_setup_socket(struct work_struct *work)
4550 if (xprt->shutdown)
4551 goto out;
4552
4553 + current->flags |= PF_FSTRANS;
4554 +
4555 if (!sock) {
4556 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
4557 sock = xs_create_sock(xprt, transport,
4558 @@ -2162,6 +2170,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
4559 case -EINPROGRESS:
4560 case -EALREADY:
4561 xprt_clear_connecting(xprt);
4562 + current->flags &= ~PF_FSTRANS;
4563 return;
4564 case -EINVAL:
4565 /* Happens, for instance, if the user specified a link
4566 @@ -2174,6 +2183,7 @@ out_eagain:
4567 out:
4568 xprt_clear_connecting(xprt);
4569 xprt_wake_pending_tasks(xprt, status);
4570 + current->flags &= ~PF_FSTRANS;
4571 }
4572
4573 /**
4574 diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
4575 index 788a12c..2ab7850 100644
4576 --- a/net/wanrouter/wanmain.c
4577 +++ b/net/wanrouter/wanmain.c
4578 @@ -602,36 +602,31 @@ static int wanrouter_device_new_if(struct wan_device *wandev,
4579 * successfully, add it to the interface list.
4580 */
4581
4582 - if (dev->name == NULL) {
4583 - err = -EINVAL;
4584 - } else {
4585 +#ifdef WANDEBUG
4586 + printk(KERN_INFO "%s: registering interface %s...\n",
4587 + wanrouter_modname, dev->name);
4588 +#endif
4589
4590 - #ifdef WANDEBUG
4591 - printk(KERN_INFO "%s: registering interface %s...\n",
4592 - wanrouter_modname, dev->name);
4593 - #endif
4594 -
4595 - err = register_netdev(dev);
4596 - if (!err) {
4597 - struct net_device *slave = NULL;
4598 - unsigned long smp_flags=0;
4599 -
4600 - lock_adapter_irq(&wandev->lock, &smp_flags);
4601 -
4602 - if (wandev->dev == NULL) {
4603 - wandev->dev = dev;
4604 - } else {
4605 - for (slave=wandev->dev;
4606 - DEV_TO_SLAVE(slave);
4607 - slave = DEV_TO_SLAVE(slave))
4608 - DEV_TO_SLAVE(slave) = dev;
4609 - }
4610 - ++wandev->ndev;
4611 -
4612 - unlock_adapter_irq(&wandev->lock, &smp_flags);
4613 - err = 0; /* done !!! */
4614 - goto out;
4615 + err = register_netdev(dev);
4616 + if (!err) {
4617 + struct net_device *slave = NULL;
4618 + unsigned long smp_flags=0;
4619 +
4620 + lock_adapter_irq(&wandev->lock, &smp_flags);
4621 +
4622 + if (wandev->dev == NULL) {
4623 + wandev->dev = dev;
4624 + } else {
4625 + for (slave=wandev->dev;
4626 + DEV_TO_SLAVE(slave);
4627 + slave = DEV_TO_SLAVE(slave))
4628 + DEV_TO_SLAVE(slave) = dev;
4629 }
4630 + ++wandev->ndev;
4631 +
4632 + unlock_adapter_irq(&wandev->lock, &smp_flags);
4633 + err = 0; /* done !!! */
4634 + goto out;
4635 }
4636 if (wandev->del_if)
4637 wandev->del_if(wandev, dev);
4638 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
4639 index d85b793..5626222 100644
4640 --- a/security/selinux/hooks.c
4641 +++ b/security/selinux/hooks.c
4642 @@ -2162,7 +2162,7 @@ static inline void flush_unauthorized_files(const struct cred *cred,
4643 int fd;
4644
4645 j++;
4646 - i = j * __NFDBITS;
4647 + i = j * BITS_PER_LONG;
4648 fdt = files_fdtable(files);
4649 if (i >= fdt->max_fds)
4650 break;
4651 diff --git a/sound/drivers/mpu401/mpu401_uart.c b/sound/drivers/mpu401/mpu401_uart.c
4652 index 1cff331..4608c2c 100644
4653 --- a/sound/drivers/mpu401/mpu401_uart.c
4654 +++ b/sound/drivers/mpu401/mpu401_uart.c
4655 @@ -554,6 +554,7 @@ int snd_mpu401_uart_new(struct snd_card *card, int device,
4656 spin_lock_init(&mpu->output_lock);
4657 spin_lock_init(&mpu->timer_lock);
4658 mpu->hardware = hardware;
4659 + mpu->irq = -1;
4660 if (! (info_flags & MPU401_INFO_INTEGRATED)) {
4661 int res_size = hardware == MPU401_HW_PC98II ? 4 : 2;
4662 mpu->res = request_region(port, res_size, "MPU401 UART");
4663 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
4664 index 83f345f..d1b805a 100644
4665 --- a/sound/pci/hda/patch_hdmi.c
4666 +++ b/sound/pci/hda/patch_hdmi.c
4667 @@ -876,7 +876,6 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
4668 struct hdmi_spec_per_pin *per_pin;
4669 struct hdmi_eld *eld;
4670 struct hdmi_spec_per_cvt *per_cvt = NULL;
4671 - int pinctl;
4672
4673 /* Validate hinfo */
4674 pin_idx = hinfo_to_pin_index(spec, hinfo);
4675 @@ -912,11 +911,6 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
4676 snd_hda_codec_write(codec, per_pin->pin_nid, 0,
4677 AC_VERB_SET_CONNECT_SEL,
4678 mux_idx);
4679 - pinctl = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
4680 - AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
4681 - snd_hda_codec_write(codec, per_pin->pin_nid, 0,
4682 - AC_VERB_SET_PIN_WIDGET_CONTROL,
4683 - pinctl | PIN_OUT);
4684 snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
4685
4686 /* Initially set the converter's capabilities */
4687 @@ -1153,11 +1147,17 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
4688 struct hdmi_spec *spec = codec->spec;
4689 int pin_idx = hinfo_to_pin_index(spec, hinfo);
4690 hda_nid_t pin_nid = spec->pins[pin_idx].pin_nid;
4691 + int pinctl;
4692
4693 hdmi_set_channel_count(codec, cvt_nid, substream->runtime->channels);
4694
4695 hdmi_setup_audio_infoframe(codec, pin_idx, substream);
4696
4697 + pinctl = snd_hda_codec_read(codec, pin_nid, 0,
4698 + AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
4699 + snd_hda_codec_write(codec, pin_nid, 0,
4700 + AC_VERB_SET_PIN_WIDGET_CONTROL, pinctl | PIN_OUT);
4701 +
4702 return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
4703 }
4704
4705 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4706 index c43264f..62e1627 100644
4707 --- a/sound/pci/hda/patch_realtek.c
4708 +++ b/sound/pci/hda/patch_realtek.c
4709 @@ -5858,6 +5858,15 @@ static int alc269_resume(struct hda_codec *codec)
4710 }
4711 #endif /* CONFIG_PM */
4712
4713 +static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec,
4714 + const struct alc_fixup *fix, int action)
4715 +{
4716 + struct alc_spec *spec = codec->spec;
4717 +
4718 + if (action == ALC_FIXUP_ACT_PRE_PROBE)
4719 + spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
4720 +}
4721 +
4722 static void alc269_fixup_hweq(struct hda_codec *codec,
4723 const struct alc_fixup *fix, int action)
4724 {
4725 @@ -5984,6 +5993,8 @@ enum {
4726 ALC269VB_FIXUP_AMIC,
4727 ALC269VB_FIXUP_DMIC,
4728 ALC269_FIXUP_MIC2_MUTE_LED,
4729 + ALC269_FIXUP_LENOVO_DOCK,
4730 + ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
4731 };
4732
4733 static const struct alc_fixup alc269_fixups[] = {
4734 @@ -6108,6 +6119,20 @@ static const struct alc_fixup alc269_fixups[] = {
4735 .type = ALC_FIXUP_FUNC,
4736 .v.func = alc269_fixup_mic2_mute,
4737 },
4738 + [ALC269_FIXUP_LENOVO_DOCK] = {
4739 + .type = ALC_FIXUP_PINS,
4740 + .v.pins = (const struct alc_pincfg[]) {
4741 + { 0x19, 0x23a11040 }, /* dock mic */
4742 + { 0x1b, 0x2121103f }, /* dock headphone */
4743 + { }
4744 + },
4745 + .chained = true,
4746 + .chain_id = ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT
4747 + },
4748 + [ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT] = {
4749 + .type = ALC_FIXUP_FUNC,
4750 + .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
4751 + },
4752 };
4753
4754 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4755 @@ -6131,6 +6156,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4756 SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
4757 SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
4758 SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
4759 + SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
4760 + SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
4761 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE),
4762 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K),
4763 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
4764 @@ -6189,6 +6216,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4765 static const struct alc_model_fixup alc269_fixup_models[] = {
4766 {.id = ALC269_FIXUP_AMIC, .name = "laptop-amic"},
4767 {.id = ALC269_FIXUP_DMIC, .name = "laptop-dmic"},
4768 + {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
4769 {}
4770 };
4771
4772 @@ -6977,6 +7005,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
4773 { .id = 0x10ec0275, .name = "ALC275", .patch = patch_alc269 },
4774 { .id = 0x10ec0276, .name = "ALC276", .patch = patch_alc269 },
4775 { .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 },
4776 + { .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 },
4777 { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
4778 .patch = patch_alc861 },
4779 { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
4780 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
4781 index 7494fbc..fd53312 100644
4782 --- a/sound/pci/hda/patch_sigmatel.c
4783 +++ b/sound/pci/hda/patch_sigmatel.c
4784 @@ -100,6 +100,8 @@ enum {
4785 STAC_92HD83XXX_HP_cNB11_INTQUAD,
4786 STAC_HP_DV7_4000,
4787 STAC_HP_ZEPHYR,
4788 + STAC_92HD83XXX_HP_LED,
4789 + STAC_92HD83XXX_HP_INV_LED,
4790 STAC_92HD83XXX_MODELS
4791 };
4792
4793 @@ -1672,6 +1674,8 @@ static const char * const stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = {
4794 [STAC_92HD83XXX_HP_cNB11_INTQUAD] = "hp_cNB11_intquad",
4795 [STAC_HP_DV7_4000] = "hp-dv7-4000",
4796 [STAC_HP_ZEPHYR] = "hp-zephyr",
4797 + [STAC_92HD83XXX_HP_LED] = "hp-led",
4798 + [STAC_92HD83XXX_HP_INV_LED] = "hp-inv-led",
4799 };
4800
4801 static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
4802 @@ -1726,6 +1730,8 @@ static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
4803 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
4804 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3561,
4805 "HP", STAC_HP_ZEPHYR),
4806 + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3660,
4807 + "HP Mini", STAC_92HD83XXX_HP_LED),
4808 {} /* terminator */
4809 };
4810
4811 @@ -4431,7 +4437,13 @@ static int stac92xx_init(struct hda_codec *codec)
4812 snd_hda_jack_report_sync(codec);
4813
4814 /* sync mute LED */
4815 - snd_hda_sync_vmaster_hook(&spec->vmaster_mute);
4816 + if (spec->gpio_led) {
4817 + if (spec->vmaster_mute.hook)
4818 + snd_hda_sync_vmaster_hook(&spec->vmaster_mute);
4819 + else /* the very first init call doesn't have vmaster yet */
4820 + stac92xx_update_led_status(codec, false);
4821 + }
4822 +
4823 if (spec->dac_list)
4824 stac92xx_power_down(codec);
4825 return 0;
4826 @@ -5528,6 +5540,7 @@ static void stac92hd8x_fill_auto_spec(struct hda_codec *codec)
4827 static int patch_stac92hd83xxx(struct hda_codec *codec)
4828 {
4829 struct sigmatel_spec *spec;
4830 + int default_polarity = -1; /* no default cfg */
4831 int err;
4832
4833 spec = kzalloc(sizeof(*spec), GFP_KERNEL);
4834 @@ -5576,9 +5589,15 @@ again:
4835 case STAC_HP_ZEPHYR:
4836 spec->init = stac92hd83xxx_hp_zephyr_init;
4837 break;
4838 + case STAC_92HD83XXX_HP_LED:
4839 + default_polarity = 0;
4840 + break;
4841 + case STAC_92HD83XXX_HP_INV_LED:
4842 + default_polarity = 1;
4843 + break;
4844 }
4845
4846 - if (find_mute_led_cfg(codec, -1/*no default cfg*/))
4847 + if (find_mute_led_cfg(codec, default_polarity))
4848 snd_printd("mute LED gpio %d polarity %d\n",
4849 spec->gpio_led,
4850 spec->gpio_led_polarity);
4851 diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
4852 index 06214fd..3998d09b 100644
4853 --- a/sound/pci/hda/patch_via.c
4854 +++ b/sound/pci/hda/patch_via.c
4855 @@ -3233,7 +3233,7 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
4856 {
4857 struct via_spec *spec = codec->spec;
4858 int imux_is_smixer;
4859 - unsigned int parm;
4860 + unsigned int parm, parm2;
4861 /* MUX6 (1eh) = stereo mixer */
4862 imux_is_smixer =
4863 snd_hda_codec_read(codec, 0x1e, 0, AC_VERB_GET_CONNECT_SEL, 0x00) == 5;
4864 @@ -3256,7 +3256,7 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
4865 parm = AC_PWRST_D3;
4866 set_pin_power_state(codec, 0x27, &parm);
4867 update_power_state(codec, 0x1a, parm);
4868 - update_power_state(codec, 0xb, parm);
4869 + parm2 = parm; /* for pin 0x0b */
4870
4871 /* PW2 (26h), AOW2 (ah) */
4872 parm = AC_PWRST_D3;
4873 @@ -3271,6 +3271,9 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
4874 if (!spec->hp_independent_mode) /* check for redirected HP */
4875 set_pin_power_state(codec, 0x28, &parm);
4876 update_power_state(codec, 0x8, parm);
4877 + if (!spec->hp_independent_mode && parm2 != AC_PWRST_D3)
4878 + parm = parm2;
4879 + update_power_state(codec, 0xb, parm);
4880 /* MW9 (21h), Mw2 (1ah), AOW0 (8h) */
4881 update_power_state(codec, 0x21, imux_is_smixer ? AC_PWRST_D0 : parm);
4882
4883 diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
4884 index 15d467f..96f6f9f 100644
4885 --- a/sound/soc/codecs/wm8962.c
4886 +++ b/sound/soc/codecs/wm8962.c
4887 @@ -2488,6 +2488,9 @@ static int wm8962_set_bias_level(struct snd_soc_codec *codec,
4888 /* VMID 2*250k */
4889 snd_soc_update_bits(codec, WM8962_PWR_MGMT_1,
4890 WM8962_VMID_SEL_MASK, 0x100);
4891 +
4892 + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
4893 + msleep(100);
4894 break;
4895
4896 case SND_SOC_BIAS_OFF:
4897 @@ -3710,6 +3713,9 @@ static int wm8962_runtime_resume(struct device *dev)
4898 }
4899
4900 regcache_cache_only(wm8962->regmap, false);
4901 +
4902 + wm8962_reset(wm8962);
4903 +
4904 regcache_sync(wm8962->regmap);
4905
4906 regmap_update_bits(wm8962->regmap, WM8962_ANTI_POP,
4907 diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
4908 index f351b93..4c471a5 100644
4909 --- a/sound/soc/codecs/wm8994.c
4910 +++ b/sound/soc/codecs/wm8994.c
4911 @@ -2695,7 +2695,7 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
4912 return -EINVAL;
4913 }
4914
4915 - bclk_rate = params_rate(params) * 2;
4916 + bclk_rate = params_rate(params) * 4;
4917 switch (params_format(params)) {
4918 case SNDRV_PCM_FORMAT_S16_LE:
4919 bclk_rate *= 16;
4920 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
4921 index 1bb6d4a..c41efe0 100644
4922 --- a/sound/soc/soc-dapm.c
4923 +++ b/sound/soc/soc-dapm.c
4924 @@ -1442,7 +1442,15 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
4925 }
4926
4927 list_for_each_entry(w, &card->widgets, list) {
4928 - list_del_init(&w->dirty);
4929 + switch (w->id) {
4930 + case snd_soc_dapm_pre:
4931 + case snd_soc_dapm_post:
4932 + /* These widgets always need to be powered */
4933 + break;
4934 + default:
4935 + list_del_init(&w->dirty);
4936 + break;
4937 + }
4938
4939 if (w->power) {
4940 d = w->dapm;
4941 diff --git a/sound/usb/clock.c b/sound/usb/clock.c
4942 index 379baad..5e634a2 100644
4943 --- a/sound/usb/clock.c
4944 +++ b/sound/usb/clock.c
4945 @@ -111,7 +111,8 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, int source_id)
4946 return 0;
4947
4948 /* If a clock source can't tell us whether it's valid, we assume it is */
4949 - if (!uac2_control_is_readable(cs_desc->bmControls, UAC2_CS_CONTROL_CLOCK_VALID))
4950 + if (!uac2_control_is_readable(cs_desc->bmControls,
4951 + UAC2_CS_CONTROL_CLOCK_VALID - 1))
4952 return 1;
4953
4954 err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,