Magellan Linux

Annotation of /trunk/kernel26-alx/patches-3.10/0141-3.10.42-all-fixes.patch



Revision 2672
Tue Jul 21 16:46:35 2015 UTC by niro
File size: 131660 bytes
-3.10.84-alx-r1
1 niro 2672 diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
2     index d29dea0f3232..babe2ef16139 100644
3     --- a/Documentation/i2c/busses/i2c-i801
4     +++ b/Documentation/i2c/busses/i2c-i801
5     @@ -25,6 +25,8 @@ Supported adapters:
6     * Intel Avoton (SOC)
7     * Intel Wellsburg (PCH)
8     * Intel Coleto Creek (PCH)
9     + * Intel Wildcat Point-LP (PCH)
10     + * Intel BayTrail (SOC)
11     Datasheets: Publicly available at the Intel website
12    
13     On Intel Patsburg and later chipsets, both the normal host SMBus controller
14     diff --git a/Documentation/input/elantech.txt b/Documentation/input/elantech.txt
15     index 5602eb71ad5d..e1ae127ed099 100644
16     --- a/Documentation/input/elantech.txt
17     +++ b/Documentation/input/elantech.txt
18     @@ -504,9 +504,12 @@ byte 5:
19     * reg_10
20    
21     bit 7 6 5 4 3 2 1 0
22     - 0 0 0 0 0 0 0 A
23     + 0 0 0 0 R F T A
24    
25     A: 1 = enable absolute tracking
26     + T: 1 = enable two finger mode auto correct
27     + F: 1 = disable ABS Position Filter
28     + R: 1 = enable real hardware resolution
29    
30     6.2 Native absolute mode 6 byte packet format
31     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
32     diff --git a/Documentation/ja_JP/HOWTO b/Documentation/ja_JP/HOWTO
33     index 050d37fe6d40..46ed73593465 100644
34     --- a/Documentation/ja_JP/HOWTO
35     +++ b/Documentation/ja_JP/HOWTO
36     @@ -315,7 +315,7 @@ Andrew Morton が Linux-kernel メーリングリストにカーネルリリー
37     もし、2.6.x.y カーネルが存在しない場合には、番号が一番大きい 2.6.x が
38     最新の安定版カーネルです。
39    
40     -2.6.x.y は "stable" チーム <stable@kernel.org> でメンテされており、必
41     +2.6.x.y は "stable" チーム <stable@vger.kernel.org> でメンテされており、必
42     要に応じてリリースされます。通常のリリース期間は 2週間毎ですが、差し迫っ
43     た問題がなければもう少し長くなることもあります。セキュリティ関連の問題
44     の場合はこれに対してだいたいの場合、すぐにリリースがされます。
45     diff --git a/Documentation/ja_JP/stable_kernel_rules.txt b/Documentation/ja_JP/stable_kernel_rules.txt
46     index 14265837c4ce..9dbda9b5d21e 100644
47     --- a/Documentation/ja_JP/stable_kernel_rules.txt
48     +++ b/Documentation/ja_JP/stable_kernel_rules.txt
49     @@ -50,16 +50,16 @@ linux-2.6.29/Documentation/stable_kernel_rules.txt
50    
51     -stable ツリーにパッチを送付する手続き-
52    
53     - - 上記の規則に従っているかを確認した後に、stable@kernel.org にパッチ
54     + - 上記の規則に従っているかを確認した後に、stable@vger.kernel.org にパッチ
55     を送る。
56     - 送信者はパッチがキューに受け付けられた際には ACK を、却下された場合
57     には NAK を受け取る。この反応は開発者たちのスケジュールによって、数
58     日かかる場合がある。
59     - もし受け取られたら、パッチは他の開発者たちと関連するサブシステムの
60     メンテナーによるレビューのために -stable キューに追加される。
61     - - パッチに stable@kernel.org のアドレスが付加されているときには、それ
62     + - パッチに stable@vger.kernel.org のアドレスが付加されているときには、それ
63     が Linus のツリーに入る時に自動的に stable チームに email される。
64     - - セキュリティパッチはこのエイリアス (stable@kernel.org) に送られるべ
65     + - セキュリティパッチはこのエイリアス (stable@vger.kernel.org) に送られるべ
66     きではなく、代わりに security@kernel.org のアドレスに送られる。
67    
68     レビューサイクル-
69     diff --git a/Documentation/zh_CN/HOWTO b/Documentation/zh_CN/HOWTO
70     index 7fba5aab9ef9..7599eb38b764 100644
71     --- a/Documentation/zh_CN/HOWTO
72     +++ b/Documentation/zh_CN/HOWTO
73     @@ -237,7 +237,7 @@ kernel.org网站的pub/linux/kernel/v2.6/目录下找到它。它的开发遵循
74     如果没有2.6.x.y版本内核存在,那么最新的2.6.x版本内核就相当于是当前的稳定
75     版内核。
76    
77     -2.6.x.y版本由“稳定版”小组(邮件地址<stable@kernel.org>)维护,一般隔周发
78     +2.6.x.y版本由“稳定版”小组(邮件地址<stable@vger.kernel.org>)维护,一般隔周发
79     布新版本。
80    
81     内核源码中的Documentation/stable_kernel_rules.txt文件具体描述了可被稳定
82     diff --git a/Documentation/zh_CN/stable_kernel_rules.txt b/Documentation/zh_CN/stable_kernel_rules.txt
83     index b5b9b0ab02fd..26ea5ed7cd9c 100644
84     --- a/Documentation/zh_CN/stable_kernel_rules.txt
85     +++ b/Documentation/zh_CN/stable_kernel_rules.txt
86     @@ -42,7 +42,7 @@ Documentation/stable_kernel_rules.txt 的中文翻译
87    
88     向稳定版代码树提交补丁的过程:
89    
90     - - 在确认了补丁符合以上的规则后,将补丁发送到stable@kernel.org。
91     + - 在确认了补丁符合以上的规则后,将补丁发送到stable@vger.kernel.org。
92     - 如果补丁被接受到队列里,发送者会收到一个ACK回复,如果没有被接受,收
93     到的是NAK回复。回复需要几天的时间,这取决于开发者的时间安排。
94     - 被接受的补丁会被加到稳定版本队列里,等待其他开发者的审查。
95     diff --git a/Makefile b/Makefile
96     index 370cc01afb07..4634015fed68 100644
97     --- a/Makefile
98     +++ b/Makefile
99     @@ -1,6 +1,6 @@
100     VERSION = 3
101     PATCHLEVEL = 10
102     -SUBLEVEL = 41
103     +SUBLEVEL = 42
104     EXTRAVERSION =
105     NAME = TOSSUG Baby Fish
106    
107     diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
108     index eb83aa039b8b..e524316998f4 100644
109     --- a/arch/arm/boot/dts/imx53.dtsi
110     +++ b/arch/arm/boot/dts/imx53.dtsi
111     @@ -71,7 +71,7 @@
112     ipu: ipu@18000000 {
113     #crtc-cells = <1>;
114     compatible = "fsl,imx53-ipu";
115     - reg = <0x18000000 0x080000000>;
116     + reg = <0x18000000 0x08000000>;
117     interrupts = <11 10>;
118     clocks = <&clks 59>, <&clks 110>, <&clks 61>;
119     clock-names = "bus", "di0", "di1";
120     diff --git a/arch/arm/kernel/crash_dump.c b/arch/arm/kernel/crash_dump.c
121     index 90c50d4b43f7..5d1286d51154 100644
122     --- a/arch/arm/kernel/crash_dump.c
123     +++ b/arch/arm/kernel/crash_dump.c
124     @@ -39,7 +39,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
125     if (!csize)
126     return 0;
127    
128     - vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
129     + vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE);
130     if (!vaddr)
131     return -ENOMEM;
132    
133     diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
134     index c90bfc6bf648..e355a4c10968 100644
135     --- a/arch/metag/include/asm/barrier.h
136     +++ b/arch/metag/include/asm/barrier.h
137     @@ -15,6 +15,7 @@ static inline void wr_fence(void)
138     volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_FENCE;
139     barrier();
140     *flushptr = 0;
141     + barrier();
142     }
143    
144     #else /* CONFIG_METAG_META21 */
145     @@ -35,6 +36,7 @@ static inline void wr_fence(void)
146     *flushptr = 0;
147     *flushptr = 0;
148     *flushptr = 0;
149     + barrier();
150     }
151    
152     #endif /* !CONFIG_METAG_META21 */
153     @@ -68,6 +70,7 @@ static inline void fence(void)
154     volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK;
155     barrier();
156     *flushptr = 0;
157     + barrier();
158     }
159     #define smp_mb() fence()
160     #define smp_rmb() fence()
161     diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
162     index 9b029a7911c3..579e3d93a5ca 100644
163     --- a/arch/metag/include/asm/processor.h
164     +++ b/arch/metag/include/asm/processor.h
165     @@ -22,6 +22,8 @@
166     /* Add an extra page of padding at the top of the stack for the guard page. */
167     #define STACK_TOP (TASK_SIZE - PAGE_SIZE)
168     #define STACK_TOP_MAX STACK_TOP
169     +/* Maximum virtual space for stack */
170     +#define STACK_SIZE_MAX (1 << 28) /* 256 MB */
171    
172     /* This decides where the kernel will search for a free chunk of vm
173     * space during mmap's.
174     diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
175     index a22f06a6f7ca..45c1a6caa206 100644
176     --- a/arch/mips/cavium-octeon/octeon-irq.c
177     +++ b/arch/mips/cavium-octeon/octeon-irq.c
178     @@ -635,7 +635,7 @@ static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
179     cpumask_clear(&new_affinity);
180     cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
181     }
182     - __irq_set_affinity_locked(data, &new_affinity);
183     + irq_set_affinity_locked(data, &new_affinity, false);
184     }
185    
186     static int octeon_irq_ciu_set_affinity(struct irq_data *data,
187     diff --git a/arch/mips/lantiq/dts/easy50712.dts b/arch/mips/lantiq/dts/easy50712.dts
188     index fac1f5b178eb..143b8a37b5e4 100644
189     --- a/arch/mips/lantiq/dts/easy50712.dts
190     +++ b/arch/mips/lantiq/dts/easy50712.dts
191     @@ -8,6 +8,7 @@
192     };
193    
194     memory@0 {
195     + device_type = "memory";
196     reg = <0x0 0x2000000>;
197     };
198    
199     diff --git a/arch/mips/ralink/dts/mt7620a_eval.dts b/arch/mips/ralink/dts/mt7620a_eval.dts
200     index 35eb874ab7f1..709f58132f5c 100644
201     --- a/arch/mips/ralink/dts/mt7620a_eval.dts
202     +++ b/arch/mips/ralink/dts/mt7620a_eval.dts
203     @@ -7,6 +7,7 @@
204     model = "Ralink MT7620A evaluation board";
205    
206     memory@0 {
207     + device_type = "memory";
208     reg = <0x0 0x2000000>;
209     };
210    
211     diff --git a/arch/mips/ralink/dts/rt2880_eval.dts b/arch/mips/ralink/dts/rt2880_eval.dts
212     index 322d7002595b..0a685db093d4 100644
213     --- a/arch/mips/ralink/dts/rt2880_eval.dts
214     +++ b/arch/mips/ralink/dts/rt2880_eval.dts
215     @@ -7,6 +7,7 @@
216     model = "Ralink RT2880 evaluation board";
217    
218     memory@0 {
219     + device_type = "memory";
220     reg = <0x8000000 0x2000000>;
221     };
222    
223     diff --git a/arch/mips/ralink/dts/rt3052_eval.dts b/arch/mips/ralink/dts/rt3052_eval.dts
224     index 0ac73ea28198..ec9e9a035541 100644
225     --- a/arch/mips/ralink/dts/rt3052_eval.dts
226     +++ b/arch/mips/ralink/dts/rt3052_eval.dts
227     @@ -7,6 +7,7 @@
228     model = "Ralink RT3052 evaluation board";
229    
230     memory@0 {
231     + device_type = "memory";
232     reg = <0x0 0x2000000>;
233     };
234    
235     diff --git a/arch/mips/ralink/dts/rt3883_eval.dts b/arch/mips/ralink/dts/rt3883_eval.dts
236     index 2fa6b330bf4f..e8df21a5d10d 100644
237     --- a/arch/mips/ralink/dts/rt3883_eval.dts
238     +++ b/arch/mips/ralink/dts/rt3883_eval.dts
239     @@ -7,6 +7,7 @@
240     model = "Ralink RT3883 evaluation board";
241    
242     memory@0 {
243     + device_type = "memory";
244     reg = <0x0 0x2000000>;
245     };
246    
247     diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
248     index cc2290a3cace..c6ee86542fec 100644
249     --- a/arch/parisc/include/asm/processor.h
250     +++ b/arch/parisc/include/asm/processor.h
251     @@ -53,6 +53,8 @@
252     #define STACK_TOP TASK_SIZE
253     #define STACK_TOP_MAX DEFAULT_TASK_SIZE
254    
255     +#define STACK_SIZE_MAX (1 << 30) /* 1 GB */
256     +
257     #endif
258    
259     #ifndef __ASSEMBLY__
260     diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
261     index 967fd23ace78..56a4a5d205af 100644
262     --- a/arch/powerpc/Makefile
263     +++ b/arch/powerpc/Makefile
264     @@ -97,7 +97,9 @@ CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7)
265    
266     CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell)
267    
268     -KBUILD_CPPFLAGS += -Iarch/$(ARCH)
269     +asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
270     +
271     +KBUILD_CPPFLAGS += -Iarch/$(ARCH) $(asinstr)
272     KBUILD_AFLAGS += -Iarch/$(ARCH)
273     KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
274     CPP = $(CC) -E $(KBUILD_CFLAGS)
275     diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
276     index 2f1b6c5f8174..22cee04a47fc 100644
277     --- a/arch/powerpc/include/asm/ppc_asm.h
278     +++ b/arch/powerpc/include/asm/ppc_asm.h
279     @@ -390,11 +390,16 @@ n:
280     * ld rY,ADDROFF(name)(rX)
281     */
282     #ifdef __powerpc64__
283     +#ifdef HAVE_AS_ATHIGH
284     +#define __AS_ATHIGH high
285     +#else
286     +#define __AS_ATHIGH h
287     +#endif
288     #define LOAD_REG_IMMEDIATE(reg,expr) \
289     lis reg,(expr)@highest; \
290     ori reg,reg,(expr)@higher; \
291     rldicr reg,reg,32,31; \
292     - oris reg,reg,(expr)@h; \
293     + oris reg,reg,(expr)@__AS_ATHIGH; \
294     ori reg,reg,(expr)@l;
295    
296     #define LOAD_REG_ADDR(reg,name) \
297     diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
298     index 1e1c995ddacc..d55357ee9028 100644
299     --- a/arch/powerpc/kernel/process.c
300     +++ b/arch/powerpc/kernel/process.c
301     @@ -948,6 +948,16 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
302     flush_altivec_to_thread(src);
303     flush_vsx_to_thread(src);
304     flush_spe_to_thread(src);
305     + /*
306     + * Flush TM state out so we can copy it. __switch_to_tm() does this
307     + * flush but it removes the checkpointed state from the current CPU and
308     + * transitions the CPU out of TM mode. Hence we need to call
309     + * tm_recheckpoint_new_task() (on the same task) to restore the
310     + * checkpointed state back and the TM mode.
311     + */
312     + __switch_to_tm(src);
313     + tm_recheckpoint_new_task(src);
314     +
315     *dst = *src;
316     return 0;
317     }
318     diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
319     index 2a245b55bb71..fd104db9cea1 100644
320     --- a/arch/s390/crypto/aes_s390.c
321     +++ b/arch/s390/crypto/aes_s390.c
322     @@ -818,6 +818,9 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
323     else
324     memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
325     spin_unlock(&ctrblk_lock);
326     + } else {
327     + if (!nbytes)
328     + memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
329     }
330     /*
331     * final block may be < AES_BLOCK_SIZE, copy only nbytes
332     diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
333     index 2d96e68febb2..f2d6cccddcf8 100644
334     --- a/arch/s390/crypto/des_s390.c
335     +++ b/arch/s390/crypto/des_s390.c
336     @@ -429,6 +429,9 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
337     else
338     memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
339     spin_unlock(&ctrblk_lock);
340     + } else {
341     + if (!nbytes)
342     + memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
343     }
344     /* final block may be < DES_BLOCK_SIZE, copy only nbytes */
345     if (nbytes) {
346     diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
347     index a8091216963b..68c05398bba9 100644
348     --- a/arch/x86/include/asm/hugetlb.h
349     +++ b/arch/x86/include/asm/hugetlb.h
350     @@ -52,6 +52,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
351     static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
352     unsigned long addr, pte_t *ptep)
353     {
354     + ptep_clear_flush(vma, addr, ptep);
355     }
356    
357     static inline int huge_pte_none(pte_t pte)
358     diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
359     index af1d14a9ebda..dcbbaa165bde 100644
360     --- a/arch/x86/kernel/ldt.c
361     +++ b/arch/x86/kernel/ldt.c
362     @@ -20,6 +20,8 @@
363     #include <asm/mmu_context.h>
364     #include <asm/syscalls.h>
365    
366     +int sysctl_ldt16 = 0;
367     +
368     #ifdef CONFIG_SMP
369     static void flush_ldt(void *current_mm)
370     {
371     @@ -234,7 +236,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
372     * IRET leaking the high bits of the kernel stack address.
373     */
374     #ifdef CONFIG_X86_64
375     - if (!ldt_info.seg_32bit) {
376     + if (!ldt_info.seg_32bit && !sysctl_ldt16) {
377     error = -EINVAL;
378     goto out_unlock;
379     }
380     diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
381     index 0faad646f5fd..0f134c7cfc24 100644
382     --- a/arch/x86/vdso/vdso32-setup.c
383     +++ b/arch/x86/vdso/vdso32-setup.c
384     @@ -41,6 +41,7 @@ enum {
385     #ifdef CONFIG_X86_64
386     #define vdso_enabled sysctl_vsyscall32
387     #define arch_setup_additional_pages syscall32_setup_pages
388     +extern int sysctl_ldt16;
389     #endif
390    
391     /*
392     @@ -380,6 +381,13 @@ static ctl_table abi_table2[] = {
393     .mode = 0644,
394     .proc_handler = proc_dointvec
395     },
396     + {
397     + .procname = "ldt16",
398     + .data = &sysctl_ldt16,
399     + .maxlen = sizeof(int),
400     + .mode = 0644,
401     + .proc_handler = proc_dointvec
402     + },
403     {}
404     };
405    
406     diff --git a/crypto/crypto_wq.c b/crypto/crypto_wq.c
407     index adad92a44ba2..2f1b8d12952a 100644
408     --- a/crypto/crypto_wq.c
409     +++ b/crypto/crypto_wq.c
410     @@ -33,7 +33,7 @@ static void __exit crypto_wq_exit(void)
411     destroy_workqueue(kcrypto_wq);
412     }
413    
414     -module_init(crypto_wq_init);
415     +subsys_initcall(crypto_wq_init);
416     module_exit(crypto_wq_exit);
417    
418     MODULE_LICENSE("GPL");
419     diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
420     index cb9629638def..76da257cfc28 100644
421     --- a/drivers/acpi/blacklist.c
422     +++ b/drivers/acpi/blacklist.c
423     @@ -327,6 +327,19 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
424     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"),
425     },
426     },
427     + /*
428     + * Without this this EEEpc exports a non working WMI interface, with
429     + * this it exports a working "good old" eeepc_laptop interface, fixing
430     + * both brightness control, and rfkill not working.
431     + */
432     + {
433     + .callback = dmi_enable_osi_linux,
434     + .ident = "Asus EEE PC 1015PX",
435     + .matches = {
436     + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
437     + DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
438     + },
439     + },
440     {}
441     };
442    
443     diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
444     index 9cf616b5210b..bf00fbcde8ad 100644
445     --- a/drivers/ata/libata-core.c
446     +++ b/drivers/ata/libata-core.c
447     @@ -6300,6 +6300,8 @@ int ata_host_activate(struct ata_host *host, int irq,
448     static void ata_port_detach(struct ata_port *ap)
449     {
450     unsigned long flags;
451     + struct ata_link *link;
452     + struct ata_device *dev;
453    
454     if (!ap->ops->error_handler)
455     goto skip_eh;
456     @@ -6319,6 +6321,13 @@ static void ata_port_detach(struct ata_port *ap)
457     cancel_delayed_work_sync(&ap->hotplug_task);
458    
459     skip_eh:
460     + /* clean up zpodd on port removal */
461     + ata_for_each_link(link, ap, HOST_FIRST) {
462     + ata_for_each_dev(dev, link, ALL) {
463     + if (zpodd_dev_enabled(dev))
464     + zpodd_exit(dev);
465     + }
466     + }
467     if (ap->pmp_link) {
468     int i;
469     for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
470     diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
471     index 033f3f4c20ad..fa288597f01b 100644
472     --- a/drivers/ata/pata_at91.c
473     +++ b/drivers/ata/pata_at91.c
474     @@ -408,12 +408,13 @@ static int pata_at91_probe(struct platform_device *pdev)
475    
476     host->private_data = info;
477    
478     - return ata_host_activate(host, gpio_is_valid(irq) ? gpio_to_irq(irq) : 0,
479     - gpio_is_valid(irq) ? ata_sff_interrupt : NULL,
480     - irq_flags, &pata_at91_sht);
481     + ret = ata_host_activate(host, gpio_is_valid(irq) ? gpio_to_irq(irq) : 0,
482     + gpio_is_valid(irq) ? ata_sff_interrupt : NULL,
483     + irq_flags, &pata_at91_sht);
484     + if (ret)
485     + goto err_put;
486    
487     - if (!ret)
488     - return 0;
489     + return 0;
490    
491     err_put:
492     clk_put(info->mck);
493     diff --git a/drivers/base/dd.c b/drivers/base/dd.c
494     index 06051767393f..8a8d611f2021 100644
495     --- a/drivers/base/dd.c
496     +++ b/drivers/base/dd.c
497     @@ -52,6 +52,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);
498     static LIST_HEAD(deferred_probe_pending_list);
499     static LIST_HEAD(deferred_probe_active_list);
500     static struct workqueue_struct *deferred_wq;
501     +static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
502    
503     /**
504     * deferred_probe_work_func() - Retry probing devices in the active list.
505     @@ -135,6 +136,17 @@ static bool driver_deferred_probe_enable = false;
506     * This functions moves all devices from the pending list to the active
507     * list and schedules the deferred probe workqueue to process them. It
508     * should be called anytime a driver is successfully bound to a device.
509     + *
510     + * Note, there is a race condition in multi-threaded probe. In the case where
511     + * more than one device is probing at the same time, it is possible for one
512     + * probe to complete successfully while another is about to defer. If the second
513     + * depends on the first, then it will get put on the pending list after the
514     + * trigger event has already occured and will be stuck there.
515     + *
516     + * The atomic 'deferred_trigger_count' is used to determine if a successful
517     + * trigger has occurred in the midst of probing a driver. If the trigger count
518     + * changes in the midst of a probe, then deferred processing should be triggered
519     + * again.
520     */
521     static void driver_deferred_probe_trigger(void)
522     {
523     @@ -147,6 +159,7 @@ static void driver_deferred_probe_trigger(void)
524     * into the active list so they can be retried by the workqueue
525     */
526     mutex_lock(&deferred_probe_mutex);
527     + atomic_inc(&deferred_trigger_count);
528     list_splice_tail_init(&deferred_probe_pending_list,
529     &deferred_probe_active_list);
530     mutex_unlock(&deferred_probe_mutex);
531     @@ -265,6 +278,7 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
532     static int really_probe(struct device *dev, struct device_driver *drv)
533     {
534     int ret = 0;
535     + int local_trigger_count = atomic_read(&deferred_trigger_count);
536    
537     atomic_inc(&probe_count);
538     pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
539     @@ -310,6 +324,9 @@ probe_failed:
540     /* Driver requested deferred probing */
541     dev_info(dev, "Driver %s requests probe deferral\n", drv->name);
542     driver_deferred_probe_add(dev);
543     + /* Did a trigger occur while probing? Need to re-trigger if yes */
544     + if (local_trigger_count != atomic_read(&deferred_trigger_count))
545     + driver_deferred_probe_trigger();
546     } else if (ret != -ENODEV && ret != -ENXIO) {
547     /* driver matched but the probe failed */
548     printk(KERN_WARNING
549     diff --git a/drivers/base/topology.c b/drivers/base/topology.c
550     index ae989c57cd5e..bcd19886fa1a 100644
551     --- a/drivers/base/topology.c
552     +++ b/drivers/base/topology.c
553     @@ -40,8 +40,7 @@
554     static ssize_t show_##name(struct device *dev, \
555     struct device_attribute *attr, char *buf) \
556     { \
557     - unsigned int cpu = dev->id; \
558     - return sprintf(buf, "%d\n", topology_##name(cpu)); \
559     + return sprintf(buf, "%d\n", topology_##name(dev->id)); \
560     }
561    
562     #if defined(topology_thread_cpumask) || defined(topology_core_cpumask) || \
563     diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
564     index 1735b0d17e29..ddd9a098bc67 100644
565     --- a/drivers/block/xen-blkfront.c
566     +++ b/drivers/block/xen-blkfront.c
567     @@ -104,7 +104,7 @@ struct blkfront_info
568     struct work_struct work;
569     struct gnttab_free_callback callback;
570     struct blk_shadow shadow[BLK_RING_SIZE];
571     - struct list_head persistent_gnts;
572     + struct list_head grants;
573     unsigned int persistent_gnts_c;
574     unsigned long shadow_free;
575     unsigned int feature_flush;
576     @@ -175,15 +175,17 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
577     if (!gnt_list_entry)
578     goto out_of_memory;
579    
580     - granted_page = alloc_page(GFP_NOIO);
581     - if (!granted_page) {
582     - kfree(gnt_list_entry);
583     - goto out_of_memory;
584     + if (info->feature_persistent) {
585     + granted_page = alloc_page(GFP_NOIO);
586     + if (!granted_page) {
587     + kfree(gnt_list_entry);
588     + goto out_of_memory;
589     + }
590     + gnt_list_entry->pfn = page_to_pfn(granted_page);
591     }
592    
593     - gnt_list_entry->pfn = page_to_pfn(granted_page);
594     gnt_list_entry->gref = GRANT_INVALID_REF;
595     - list_add(&gnt_list_entry->node, &info->persistent_gnts);
596     + list_add(&gnt_list_entry->node, &info->grants);
597     i++;
598     }
599    
600     @@ -191,9 +193,10 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
601    
602     out_of_memory:
603     list_for_each_entry_safe(gnt_list_entry, n,
604     - &info->persistent_gnts, node) {
605     + &info->grants, node) {
606     list_del(&gnt_list_entry->node);
607     - __free_page(pfn_to_page(gnt_list_entry->pfn));
608     + if (info->feature_persistent)
609     + __free_page(pfn_to_page(gnt_list_entry->pfn));
610     kfree(gnt_list_entry);
611     i--;
612     }
613     @@ -202,14 +205,14 @@ out_of_memory:
614     }
615    
616     static struct grant *get_grant(grant_ref_t *gref_head,
617     + unsigned long pfn,
618     struct blkfront_info *info)
619     {
620     struct grant *gnt_list_entry;
621     unsigned long buffer_mfn;
622    
623     - BUG_ON(list_empty(&info->persistent_gnts));
624     - gnt_list_entry = list_first_entry(&info->persistent_gnts, struct grant,
625     - node);
626     + BUG_ON(list_empty(&info->grants));
627     + gnt_list_entry = list_first_entry(&info->grants, struct grant, node);
628     list_del(&gnt_list_entry->node);
629    
630     if (gnt_list_entry->gref != GRANT_INVALID_REF) {
631     @@ -220,6 +223,10 @@ static struct grant *get_grant(grant_ref_t *gref_head,
632     /* Assign a gref to this page */
633     gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
634     BUG_ON(gnt_list_entry->gref == -ENOSPC);
635     + if (!info->feature_persistent) {
636     + BUG_ON(!pfn);
637     + gnt_list_entry->pfn = pfn;
638     + }
639     buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
640     gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
641     info->xbdev->otherend_id,
642     @@ -430,12 +437,12 @@ static int blkif_queue_request(struct request *req)
643     fsect = sg->offset >> 9;
644     lsect = fsect + (sg->length >> 9) - 1;
645    
646     - gnt_list_entry = get_grant(&gref_head, info);
647     + gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info);
648     ref = gnt_list_entry->gref;
649    
650     info->shadow[id].grants_used[i] = gnt_list_entry;
651    
652     - if (rq_data_dir(req)) {
653     + if (rq_data_dir(req) && info->feature_persistent) {
654     char *bvec_data;
655     void *shared_data;
656    
657     @@ -828,16 +835,17 @@ static void blkif_free(struct blkfront_info *info, int suspend)
658     blk_stop_queue(info->rq);
659    
660     /* Remove all persistent grants */
661     - if (!list_empty(&info->persistent_gnts)) {
662     + if (!list_empty(&info->grants)) {
663     list_for_each_entry_safe(persistent_gnt, n,
664     - &info->persistent_gnts, node) {
665     + &info->grants, node) {
666     list_del(&persistent_gnt->node);
667     if (persistent_gnt->gref != GRANT_INVALID_REF) {
668     gnttab_end_foreign_access(persistent_gnt->gref,
669     0, 0UL);
670     info->persistent_gnts_c--;
671     }
672     - __free_page(pfn_to_page(persistent_gnt->pfn));
673     + if (info->feature_persistent)
674     + __free_page(pfn_to_page(persistent_gnt->pfn));
675     kfree(persistent_gnt);
676     }
677     }
678     @@ -874,7 +882,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
679    
680     nseg = s->req.u.rw.nr_segments;
681    
682     - if (bret->operation == BLKIF_OP_READ) {
683     + if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
684     /*
685     * Copy the data received from the backend into the bvec.
686     * Since bv_offset can be different than 0, and bv_len different
687     @@ -894,9 +902,30 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
688     }
689     }
690     /* Add the persistent grant into the list of free grants */
691     - for (i = 0; i < s->req.u.rw.nr_segments; i++) {
692     - list_add(&s->grants_used[i]->node, &info->persistent_gnts);
693     - info->persistent_gnts_c++;
694     + for (i = 0; i < nseg; i++) {
695     + if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
696     + /*
697     + * If the grant is still mapped by the backend (the
698     + * backend has chosen to make this grant persistent)
699     + * we add it at the head of the list, so it will be
700     + * reused first.
701     + */
702     + if (!info->feature_persistent)
703     + pr_alert_ratelimited("backed has not unmapped grant: %u\n",
704     + s->grants_used[i]->gref);
705     + list_add(&s->grants_used[i]->node, &info->grants);
706     + info->persistent_gnts_c++;
707     + } else {
708     + /*
709     + * If the grant is not mapped by the backend we end the
710     + * foreign access and add it to the tail of the list,
711     + * so it will not be picked again unless we run out of
712     + * persistent grants.
713     + */
714     + gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
715     + s->grants_used[i]->gref = GRANT_INVALID_REF;
716     + list_add_tail(&s->grants_used[i]->node, &info->grants);
717     + }
718     }
719     }
720    
721     @@ -1034,12 +1063,6 @@ static int setup_blkring(struct xenbus_device *dev,
722     for (i = 0; i < BLK_RING_SIZE; i++)
723     sg_init_table(info->shadow[i].sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
724    
725     - /* Allocate memory for grants */
726     - err = fill_grant_buffer(info, BLK_RING_SIZE *
727     - BLKIF_MAX_SEGMENTS_PER_REQUEST);
728     - if (err)
729     - goto fail;
730     -
731     err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
732     if (err < 0) {
733     free_page((unsigned long)sring);
734     @@ -1198,7 +1221,7 @@ static int blkfront_probe(struct xenbus_device *dev,
735     spin_lock_init(&info->io_lock);
736     info->xbdev = dev;
737     info->vdevice = vdevice;
738     - INIT_LIST_HEAD(&info->persistent_gnts);
739     + INIT_LIST_HEAD(&info->grants);
740     info->persistent_gnts_c = 0;
741     info->connected = BLKIF_STATE_DISCONNECTED;
742     INIT_WORK(&info->work, blkif_restart_queue);
743     @@ -1227,7 +1250,8 @@ static int blkif_recover(struct blkfront_info *info)
744     int i;
745     struct blkif_request *req;
746     struct blk_shadow *copy;
747     - int j;
748     + unsigned int persistent;
749     + int j, rc;
750    
751     /* Stage 1: Make a safe copy of the shadow state. */
752     copy = kmemdup(info->shadow, sizeof(info->shadow),
753     @@ -1242,6 +1266,24 @@ static int blkif_recover(struct blkfront_info *info)
754     info->shadow_free = info->ring.req_prod_pvt;
755     info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
756    
757     + /* Check if the backend supports persistent grants */
758     + rc = xenbus_gather(XBT_NIL, info->xbdev->otherend,
759     + "feature-persistent", "%u", &persistent,
760     + NULL);
761     + if (rc)
762     + info->feature_persistent = 0;
763     + else
764     + info->feature_persistent = persistent;
765     +
766     + /* Allocate memory for grants */
767     + rc = fill_grant_buffer(info, BLK_RING_SIZE *
768     + BLKIF_MAX_SEGMENTS_PER_REQUEST);
769     + if (rc) {
770     + xenbus_dev_fatal(info->xbdev, rc, "setting grant buffer failed");
771     + kfree(copy);
772     + return rc;
773     + }
774     +
775     /* Stage 3: Find pending requests and requeue them. */
776     for (i = 0; i < BLK_RING_SIZE; i++) {
777     /* Not in use? */
778     @@ -1306,8 +1348,12 @@ static int blkfront_resume(struct xenbus_device *dev)
779     blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
780    
781     err = talk_to_blkback(dev, info);
782     - if (info->connected == BLKIF_STATE_SUSPENDED && !err)
783     - err = blkif_recover(info);
784     +
785     + /*
786     + * We have to wait for the backend to switch to
787     + * connected state, since we want to read which
788     + * features it supports.
789     + */
790    
791     return err;
792     }
793     @@ -1411,9 +1457,16 @@ static void blkfront_connect(struct blkfront_info *info)
794     sectors);
795     set_capacity(info->gd, sectors);
796     revalidate_disk(info->gd);
797     + return;
798    
799     - /* fall through */
800     case BLKIF_STATE_SUSPENDED:
801     + /*
802     + * If we are recovering from suspension, we need to wait
803     + * for the backend to announce it's features before
804     + * reconnecting, we need to know if the backend supports
805     + * persistent grants.
806     + */
807     + blkif_recover(info);
808     return;
809    
810     default:
811     @@ -1481,6 +1534,14 @@ static void blkfront_connect(struct blkfront_info *info)
812     else
813     info->feature_persistent = persistent;
814    
815     + /* Allocate memory for grants */
816     + err = fill_grant_buffer(info, BLK_RING_SIZE *
817     + BLKIF_MAX_SEGMENTS_PER_REQUEST);
818     + if (err) {
819     + xenbus_dev_fatal(info->xbdev, err, "setting grant buffer failed");
820     + return;
821     + }
822     +
823     err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
824     if (err) {
825     xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
826     diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
827     index 0a327f4154a2..2acabdaecec8 100644
828     --- a/drivers/bluetooth/ath3k.c
829     +++ b/drivers/bluetooth/ath3k.c
830     @@ -82,6 +82,7 @@ static struct usb_device_id ath3k_table[] = {
831     { USB_DEVICE(0x04CA, 0x3004) },
832     { USB_DEVICE(0x04CA, 0x3005) },
833     { USB_DEVICE(0x04CA, 0x3006) },
834     + { USB_DEVICE(0x04CA, 0x3007) },
835     { USB_DEVICE(0x04CA, 0x3008) },
836     { USB_DEVICE(0x13d3, 0x3362) },
837     { USB_DEVICE(0x0CF3, 0xE004) },
838     @@ -124,6 +125,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
839     { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
840     { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
841     { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
842     + { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
843     { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
844     { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
845     { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
846     diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
847     index 58491f1b2799..45aa8e760124 100644
848     --- a/drivers/bluetooth/btusb.c
849     +++ b/drivers/bluetooth/btusb.c
850     @@ -146,6 +146,7 @@ static struct usb_device_id blacklist_table[] = {
851     { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
852     { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
853     { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
854     + { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
855     { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
856     { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
857     { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
858     diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
859     index 8740f46b4d0d..5dcc8305abd1 100644
860     --- a/drivers/bus/mvebu-mbus.c
861     +++ b/drivers/bus/mvebu-mbus.c
862     @@ -250,12 +250,6 @@ static int mvebu_mbus_window_conflicts(struct mvebu_mbus_state *mbus,
863     */
864     if ((u64)base < wend && end > wbase)
865     return 0;
866     -
867     - /*
868     - * Check if target/attribute conflicts
869     - */
870     - if (target == wtarget && attr == wattr)
871     - return 0;
872     }
873    
874     return 1;
875     diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
876     index e53fc24c6af3..e1ddcf938519 100644
877     --- a/drivers/char/ipmi/ipmi_kcs_sm.c
878     +++ b/drivers/char/ipmi/ipmi_kcs_sm.c
879     @@ -251,8 +251,9 @@ static inline int check_obf(struct si_sm_data *kcs, unsigned char status,
880     if (!GET_STATUS_OBF(status)) {
881     kcs->obf_timeout -= time;
882     if (kcs->obf_timeout < 0) {
883     - start_error_recovery(kcs, "OBF not ready in time");
884     - return 1;
885     + kcs->obf_timeout = OBF_RETRY_TIMEOUT;
886     + start_error_recovery(kcs, "OBF not ready in time");
887     + return 1;
888     }
889     return 0;
890     }
891     diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
892     index af4b23ffc5a6..40b3f756f904 100644
893     --- a/drivers/char/ipmi/ipmi_si_intf.c
894     +++ b/drivers/char/ipmi/ipmi_si_intf.c
895     @@ -244,6 +244,9 @@ struct smi_info {
896     /* The timer for this si. */
897     struct timer_list si_timer;
898    
899     + /* This flag is set, if the timer is running (timer_pending() isn't enough) */
900     + bool timer_running;
901     +
902     /* The time (in jiffies) the last timeout occurred at. */
903     unsigned long last_timeout_jiffies;
904    
905     @@ -427,6 +430,13 @@ static void start_clear_flags(struct smi_info *smi_info)
906     smi_info->si_state = SI_CLEARING_FLAGS;
907     }
908    
909     +static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
910     +{
911     + smi_info->last_timeout_jiffies = jiffies;
912     + mod_timer(&smi_info->si_timer, new_val);
913     + smi_info->timer_running = true;
914     +}
915     +
916     /*
917     * When we have a situtaion where we run out of memory and cannot
918     * allocate messages, we just leave them in the BMC and run the system
919     @@ -439,8 +449,7 @@ static inline void disable_si_irq(struct smi_info *smi_info)
920     start_disable_irq(smi_info);
921     smi_info->interrupt_disabled = 1;
922     if (!atomic_read(&smi_info->stop_operation))
923     - mod_timer(&smi_info->si_timer,
924     - jiffies + SI_TIMEOUT_JIFFIES);
925     + smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
926     }
927     }
928    
929     @@ -900,15 +909,7 @@ static void sender(void *send_info,
930     list_add_tail(&msg->link, &smi_info->xmit_msgs);
931    
932     if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
933     - /*
934     - * last_timeout_jiffies is updated here to avoid
935     - * smi_timeout() handler passing very large time_diff
936     - * value to smi_event_handler() that causes
937     - * the send command to abort.
938     - */
939     - smi_info->last_timeout_jiffies = jiffies;
940     -
941     - mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
942     + smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
943    
944     if (smi_info->thread)
945     wake_up_process(smi_info->thread);
946     @@ -997,6 +998,17 @@ static int ipmi_thread(void *data)
947    
948     spin_lock_irqsave(&(smi_info->si_lock), flags);
949     smi_result = smi_event_handler(smi_info, 0);
950     +
951     + /*
952     + * If the driver is doing something, there is a possible
953     + * race with the timer. If the timer handler see idle,
954     + * and the thread here sees something else, the timer
955     + * handler won't restart the timer even though it is
956     + * required. So start it here if necessary.
957     + */
958     + if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
959     + smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
960     +
961     spin_unlock_irqrestore(&(smi_info->si_lock), flags);
962     busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
963     &busy_until);
964     @@ -1066,10 +1078,6 @@ static void smi_timeout(unsigned long data)
965     * SI_USEC_PER_JIFFY);
966     smi_result = smi_event_handler(smi_info, time_diff);
967    
968     - spin_unlock_irqrestore(&(smi_info->si_lock), flags);
969     -
970     - smi_info->last_timeout_jiffies = jiffies_now;
971     -
972     if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
973     /* Running with interrupts, only do long timeouts. */
974     timeout = jiffies + SI_TIMEOUT_JIFFIES;
975     @@ -1091,7 +1099,10 @@ static void smi_timeout(unsigned long data)
976    
977     do_mod_timer:
978     if (smi_result != SI_SM_IDLE)
979     - mod_timer(&(smi_info->si_timer), timeout);
980     + smi_mod_timer(smi_info, timeout);
981     + else
982     + smi_info->timer_running = false;
983     + spin_unlock_irqrestore(&(smi_info->si_lock), flags);
984     }
985    
986     static irqreturn_t si_irq_handler(int irq, void *data)
987     @@ -1139,8 +1150,7 @@ static int smi_start_processing(void *send_info,
988    
989     /* Set up the timer that drives the interface. */
990     setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
991     - new_smi->last_timeout_jiffies = jiffies;
992     - mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
993     + smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
994    
995     /*
996     * Check if the user forcefully enabled the daemon.
997     diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
998     index 256c8be74df8..8b8798bb93f3 100644
999     --- a/drivers/clk/versatile/clk-vexpress-osc.c
1000     +++ b/drivers/clk/versatile/clk-vexpress-osc.c
1001     @@ -102,7 +102,7 @@ void __init vexpress_osc_of_setup(struct device_node *node)
1002    
1003     osc = kzalloc(sizeof(*osc), GFP_KERNEL);
1004     if (!osc)
1005     - goto error;
1006     + return;
1007    
1008     osc->func = vexpress_config_func_get_by_node(node);
1009     if (!osc->func) {
1010     diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
1011     index 662fcc065821..b7960185919d 100644
1012     --- a/drivers/clocksource/exynos_mct.c
1013     +++ b/drivers/clocksource/exynos_mct.c
1014     @@ -429,8 +429,6 @@ static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
1015     evt->set_mode = exynos4_tick_set_mode;
1016     evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
1017     evt->rating = 450;
1018     - clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
1019     - 0xf, 0x7fffffff);
1020    
1021     exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
1022    
1023     @@ -448,6 +446,8 @@ static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
1024     } else {
1025     enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
1026     }
1027     + clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
1028     + 0xf, 0x7fffffff);
1029    
1030     return 0;
1031     }
1032     diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
1033     index 9f25f5296029..0eabd81e1a90 100644
1034     --- a/drivers/crypto/caam/error.c
1035     +++ b/drivers/crypto/caam/error.c
1036     @@ -16,9 +16,13 @@
1037     char *tmp; \
1038     \
1039     tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \
1040     - sprintf(tmp, format, param); \
1041     - strcat(str, tmp); \
1042     - kfree(tmp); \
1043     + if (likely(tmp)) { \
1044     + sprintf(tmp, format, param); \
1045     + strcat(str, tmp); \
1046     + kfree(tmp); \
1047     + } else { \
1048     + strcat(str, "kmalloc failure in SPRINTFCAT"); \
1049     + } \
1050     }
1051    
1052     static void report_jump_idx(u32 status, char *outstr)
1053     diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1054     index 54ae96f7bec6..8814b0dbfc4f 100644
1055     --- a/drivers/gpu/drm/i915/intel_display.c
1056     +++ b/drivers/gpu/drm/i915/intel_display.c
1057     @@ -9123,15 +9123,6 @@ void intel_modeset_init(struct drm_device *dev)
1058     intel_disable_fbc(dev);
1059     }
1060    
1061     -static void
1062     -intel_connector_break_all_links(struct intel_connector *connector)
1063     -{
1064     - connector->base.dpms = DRM_MODE_DPMS_OFF;
1065     - connector->base.encoder = NULL;
1066     - connector->encoder->connectors_active = false;
1067     - connector->encoder->base.crtc = NULL;
1068     -}
1069     -
1070     static void intel_enable_pipe_a(struct drm_device *dev)
1071     {
1072     struct intel_connector *connector;
1073     @@ -9213,8 +9204,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
1074     if (connector->encoder->base.crtc != &crtc->base)
1075     continue;
1076    
1077     - intel_connector_break_all_links(connector);
1078     + connector->base.dpms = DRM_MODE_DPMS_OFF;
1079     + connector->base.encoder = NULL;
1080     }
1081     + /* multiple connectors may have the same encoder:
1082     + * handle them and break crtc link separately */
1083     + list_for_each_entry(connector, &dev->mode_config.connector_list,
1084     + base.head)
1085     + if (connector->encoder->base.crtc == &crtc->base) {
1086     + connector->encoder->base.crtc = NULL;
1087     + connector->encoder->connectors_active = false;
1088     + }
1089    
1090     WARN_ON(crtc->active);
1091     crtc->base.enabled = false;
1092     @@ -9285,6 +9285,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
1093     drm_get_encoder_name(&encoder->base));
1094     encoder->disable(encoder);
1095     }
1096     + encoder->base.crtc = NULL;
1097     + encoder->connectors_active = false;
1098    
1099     /* Inconsistent output/port/pipe state happens presumably due to
1100     * a bug in one of the get_hw_state functions. Or someplace else
1101     @@ -9295,8 +9297,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
1102     base.head) {
1103     if (connector->encoder != encoder)
1104     continue;
1105     -
1106     - intel_connector_break_all_links(connector);
1107     + connector->base.dpms = DRM_MODE_DPMS_OFF;
1108     + connector->base.encoder = NULL;
1109     }
1110     }
1111     /* Enabled encoders without active connectors will be fixed in
1112     diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
1113     index c728380d3d62..ea19acd20784 100644
1114     --- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
1115     +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
1116     @@ -54,8 +54,10 @@ nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
1117    
1118     /* check that we're not already at the target duty cycle */
1119     duty = fan->get(therm);
1120     - if (duty == target)
1121     - goto done;
1122     + if (duty == target) {
1123     + spin_unlock_irqrestore(&fan->lock, flags);
1124     + return 0;
1125     + }
1126    
1127     /* smooth out the fanspeed increase/decrease */
1128     if (!immediate && duty >= 0) {
1129     @@ -73,8 +75,15 @@ nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
1130    
1131     nv_debug(therm, "FAN update: %d\n", duty);
1132     ret = fan->set(therm, duty);
1133     - if (ret)
1134     - goto done;
1135     + if (ret) {
1136     + spin_unlock_irqrestore(&fan->lock, flags);
1137     + return ret;
1138     + }
1139     +
1140     + /* fan speed updated, drop the fan lock before grabbing the
1141     + * alarm-scheduling lock and risking a deadlock
1142     + */
1143     + spin_unlock_irqrestore(&fan->lock, flags);
1144    
1145     /* schedule next fan update, if not at target speed already */
1146     if (list_empty(&fan->alarm.head) && target != duty) {
1147     @@ -92,8 +101,6 @@ nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
1148     ptimer->alarm(ptimer, delay * 1000 * 1000, &fan->alarm);
1149     }
1150    
1151     -done:
1152     - spin_unlock_irqrestore(&fan->lock, flags);
1153     return ret;
1154     }
1155    
1156     diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
1157     index d97f20069d3e..5cec3a0c6c85 100644
1158     --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
1159     +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
1160     @@ -372,9 +372,6 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
1161     acpi_status status;
1162     acpi_handle dhandle, rom_handle;
1163    
1164     - if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected)
1165     - return false;
1166     -
1167     dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
1168     if (!dhandle)
1169     return false;
1170     diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
1171     index cbb06d7c89b5..8c44ef57864b 100644
1172     --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
1173     +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
1174     @@ -523,6 +523,13 @@ static bool radeon_atpx_detect(void)
1175     has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
1176     }
1177    
1178     + /* some newer PX laptops mark the dGPU as a non-VGA display device */
1179     + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
1180     + vga_count++;
1181     +
1182     + has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
1183     + }
1184     +
1185     if (has_atpx && vga_count == 2) {
1186     acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
1187     printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
1188     diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
1189     index 21d2d5280fc1..5715429279fb 100644
1190     --- a/drivers/gpu/drm/radeon/radeon_uvd.c
1191     +++ b/drivers/gpu/drm/radeon/radeon_uvd.c
1192     @@ -449,6 +449,10 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
1193     cmd = radeon_get_ib_value(p, p->idx) >> 1;
1194    
1195     if (cmd < 0x4) {
1196     + if (end <= start) {
1197     + DRM_ERROR("invalid reloc offset %X!\n", offset);
1198     + return -EINVAL;
1199     + }
1200     if ((end - start) < buf_sizes[cmd]) {
1201     DRM_ERROR("buffer to small (%d / %d)!\n",
1202     (unsigned)(end - start), buf_sizes[cmd]);
1203     diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
1204     index b592eef1efcb..b083509325e4 100644
1205     --- a/drivers/gpu/host1x/hw/intr_hw.c
1206     +++ b/drivers/gpu/host1x/hw/intr_hw.c
1207     @@ -48,7 +48,7 @@ static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
1208     unsigned long reg;
1209     int i, id;
1210    
1211     - for (i = 0; i <= BIT_WORD(host->info->nb_pts); i++) {
1212     + for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); i++) {
1213     reg = host1x_sync_readl(host,
1214     HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
1215     for_each_set_bit(id, &reg, BITS_PER_LONG) {
1216     @@ -65,7 +65,7 @@ static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
1217     {
1218     u32 i;
1219    
1220     - for (i = 0; i <= BIT_WORD(host->info->nb_pts); ++i) {
1221     + for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); ++i) {
1222     host1x_sync_writel(host, 0xffffffffu,
1223     HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i));
1224     host1x_sync_writel(host, 0xffffffffu,
1225     diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
1226     index d4fac934b220..fd02cb79a99c 100644
1227     --- a/drivers/hv/connection.c
1228     +++ b/drivers/hv/connection.c
1229     @@ -55,6 +55,9 @@ static __u32 vmbus_get_next_version(__u32 current_version)
1230     case (VERSION_WIN8):
1231     return VERSION_WIN7;
1232    
1233     + case (VERSION_WIN8_1):
1234     + return VERSION_WIN8;
1235     +
1236     case (VERSION_WS2008):
1237     default:
1238     return VERSION_INVAL;
1239     @@ -80,6 +83,9 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
1240     (void *)((unsigned long)vmbus_connection.monitor_pages +
1241     PAGE_SIZE));
1242    
1243     + if (version == VERSION_WIN8_1)
1244     + msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
1245     +
1246     /*
1247     * Add to list before we send the request since we may
1248     * receive the response before returning from this routine
1249     diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
1250     index 142e1cb8dea7..361f50b221bd 100644
1251     --- a/drivers/hwmon/emc1403.c
1252     +++ b/drivers/hwmon/emc1403.c
1253     @@ -162,7 +162,7 @@ static ssize_t store_hyst(struct device *dev,
1254     if (retval < 0)
1255     goto fail;
1256    
1257     - hyst = val - retval * 1000;
1258     + hyst = retval * 1000 - val;
1259     hyst = DIV_ROUND_CLOSEST(hyst, 1000);
1260     if (hyst < 0 || hyst > 255) {
1261     retval = -ERANGE;
1262     @@ -295,7 +295,7 @@ static int emc1403_detect(struct i2c_client *client,
1263     }
1264    
1265     id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
1266     - if (id != 0x01)
1267     + if (id < 0x01 || id > 0x04)
1268     return -ENODEV;
1269    
1270     return 0;
1271     diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
1272     index 49423e913459..d4fe13ee543e 100644
1273     --- a/drivers/i2c/busses/Kconfig
1274     +++ b/drivers/i2c/busses/Kconfig
1275     @@ -109,6 +109,8 @@ config I2C_I801
1276     Avoton (SOC)
1277     Wellsburg (PCH)
1278     Coleto Creek (PCH)
1279     + Wildcat Point-LP (PCH)
1280     + BayTrail (SOC)
1281    
1282     This driver can also be built as a module. If so, the module
1283     will be called i2c-i801.
1284     diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
1285     index c41ca6354fc5..f24a7385260a 100644
1286     --- a/drivers/i2c/busses/i2c-designware-core.c
1287     +++ b/drivers/i2c/busses/i2c-designware-core.c
1288     @@ -380,6 +380,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
1289     ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
1290     dw_writel(dev, ic_con, DW_IC_CON);
1291    
1292     + /* enforce disabled interrupts (due to HW issues) */
1293     + i2c_dw_disable_int(dev);
1294     +
1295     /* Enable the adapter */
1296     __i2c_dw_enable(dev, true);
1297    
1298     diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
1299     index 4ebceed6bc66..783fa75e13ae 100644
1300     --- a/drivers/i2c/busses/i2c-i801.c
1301     +++ b/drivers/i2c/busses/i2c-i801.c
1302     @@ -59,6 +59,8 @@
1303     Wellsburg (PCH) MS 0x8d7e 32 hard yes yes yes
1304     Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes
1305     Coleto Creek (PCH) 0x23b0 32 hard yes yes yes
1306     + Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes
1307     + BayTrail (SOC) 0x0f12 32 hard yes yes yes
1308    
1309     Features supported by this driver:
1310     Software PEC no
1311     @@ -161,6 +163,7 @@
1312     STATUS_ERROR_FLAGS)
1313    
1314     /* Older devices have their ID defined in <linux/pci_ids.h> */
1315     +#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12
1316     #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
1317     #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22
1318     /* Patsburg also has three 'Integrated Device Function' SMBus controllers */
1319     @@ -178,6 +181,7 @@
1320     #define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1 0x8d7e
1321     #define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2 0x8d7f
1322     #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22
1323     +#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS 0x9ca2
1324    
1325     struct i801_mux_config {
1326     char *gpio_chip;
1327     @@ -820,6 +824,8 @@ static DEFINE_PCI_DEVICE_TABLE(i801_ids) = {
1328     { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1) },
1329     { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) },
1330     { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) },
1331     + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) },
1332     + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS) },
1333     { 0, }
1334     };
1335    
1336     diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
1337     index 4ba4a95b6b26..8a806f5c40cf 100644
1338     --- a/drivers/i2c/busses/i2c-rcar.c
1339     +++ b/drivers/i2c/busses/i2c-rcar.c
1340     @@ -541,6 +541,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
1341    
1342     ret = -EINVAL;
1343     for (i = 0; i < num; i++) {
1344     + /* This HW can't send STOP after address phase */
1345     + if (msgs[i].len == 0) {
1346     + ret = -EOPNOTSUPP;
1347     + break;
1348     + }
1349     +
1350     /*-------------- spin lock -----------------*/
1351     spin_lock_irqsave(&priv->lock, flags);
1352    
1353     @@ -605,7 +611,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
1354    
1355     static u32 rcar_i2c_func(struct i2c_adapter *adap)
1356     {
1357     - return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1358     + /* This HW can't do SMBUS_QUICK and NOSTART */
1359     + return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
1360     }
1361    
1362     static const struct i2c_algorithm rcar_i2c_algo = {
1363     diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
1364     index cab1c91b75a3..a72aad9561b0 100644
1365     --- a/drivers/i2c/busses/i2c-s3c2410.c
1366     +++ b/drivers/i2c/busses/i2c-s3c2410.c
1367     @@ -1204,10 +1204,10 @@ static int s3c24xx_i2c_resume(struct device *dev)
1368     struct platform_device *pdev = to_platform_device(dev);
1369     struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
1370    
1371     - i2c->suspended = 0;
1372     clk_prepare_enable(i2c->clk);
1373     s3c24xx_i2c_init(i2c);
1374     clk_disable_unprepare(i2c->clk);
1375     + i2c->suspended = 0;
1376    
1377     return 0;
1378     }
1379     diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1380     index fe4c61e219f3..111ac381b40b 100644
1381     --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1382     +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1383     @@ -660,6 +660,7 @@ static int inv_mpu_probe(struct i2c_client *client,
1384     {
1385     struct inv_mpu6050_state *st;
1386     struct iio_dev *indio_dev;
1387     + struct inv_mpu6050_platform_data *pdata;
1388     int result;
1389    
1390     if (!i2c_check_functionality(client->adapter,
1391     @@ -675,8 +676,10 @@ static int inv_mpu_probe(struct i2c_client *client,
1392     }
1393     st = iio_priv(indio_dev);
1394     st->client = client;
1395     - st->plat_data = *(struct inv_mpu6050_platform_data
1396     - *)dev_get_platdata(&client->dev);
1397     + pdata = (struct inv_mpu6050_platform_data
1398     + *)dev_get_platdata(&client->dev);
1399     + if (pdata)
1400     + st->plat_data = *pdata;
1401     /* power is turned on inside check chip type*/
1402     result = inv_check_and_setup_chip(st, id);
1403     if (result)
1404     diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
1405     index ce6c603a3cc9..988e29d18bb4 100644
1406     --- a/drivers/infiniband/ulp/isert/ib_isert.c
1407     +++ b/drivers/infiniband/ulp/isert/ib_isert.c
1408     @@ -27,6 +27,7 @@
1409     #include <target/target_core_base.h>
1410     #include <target/target_core_fabric.h>
1411     #include <target/iscsi/iscsi_transport.h>
1412     +#include <linux/semaphore.h>
1413    
1414     #include "isert_proto.h"
1415     #include "ib_isert.h"
1416     @@ -459,11 +460,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
1417     goto out_conn_dev;
1418    
1419     mutex_lock(&isert_np->np_accept_mutex);
1420     - list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node);
1421     + list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
1422     mutex_unlock(&isert_np->np_accept_mutex);
1423    
1424     - pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
1425     - wake_up(&isert_np->np_accept_wq);
1426     + pr_debug("isert_connect_request() up np_sem np: %p\n", np);
1427     + up(&isert_np->np_sem);
1428     return 0;
1429    
1430     out_conn_dev:
1431     @@ -2042,7 +2043,7 @@ isert_setup_np(struct iscsi_np *np,
1432     pr_err("Unable to allocate struct isert_np\n");
1433     return -ENOMEM;
1434     }
1435     - init_waitqueue_head(&isert_np->np_accept_wq);
1436     + sema_init(&isert_np->np_sem, 0);
1437     mutex_init(&isert_np->np_accept_mutex);
1438     INIT_LIST_HEAD(&isert_np->np_accept_list);
1439     init_completion(&isert_np->np_login_comp);
1440     @@ -2091,18 +2092,6 @@ out:
1441     }
1442    
1443     static int
1444     -isert_check_accept_queue(struct isert_np *isert_np)
1445     -{
1446     - int empty;
1447     -
1448     - mutex_lock(&isert_np->np_accept_mutex);
1449     - empty = list_empty(&isert_np->np_accept_list);
1450     - mutex_unlock(&isert_np->np_accept_mutex);
1451     -
1452     - return empty;
1453     -}
1454     -
1455     -static int
1456     isert_rdma_accept(struct isert_conn *isert_conn)
1457     {
1458     struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
1459     @@ -2186,16 +2175,14 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
1460     int max_accept = 0, ret;
1461    
1462     accept_wait:
1463     - ret = wait_event_interruptible(isert_np->np_accept_wq,
1464     - !isert_check_accept_queue(isert_np) ||
1465     - np->np_thread_state == ISCSI_NP_THREAD_RESET);
1466     + ret = down_interruptible(&isert_np->np_sem);
1467     if (max_accept > 5)
1468     return -ENODEV;
1469    
1470     spin_lock_bh(&np->np_thread_lock);
1471     if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
1472     spin_unlock_bh(&np->np_thread_lock);
1473     - pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
1474     + pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
1475     return -ENODEV;
1476     }
1477     spin_unlock_bh(&np->np_thread_lock);
1478     diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
1479     index b9d6cc6917cf..dfe4a2ebef0d 100644
1480     --- a/drivers/infiniband/ulp/isert/ib_isert.h
1481     +++ b/drivers/infiniband/ulp/isert/ib_isert.h
1482     @@ -131,7 +131,7 @@ struct isert_device {
1483     };
1484    
1485     struct isert_np {
1486     - wait_queue_head_t np_accept_wq;
1487     + struct semaphore np_sem;
1488     struct rdma_cm_id *np_cm_id;
1489     struct mutex np_accept_mutex;
1490     struct list_head np_accept_list;
1491     diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
1492     index 2626773ff29b..2dd1d0dd4f7d 100644
1493     --- a/drivers/input/keyboard/atkbd.c
1494     +++ b/drivers/input/keyboard/atkbd.c
1495     @@ -243,6 +243,12 @@ static void (*atkbd_platform_fixup)(struct atkbd *, const void *data);
1496     static void *atkbd_platform_fixup_data;
1497     static unsigned int (*atkbd_platform_scancode_fixup)(struct atkbd *, unsigned int);
1498    
1499     +/*
1500     + * Certain keyboards do not like ATKBD_CMD_RESET_DIS and stop responding
1501     + * to many commands until full reset (ATKBD_CMD_RESET_BAT) is performed.
1502     + */
1503     +static bool atkbd_skip_deactivate;
1504     +
1505     static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf,
1506     ssize_t (*handler)(struct atkbd *, char *));
1507     static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count,
1508     @@ -768,7 +774,8 @@ static int atkbd_probe(struct atkbd *atkbd)
1509     * Make sure nothing is coming from the keyboard and disturbs our
1510     * internal state.
1511     */
1512     - atkbd_deactivate(atkbd);
1513     + if (!atkbd_skip_deactivate)
1514     + atkbd_deactivate(atkbd);
1515    
1516     return 0;
1517     }
1518     @@ -1638,6 +1645,12 @@ static int __init atkbd_setup_scancode_fixup(const struct dmi_system_id *id)
1519     return 1;
1520     }
1521    
1522     +static int __init atkbd_deactivate_fixup(const struct dmi_system_id *id)
1523     +{
1524     + atkbd_skip_deactivate = true;
1525     + return 1;
1526     +}
1527     +
1528     static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
1529     {
1530     .matches = {
1531     @@ -1775,6 +1788,20 @@ static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
1532     .callback = atkbd_setup_scancode_fixup,
1533     .driver_data = atkbd_oqo_01plus_scancode_fixup,
1534     },
1535     + {
1536     + .matches = {
1537     + DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
1538     + DMI_MATCH(DMI_PRODUCT_NAME, "LW25-B7HV"),
1539     + },
1540     + .callback = atkbd_deactivate_fixup,
1541     + },
1542     + {
1543     + .matches = {
1544     + DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
1545     + DMI_MATCH(DMI_PRODUCT_NAME, "P1-J273B"),
1546     + },
1547     + .callback = atkbd_deactivate_fixup,
1548     + },
1549     { }
1550     };
1551    
1552     diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
1553     index 1fb1a7b5a754..76f1d37ac0ff 100644
1554     --- a/drivers/input/mouse/elantech.c
1555     +++ b/drivers/input/mouse/elantech.c
1556     @@ -11,6 +11,7 @@
1557     */
1558    
1559     #include <linux/delay.h>
1560     +#include <linux/dmi.h>
1561     #include <linux/slab.h>
1562     #include <linux/module.h>
1563     #include <linux/input.h>
1564     @@ -801,7 +802,11 @@ static int elantech_set_absolute_mode(struct psmouse *psmouse)
1565     break;
1566    
1567     case 3:
1568     - etd->reg_10 = 0x0b;
1569     + if (etd->set_hw_resolution)
1570     + etd->reg_10 = 0x0b;
1571     + else
1572     + etd->reg_10 = 0x03;
1573     +
1574     if (elantech_write_reg(psmouse, 0x10, etd->reg_10))
1575     rc = -1;
1576    
1577     @@ -1301,6 +1306,22 @@ static int elantech_reconnect(struct psmouse *psmouse)
1578     }
1579    
1580     /*
1581     + * Some hw_version 3 models go into error state when we try to set bit 3 of r10
1582     + */
1583     +static const struct dmi_system_id no_hw_res_dmi_table[] = {
1584     +#if defined(CONFIG_DMI) && defined(CONFIG_X86)
1585     + {
1586     + /* Gigabyte U2442 */
1587     + .matches = {
1588     + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
1589     + DMI_MATCH(DMI_PRODUCT_NAME, "U2442"),
1590     + },
1591     + },
1592     +#endif
1593     + { }
1594     +};
1595     +
1596     +/*
1597     * determine hardware version and set some properties according to it.
1598     */
1599     static int elantech_set_properties(struct elantech_data *etd)
1600     @@ -1351,6 +1372,9 @@ static int elantech_set_properties(struct elantech_data *etd)
1601     etd->reports_pressure = true;
1602     }
1603    
1604     + /* Enable real hardware resolution on hw_version 3 ? */
1605     + etd->set_hw_resolution = !dmi_check_system(no_hw_res_dmi_table);
1606     +
1607     return 0;
1608     }
1609    
1610     diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
1611     index 46db3be45ac9..c1c15ab6872d 100644
1612     --- a/drivers/input/mouse/elantech.h
1613     +++ b/drivers/input/mouse/elantech.h
1614     @@ -129,6 +129,7 @@ struct elantech_data {
1615     bool paritycheck;
1616     bool jumpy_cursor;
1617     bool reports_pressure;
1618     + bool set_hw_resolution;
1619     unsigned char hw_version;
1620     unsigned int fw_version;
1621     unsigned int single_finger_reports;
1622     diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
1623     index d60c9b7ad1b8..f36f7b88f260 100644
1624     --- a/drivers/input/mouse/synaptics.c
1625     +++ b/drivers/input/mouse/synaptics.c
1626     @@ -1552,7 +1552,7 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
1627     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1628     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
1629     },
1630     - .driver_data = (int []){1024, 5056, 2058, 4832},
1631     + .driver_data = (int []){1024, 5112, 2024, 4832},
1632     },
1633     {
1634     /* Lenovo ThinkPad L540 */
1635     @@ -1563,6 +1563,14 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
1636     .driver_data = (int []){1024, 5112, 2024, 4832},
1637     },
1638     {
1639     + /* Lenovo ThinkPad W540 */
1640     + .matches = {
1641     + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1642     + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W540"),
1643     + },
1644     + .driver_data = (int []){1024, 5112, 2024, 4832},
1645     + },
1646     + {
1647     /* Lenovo Yoga S1 */
1648     .matches = {
1649     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1650     diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
1651     index a3c338942f10..6f849cbcac6f 100644
1652     --- a/drivers/iommu/amd_iommu.c
1653     +++ b/drivers/iommu/amd_iommu.c
1654     @@ -3959,7 +3959,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
1655     iommu_flush_dte(iommu, devid);
1656     if (devid != alias) {
1657     irq_lookup_table[alias] = table;
1658     - set_dte_irq_entry(devid, table);
1659     + set_dte_irq_entry(alias, table);
1660     iommu_flush_dte(iommu, alias);
1661     }
1662    
1663     diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
1664     index 19ceaa60e0f4..4e11218d644e 100644
1665     --- a/drivers/irqchip/irq-gic.c
1666     +++ b/drivers/irqchip/irq-gic.c
1667     @@ -246,10 +246,14 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1668     bool force)
1669     {
1670     void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
1671     - unsigned int shift = (gic_irq(d) % 4) * 8;
1672     - unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
1673     + unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
1674     u32 val, mask, bit;
1675    
1676     + if (!force)
1677     + cpu = cpumask_any_and(mask_val, cpu_online_mask);
1678     + else
1679     + cpu = cpumask_first(mask_val);
1680     +
1681     if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
1682     return -EINVAL;
1683    
1684     diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
1685     index faf52c005e8c..5d64b2431415 100644
1686     --- a/drivers/leds/leds-pwm.c
1687     +++ b/drivers/leds/leds-pwm.c
1688     @@ -82,6 +82,15 @@ static inline size_t sizeof_pwm_leds_priv(int num_leds)
1689     (sizeof(struct led_pwm_data) * num_leds);
1690     }
1691    
1692     +static void led_pwm_cleanup(struct led_pwm_priv *priv)
1693     +{
1694     + while (priv->num_leds--) {
1695     + led_classdev_unregister(&priv->leds[priv->num_leds].cdev);
1696     + if (priv->leds[priv->num_leds].can_sleep)
1697     + cancel_work_sync(&priv->leds[priv->num_leds].work);
1698     + }
1699     +}
1700     +
1701     static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev)
1702     {
1703     struct device_node *node = pdev->dev.of_node;
1704     @@ -139,8 +148,7 @@ static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev)
1705    
1706     return priv;
1707     err:
1708     - while (priv->num_leds--)
1709     - led_classdev_unregister(&priv->leds[priv->num_leds].cdev);
1710     + led_pwm_cleanup(priv);
1711    
1712     return NULL;
1713     }
1714     @@ -200,8 +208,8 @@ static int led_pwm_probe(struct platform_device *pdev)
1715     return 0;
1716    
1717     err:
1718     - while (i--)
1719     - led_classdev_unregister(&priv->leds[i].cdev);
1720     + priv->num_leds = i;
1721     + led_pwm_cleanup(priv);
1722    
1723     return ret;
1724     }
1725     @@ -209,13 +217,8 @@ err:
1726     static int led_pwm_remove(struct platform_device *pdev)
1727     {
1728     struct led_pwm_priv *priv = platform_get_drvdata(pdev);
1729     - int i;
1730    
1731     - for (i = 0; i < priv->num_leds; i++) {
1732     - led_classdev_unregister(&priv->leds[i].cdev);
1733     - if (priv->leds[i].can_sleep)
1734     - cancel_work_sync(&priv->leds[i].work);
1735     - }
1736     + led_pwm_cleanup(priv);
1737    
1738     return 0;
1739     }
1740     diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
1741     index 6d2d41ae9e32..5177ba54559b 100644
1742     --- a/drivers/md/dm-crypt.c
1743     +++ b/drivers/md/dm-crypt.c
1744     @@ -18,7 +18,6 @@
1745     #include <linux/crypto.h>
1746     #include <linux/workqueue.h>
1747     #include <linux/backing-dev.h>
1748     -#include <linux/percpu.h>
1749     #include <linux/atomic.h>
1750     #include <linux/scatterlist.h>
1751     #include <asm/page.h>
1752     @@ -44,6 +43,7 @@ struct convert_context {
1753     unsigned int idx_out;
1754     sector_t cc_sector;
1755     atomic_t cc_pending;
1756     + struct ablkcipher_request *req;
1757     };
1758    
1759     /*
1760     @@ -105,15 +105,7 @@ struct iv_lmk_private {
1761     enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
1762    
1763     /*
1764     - * Duplicated per-CPU state for cipher.
1765     - */
1766     -struct crypt_cpu {
1767     - struct ablkcipher_request *req;
1768     -};
1769     -
1770     -/*
1771     - * The fields in here must be read only after initialization,
1772     - * changing state should be in crypt_cpu.
1773     + * The fields in here must be read only after initialization.
1774     */
1775     struct crypt_config {
1776     struct dm_dev *dev;
1777     @@ -143,12 +135,6 @@ struct crypt_config {
1778     sector_t iv_offset;
1779     unsigned int iv_size;
1780    
1781     - /*
1782     - * Duplicated per cpu state. Access through
1783     - * per_cpu_ptr() only.
1784     - */
1785     - struct crypt_cpu __percpu *cpu;
1786     -
1787     /* ESSIV: struct crypto_cipher *essiv_tfm */
1788     void *iv_private;
1789     struct crypto_ablkcipher **tfms;
1790     @@ -184,11 +170,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *);
1791     static void kcryptd_queue_crypt(struct dm_crypt_io *io);
1792     static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
1793    
1794     -static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
1795     -{
1796     - return this_cpu_ptr(cc->cpu);
1797     -}
1798     -
1799     /*
1800     * Use this to access cipher attributes that are the same for each CPU.
1801     */
1802     @@ -738,16 +719,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
1803     static void crypt_alloc_req(struct crypt_config *cc,
1804     struct convert_context *ctx)
1805     {
1806     - struct crypt_cpu *this_cc = this_crypt_config(cc);
1807     unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
1808    
1809     - if (!this_cc->req)
1810     - this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
1811     + if (!ctx->req)
1812     + ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
1813    
1814     - ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
1815     - ablkcipher_request_set_callback(this_cc->req,
1816     + ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
1817     + ablkcipher_request_set_callback(ctx->req,
1818     CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
1819     - kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
1820     + kcryptd_async_done, dmreq_of_req(cc, ctx->req));
1821     }
1822    
1823     /*
1824     @@ -756,7 +736,6 @@ static void crypt_alloc_req(struct crypt_config *cc,
1825     static int crypt_convert(struct crypt_config *cc,
1826     struct convert_context *ctx)
1827     {
1828     - struct crypt_cpu *this_cc = this_crypt_config(cc);
1829     int r;
1830    
1831     atomic_set(&ctx->cc_pending, 1);
1832     @@ -768,7 +747,7 @@ static int crypt_convert(struct crypt_config *cc,
1833    
1834     atomic_inc(&ctx->cc_pending);
1835    
1836     - r = crypt_convert_block(cc, ctx, this_cc->req);
1837     + r = crypt_convert_block(cc, ctx, ctx->req);
1838    
1839     switch (r) {
1840     /* async */
1841     @@ -777,7 +756,7 @@ static int crypt_convert(struct crypt_config *cc,
1842     INIT_COMPLETION(ctx->restart);
1843     /* fall through*/
1844     case -EINPROGRESS:
1845     - this_cc->req = NULL;
1846     + ctx->req = NULL;
1847     ctx->cc_sector++;
1848     continue;
1849    
1850     @@ -876,6 +855,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
1851     io->sector = sector;
1852     io->error = 0;
1853     io->base_io = NULL;
1854     + io->ctx.req = NULL;
1855     atomic_set(&io->io_pending, 0);
1856    
1857     return io;
1858     @@ -901,6 +881,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
1859     if (!atomic_dec_and_test(&io->io_pending))
1860     return;
1861    
1862     + if (io->ctx.req)
1863     + mempool_free(io->ctx.req, cc->req_pool);
1864     mempool_free(io, cc->io_pool);
1865    
1866     if (likely(!base_io))
1867     @@ -1326,8 +1308,6 @@ static int crypt_wipe_key(struct crypt_config *cc)
1868     static void crypt_dtr(struct dm_target *ti)
1869     {
1870     struct crypt_config *cc = ti->private;
1871     - struct crypt_cpu *cpu_cc;
1872     - int cpu;
1873    
1874     ti->private = NULL;
1875    
1876     @@ -1339,13 +1319,6 @@ static void crypt_dtr(struct dm_target *ti)
1877     if (cc->crypt_queue)
1878     destroy_workqueue(cc->crypt_queue);
1879    
1880     - if (cc->cpu)
1881     - for_each_possible_cpu(cpu) {
1882     - cpu_cc = per_cpu_ptr(cc->cpu, cpu);
1883     - if (cpu_cc->req)
1884     - mempool_free(cpu_cc->req, cc->req_pool);
1885     - }
1886     -
1887     crypt_free_tfms(cc);
1888    
1889     if (cc->bs)
1890     @@ -1364,9 +1337,6 @@ static void crypt_dtr(struct dm_target *ti)
1891     if (cc->dev)
1892     dm_put_device(ti, cc->dev);
1893    
1894     - if (cc->cpu)
1895     - free_percpu(cc->cpu);
1896     -
1897     kzfree(cc->cipher);
1898     kzfree(cc->cipher_string);
1899    
1900     @@ -1421,13 +1391,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
1901     if (tmp)
1902     DMWARN("Ignoring unexpected additional cipher options");
1903    
1904     - cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
1905     - __alignof__(struct crypt_cpu));
1906     - if (!cc->cpu) {
1907     - ti->error = "Cannot allocate per cpu state";
1908     - goto bad_mem;
1909     - }
1910     -
1911     /*
1912     * For compatibility with the original dm-crypt mapping format, if
1913     * only the cipher name is supplied, use cbc-plain.
1914     diff --git a/drivers/md/md.c b/drivers/md/md.c
1915     index a2dda416c9cb..00a99fe797d4 100644
1916     --- a/drivers/md/md.c
1917     +++ b/drivers/md/md.c
1918     @@ -8481,7 +8481,8 @@ static int md_notify_reboot(struct notifier_block *this,
1919     if (mddev_trylock(mddev)) {
1920     if (mddev->pers)
1921     __md_stop_writes(mddev);
1922     - mddev->safemode = 2;
1923     + if (mddev->persistent)
1924     + mddev->safemode = 2;
1925     mddev_unlock(mddev);
1926     }
1927     need_delay = 1;
1928     diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
1929     index 617ad3fff4aa..3ead3a83f04a 100644
1930     --- a/drivers/media/i2c/ov7670.c
1931     +++ b/drivers/media/i2c/ov7670.c
1932     @@ -1110,7 +1110,7 @@ static int ov7670_enum_framesizes(struct v4l2_subdev *sd,
1933     * windows that fall outside that.
1934     */
1935     for (i = 0; i < n_win_sizes; i++) {
1936     - struct ov7670_win_size *win = &info->devtype->win_sizes[index];
1937     + struct ov7670_win_size *win = &info->devtype->win_sizes[i];
1938     if (info->min_width && win->width < info->min_width)
1939     continue;
1940     if (info->min_height && win->height < info->min_height)
1941     diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
1942     index 1957c0df08fd..79715f9feb0a 100644
1943     --- a/drivers/media/media-device.c
1944     +++ b/drivers/media/media-device.c
1945     @@ -93,6 +93,7 @@ static long media_device_enum_entities(struct media_device *mdev,
1946     struct media_entity *ent;
1947     struct media_entity_desc u_ent;
1948    
1949     + memset(&u_ent, 0, sizeof(u_ent));
1950     if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
1951     return -EFAULT;
1952    
1953     diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
1954     index 1d7dbd5c0fba..3e8ef11f67aa 100644
1955     --- a/drivers/media/platform/omap3isp/isp.c
1956     +++ b/drivers/media/platform/omap3isp/isp.c
1957     @@ -2249,6 +2249,7 @@ static int isp_probe(struct platform_device *pdev)
1958     ret = iommu_attach_device(isp->domain, &pdev->dev);
1959     if (ret) {
1960     dev_err(&pdev->dev, "can't attach iommu device: %d\n", ret);
1961     + ret = -EPROBE_DEFER;
1962     goto free_domain;
1963     }
1964    
1965     @@ -2287,6 +2288,7 @@ detach_dev:
1966     iommu_detach_device(isp->domain, &pdev->dev);
1967     free_domain:
1968     iommu_domain_free(isp->domain);
1969     + isp->domain = NULL;
1970     error_isp:
1971     isp_xclk_cleanup(isp);
1972     omap3isp_put(isp);
1973     diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
1974     index 3aecaf465094..f0c9c42867de 100644
1975     --- a/drivers/media/tuners/fc2580.c
1976     +++ b/drivers/media/tuners/fc2580.c
1977     @@ -195,7 +195,7 @@ static int fc2580_set_params(struct dvb_frontend *fe)
1978    
1979     f_ref = 2UL * priv->cfg->clock / r_val;
1980     n_val = div_u64_rem(f_vco, f_ref, &k_val);
1981     - k_val_reg = 1UL * k_val * (1 << 20) / f_ref;
1982     + k_val_reg = div_u64(1ULL * k_val * (1 << 20), f_ref);
1983    
1984     ret = fc2580_wr_reg(priv, 0x18, r18_val | ((k_val_reg >> 16) & 0xff));
1985     if (ret < 0)
1986     @@ -348,8 +348,8 @@ static int fc2580_set_params(struct dvb_frontend *fe)
1987     if (ret < 0)
1988     goto err;
1989    
1990     - ret = fc2580_wr_reg(priv, 0x37, 1UL * priv->cfg->clock * \
1991     - fc2580_if_filter_lut[i].mul / 1000000000);
1992     + ret = fc2580_wr_reg(priv, 0x37, div_u64(1ULL * priv->cfg->clock *
1993     + fc2580_if_filter_lut[i].mul, 1000000000));
1994     if (ret < 0)
1995     goto err;
1996    
1997     diff --git a/drivers/media/tuners/fc2580_priv.h b/drivers/media/tuners/fc2580_priv.h
1998     index be38a9e637e0..646c99452136 100644
1999     --- a/drivers/media/tuners/fc2580_priv.h
2000     +++ b/drivers/media/tuners/fc2580_priv.h
2001     @@ -22,6 +22,7 @@
2002     #define FC2580_PRIV_H
2003    
2004     #include "fc2580.h"
2005     +#include <linux/math64.h>
2006    
2007     struct fc2580_reg_val {
2008     u8 reg;
2009     diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
2010     index f56b729581e7..e2b0a0969ebb 100644
2011     --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
2012     +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
2013     @@ -178,6 +178,9 @@ struct v4l2_create_buffers32 {
2014    
2015     static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
2016     {
2017     + if (get_user(kp->type, &up->type))
2018     + return -EFAULT;
2019     +
2020     switch (kp->type) {
2021     case V4L2_BUF_TYPE_VIDEO_CAPTURE:
2022     case V4L2_BUF_TYPE_VIDEO_OUTPUT:
2023     @@ -204,17 +207,16 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us
2024    
2025     static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
2026     {
2027     - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) ||
2028     - get_user(kp->type, &up->type))
2029     - return -EFAULT;
2030     + if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
2031     + return -EFAULT;
2032     return __get_v4l2_format32(kp, up);
2033     }
2034    
2035     static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
2036     {
2037     if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
2038     - copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt)))
2039     - return -EFAULT;
2040     + copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
2041     + return -EFAULT;
2042     return __get_v4l2_format32(&kp->format, &up->format);
2043     }
2044    
2045     diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
2046     index e752f5d4995d..0c9b2f1c6939 100644
2047     --- a/drivers/net/wireless/ath/ath9k/xmit.c
2048     +++ b/drivers/net/wireless/ath/ath9k/xmit.c
2049     @@ -1255,14 +1255,16 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
2050     for (tidno = 0, tid = &an->tid[tidno];
2051     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
2052    
2053     - if (!tid->sched)
2054     - continue;
2055     -
2056     ac = tid->ac;
2057     txq = ac->txq;
2058    
2059     ath_txq_lock(sc, txq);
2060    
2061     + if (!tid->sched) {
2062     + ath_txq_unlock(sc, txq);
2063     + continue;
2064     + }
2065     +
2066     buffered = !skb_queue_empty(&tid->buf_q);
2067    
2068     tid->sched = false;
2069     diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
2070     index 3a6544710c8a..8e8543cfe489 100644
2071     --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
2072     +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
2073     @@ -426,6 +426,12 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
2074     bool blocked;
2075     int err;
2076    
2077     + if (!wl->ucode.bcm43xx_bomminor) {
2078     + err = brcms_request_fw(wl, wl->wlc->hw->d11core);
2079     + if (err)
2080     + return -ENOENT;
2081     + }
2082     +
2083     ieee80211_wake_queues(hw);
2084     spin_lock_bh(&wl->lock);
2085     blocked = brcms_rfkill_set_hw_state(wl);
2086     @@ -433,14 +439,6 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
2087     if (!blocked)
2088     wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
2089    
2090     - if (!wl->ucode.bcm43xx_bomminor) {
2091     - err = brcms_request_fw(wl, wl->wlc->hw->d11core);
2092     - if (err) {
2093     - brcms_remove(wl->wlc->hw->d11core);
2094     - return -ENOENT;
2095     - }
2096     - }
2097     -
2098     spin_lock_bh(&wl->lock);
2099     /* avoid acknowledging frames before a non-monitor device is added */
2100     wl->mute_tx = true;
2101     diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
2102     index f8cff1f0b6b7..2b724fc4e306 100644
2103     --- a/drivers/net/wireless/rt2x00/rt2x00mac.c
2104     +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
2105     @@ -623,20 +623,18 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
2106     bss_conf->bssid);
2107    
2108     /*
2109     - * Update the beacon. This is only required on USB devices. PCI
2110     - * devices fetch beacons periodically.
2111     - */
2112     - if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev))
2113     - rt2x00queue_update_beacon(rt2x00dev, vif);
2114     -
2115     - /*
2116     * Start/stop beaconing.
2117     */
2118     if (changes & BSS_CHANGED_BEACON_ENABLED) {
2119     if (!bss_conf->enable_beacon && intf->enable_beacon) {
2120     - rt2x00queue_clear_beacon(rt2x00dev, vif);
2121     rt2x00dev->intf_beaconing--;
2122     intf->enable_beacon = false;
2123     + /*
2124     + * Clear beacon in the H/W for this vif. This is needed
2125     + * to disable beaconing on this particular interface
2126     + * and keep it running on other interfaces.
2127     + */
2128     + rt2x00queue_clear_beacon(rt2x00dev, vif);
2129    
2130     if (rt2x00dev->intf_beaconing == 0) {
2131     /*
2132     @@ -647,11 +645,15 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
2133     rt2x00queue_stop_queue(rt2x00dev->bcn);
2134     mutex_unlock(&intf->beacon_skb_mutex);
2135     }
2136     -
2137     -
2138     } else if (bss_conf->enable_beacon && !intf->enable_beacon) {
2139     rt2x00dev->intf_beaconing++;
2140     intf->enable_beacon = true;
2141     + /*
2142     + * Upload beacon to the H/W. This is only required on
2143     + * USB devices. PCI devices fetch beacons periodically.
2144     + */
2145     + if (rt2x00_is_usb(rt2x00dev))
2146     + rt2x00queue_update_beacon(rt2x00dev, vif);
2147    
2148     if (rt2x00dev->intf_beaconing == 1) {
2149     /*
2150     diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
2151     index 324aa581938e..c3f2b55501ae 100644
2152     --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
2153     +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
2154     @@ -1001,7 +1001,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
2155     err = _rtl92cu_init_mac(hw);
2156     if (err) {
2157     RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "init mac failed!\n");
2158     - return err;
2159     + goto exit;
2160     }
2161     err = rtl92c_download_fw(hw);
2162     if (err) {
2163     diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
2164     index 58499277903a..6efc2ec5e4db 100644
2165     --- a/drivers/pci/hotplug/shpchp_ctrl.c
2166     +++ b/drivers/pci/hotplug/shpchp_ctrl.c
2167     @@ -282,8 +282,8 @@ static int board_added(struct slot *p_slot)
2168     return WRONG_BUS_FREQUENCY;
2169     }
2170    
2171     - bsp = ctrl->pci_dev->bus->cur_bus_speed;
2172     - msp = ctrl->pci_dev->bus->max_bus_speed;
2173     + bsp = ctrl->pci_dev->subordinate->cur_bus_speed;
2174     + msp = ctrl->pci_dev->subordinate->max_bus_speed;
2175    
2176     /* Check if there are other slots or devices on the same bus */
2177     if (!list_empty(&ctrl->pci_dev->subordinate->devices))
2178     diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
2179     index 660b109487ae..8032917b6636 100644
2180     --- a/drivers/target/target_core_device.c
2181     +++ b/drivers/target/target_core_device.c
2182     @@ -796,10 +796,10 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
2183     pr_err("emulate_write_cache not supported for pSCSI\n");
2184     return -EINVAL;
2185     }
2186     - if (dev->transport->get_write_cache) {
2187     - pr_warn("emulate_write_cache cannot be changed when underlying"
2188     - " HW reports WriteCacheEnabled, ignoring request\n");
2189     - return 0;
2190     + if (flag &&
2191     + dev->transport->get_write_cache) {
2192     + pr_err("emulate_write_cache not supported for this device\n");
2193     + return -EINVAL;
2194     }
2195    
2196     dev->dev_attrib.emulate_write_cache = flag;
2197     diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
2198     index b167665b7de2..d8c06a3d391e 100644
2199     --- a/drivers/tty/serial/8250/8250_core.c
2200     +++ b/drivers/tty/serial/8250/8250_core.c
2201     @@ -1520,7 +1520,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
2202     status = serial8250_rx_chars(up, status);
2203     }
2204     serial8250_modem_status(up);
2205     - if (status & UART_LSR_THRE)
2206     + if (!up->dma && (status & UART_LSR_THRE))
2207     serial8250_tx_chars(up);
2208    
2209     spin_unlock_irqrestore(&port->lock, flags);
2210     diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
2211     index 7046769608d4..ab9096dc3849 100644
2212     --- a/drivers/tty/serial/8250/8250_dma.c
2213     +++ b/drivers/tty/serial/8250/8250_dma.c
2214     @@ -20,12 +20,15 @@ static void __dma_tx_complete(void *param)
2215     struct uart_8250_port *p = param;
2216     struct uart_8250_dma *dma = p->dma;
2217     struct circ_buf *xmit = &p->port.state->xmit;
2218     -
2219     - dma->tx_running = 0;
2220     + unsigned long flags;
2221    
2222     dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
2223     UART_XMIT_SIZE, DMA_TO_DEVICE);
2224    
2225     + spin_lock_irqsave(&p->port.lock, flags);
2226     +
2227     + dma->tx_running = 0;
2228     +
2229     xmit->tail += dma->tx_size;
2230     xmit->tail &= UART_XMIT_SIZE - 1;
2231     p->port.icount.tx += dma->tx_size;
2232     @@ -35,6 +38,8 @@ static void __dma_tx_complete(void *param)
2233    
2234     if (!uart_circ_empty(xmit) && !uart_tx_stopped(&p->port))
2235     serial8250_tx_dma(p);
2236     +
2237     + spin_unlock_irqrestore(&p->port.lock, flags);
2238     }
2239    
2240     static void __dma_rx_complete(void *param)
2241     diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
2242     index 073b938f9135..55e96131753e 100644
2243     --- a/drivers/usb/gadget/at91_udc.c
2244     +++ b/drivers/usb/gadget/at91_udc.c
2245     @@ -1703,16 +1703,6 @@ static int at91udc_probe(struct platform_device *pdev)
2246     return -ENODEV;
2247     }
2248    
2249     - if (pdev->num_resources != 2) {
2250     - DBG("invalid num_resources\n");
2251     - return -ENODEV;
2252     - }
2253     - if ((pdev->resource[0].flags != IORESOURCE_MEM)
2254     - || (pdev->resource[1].flags != IORESOURCE_IRQ)) {
2255     - DBG("invalid resource type\n");
2256     - return -ENODEV;
2257     - }
2258     -
2259     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2260     if (!res)
2261     return -ENXIO;
2262     diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
2263     index 3c0a49a298dd..bfcf38383f74 100644
2264     --- a/drivers/usb/host/ehci-fsl.c
2265     +++ b/drivers/usb/host/ehci-fsl.c
2266     @@ -261,7 +261,8 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
2267     break;
2268     }
2269    
2270     - if (pdata->have_sysif_regs && pdata->controller_ver &&
2271     + if (pdata->have_sysif_regs &&
2272     + pdata->controller_ver > FSL_USB_VER_1_6 &&
2273     (phy_mode == FSL_USB2_PHY_ULPI)) {
2274     /* check PHY_CLK_VALID to get phy clk valid */
2275     if (!spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
2276     diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
2277     index 60ff4220e8b4..cd908066fde9 100644
2278     --- a/drivers/usb/host/ohci-hub.c
2279     +++ b/drivers/usb/host/ohci-hub.c
2280     @@ -90,6 +90,24 @@ __acquires(ohci->lock)
2281     dl_done_list (ohci);
2282     finish_unlinks (ohci, ohci_frame_no(ohci));
2283    
2284     + /*
2285     + * Some controllers don't handle "global" suspend properly if
2286     + * there are unsuspended ports. For these controllers, put all
2287     + * the enabled ports into suspend before suspending the root hub.
2288     + */
2289     + if (ohci->flags & OHCI_QUIRK_GLOBAL_SUSPEND) {
2290     + __hc32 __iomem *portstat = ohci->regs->roothub.portstatus;
2291     + int i;
2292     + unsigned temp;
2293     +
2294     + for (i = 0; i < ohci->num_ports; (++i, ++portstat)) {
2295     + temp = ohci_readl(ohci, portstat);
2296     + if ((temp & (RH_PS_PES | RH_PS_PSS)) ==
2297     + RH_PS_PES)
2298     + ohci_writel(ohci, RH_PS_PSS, portstat);
2299     + }
2300     + }
2301     +
2302     /* maybe resume can wake root hub */
2303     if (ohci_to_hcd(ohci)->self.root_hub->do_remote_wakeup || autostop) {
2304     ohci->hc_control |= OHCI_CTRL_RWE;
2305     diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
2306     index ef6782bd1fa9..67af8eef6537 100644
2307     --- a/drivers/usb/host/ohci-pci.c
2308     +++ b/drivers/usb/host/ohci-pci.c
2309     @@ -172,6 +172,7 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
2310     pci_dev_put(amd_smbus_dev);
2311     amd_smbus_dev = NULL;
2312    
2313     + ohci->flags |= OHCI_QUIRK_GLOBAL_SUSPEND;
2314     return 0;
2315     }
2316    
2317     diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
2318     index d3299143d9e2..f2521f3185d2 100644
2319     --- a/drivers/usb/host/ohci.h
2320     +++ b/drivers/usb/host/ohci.h
2321     @@ -405,6 +405,8 @@ struct ohci_hcd {
2322     #define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
2323     #define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/
2324     #define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */
2325     +#define OHCI_QUIRK_GLOBAL_SUSPEND 0x800 /* must suspend ports */
2326     +
2327     // there are also chip quirks/bugs in init logic
2328    
2329     struct work_struct nec_work; /* Worker for NEC quirk */
2330     diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
2331     index 7ed681a714a5..6c0a542e8ec1 100644
2332     --- a/drivers/usb/serial/qcserial.c
2333     +++ b/drivers/usb/serial/qcserial.c
2334     @@ -151,6 +151,21 @@ static const struct usb_device_id id_table[] = {
2335     {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */
2336     {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */
2337     {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */
2338     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 0)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
2339     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
2340     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 3)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
2341     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 0)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Device Management */
2342     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 2)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card NMEA */
2343     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Modem */
2344     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 0)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Device Management */
2345     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 2)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card NMEA */
2346     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 3)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Modem */
2347     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 0)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
2348     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 2)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
2349     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 3)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
2350     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 0)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
2351     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 2)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
2352     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 3)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Modem */
2353    
2354     { } /* Terminating entry */
2355     };
2356     diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
2357     index 4ef2a80728f7..008d805c3d21 100644
2358     --- a/drivers/usb/storage/shuttle_usbat.c
2359     +++ b/drivers/usb/storage/shuttle_usbat.c
2360     @@ -1851,7 +1851,7 @@ static int usbat_probe(struct usb_interface *intf,
2361     us->transport_name = "Shuttle USBAT";
2362     us->transport = usbat_flash_transport;
2363     us->transport_reset = usb_stor_CB_reset;
2364     - us->max_lun = 1;
2365     + us->max_lun = 0;
2366    
2367     result = usb_stor_probe2(us);
2368     return result;
2369     diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
2370     index adbeb255616a..042c83b01046 100644
2371     --- a/drivers/usb/storage/unusual_devs.h
2372     +++ b/drivers/usb/storage/unusual_devs.h
2373     @@ -234,6 +234,20 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
2374     USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2375     US_FL_MAX_SECTORS_64 ),
2376    
2377     +/* Reported by Daniele Forsi <dforsi@gmail.com> */
2378     +UNUSUAL_DEV( 0x0421, 0x04b9, 0x0350, 0x0350,
2379     + "Nokia",
2380     + "5300",
2381     + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2382     + US_FL_MAX_SECTORS_64 ),
2383     +
2384     +/* Patch submitted by Victor A. Santos <victoraur.santos@gmail.com> */
2385     +UNUSUAL_DEV( 0x0421, 0x05af, 0x0742, 0x0742,
2386     + "Nokia",
2387     + "305",
2388     + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2389     + US_FL_MAX_SECTORS_64),
2390     +
2391     /* Patch submitted by Mikhail Zolotaryov <lebon@lebon.org.ua> */
2392     UNUSUAL_DEV( 0x0421, 0x06aa, 0x1110, 0x1110,
2393     "Nokia",
2394     diff --git a/fs/exec.c b/fs/exec.c
2395     index bb60cda5ee30..dd6aa61c8548 100644
2396     --- a/fs/exec.c
2397     +++ b/fs/exec.c
2398     @@ -654,10 +654,10 @@ int setup_arg_pages(struct linux_binprm *bprm,
2399     unsigned long rlim_stack;
2400    
2401     #ifdef CONFIG_STACK_GROWSUP
2402     - /* Limit stack size to 1GB */
2403     + /* Limit stack size */
2404     stack_base = rlimit_max(RLIMIT_STACK);
2405     - if (stack_base > (1 << 30))
2406     - stack_base = 1 << 30;
2407     + if (stack_base > STACK_SIZE_MAX)
2408     + stack_base = STACK_SIZE_MAX;
2409    
2410     /* Make sure we didn't let the argument array grow too large. */
2411     if (vma->vm_end - vma->vm_start > stack_base)
2412     diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
2413     index 8a50b3c18093..e15bcbd5043c 100644
2414     --- a/fs/nfsd/nfs4acl.c
2415     +++ b/fs/nfsd/nfs4acl.c
2416     @@ -385,8 +385,10 @@ sort_pacl(struct posix_acl *pacl)
2417     * by uid/gid. */
2418     int i, j;
2419    
2420     - if (pacl->a_count <= 4)
2421     - return; /* no users or groups */
2422     + /* no users or groups */
2423     + if (!pacl || pacl->a_count <= 4)
2424     + return;
2425     +
2426     i = 1;
2427     while (pacl->a_entries[i].e_tag == ACL_USER)
2428     i++;
2429     @@ -513,13 +515,12 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
2430    
2431     /*
2432     * ACLs with no ACEs are treated differently in the inheritable
2433     - * and effective cases: when there are no inheritable ACEs, we
2434     - * set a zero-length default posix acl:
2435     + * and effective cases: when there are no inheritable ACEs,
2436     + * calls ->set_acl with a NULL ACL structure.
2437     */
2438     - if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT)) {
2439     - pacl = posix_acl_alloc(0, GFP_KERNEL);
2440     - return pacl ? pacl : ERR_PTR(-ENOMEM);
2441     - }
2442     + if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT))
2443     + return NULL;
2444     +
2445     /*
2446     * When there are no effective ACEs, the following will end
2447     * up setting a 3-element effective posix ACL with all
2448     diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
2449     index 442509285ca9..ae6a50b7a617 100644
2450     --- a/fs/nfsd/nfs4state.c
2451     +++ b/fs/nfsd/nfs4state.c
2452     @@ -1081,6 +1081,18 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
2453     return NULL;
2454     }
2455     clp->cl_name.len = name.len;
2456     + INIT_LIST_HEAD(&clp->cl_sessions);
2457     + idr_init(&clp->cl_stateids);
2458     + atomic_set(&clp->cl_refcount, 0);
2459     + clp->cl_cb_state = NFSD4_CB_UNKNOWN;
2460     + INIT_LIST_HEAD(&clp->cl_idhash);
2461     + INIT_LIST_HEAD(&clp->cl_openowners);
2462     + INIT_LIST_HEAD(&clp->cl_delegations);
2463     + INIT_LIST_HEAD(&clp->cl_lru);
2464     + INIT_LIST_HEAD(&clp->cl_callbacks);
2465     + INIT_LIST_HEAD(&clp->cl_revoked);
2466     + spin_lock_init(&clp->cl_lock);
2467     + rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
2468     return clp;
2469     }
2470    
2471     @@ -1098,6 +1110,7 @@ free_client(struct nfs4_client *clp)
2472     WARN_ON_ONCE(atomic_read(&ses->se_ref));
2473     free_session(ses);
2474     }
2475     + rpc_destroy_wait_queue(&clp->cl_cb_waitq);
2476     free_svc_cred(&clp->cl_cred);
2477     kfree(clp->cl_name.data);
2478     idr_destroy(&clp->cl_stateids);
2479     @@ -1315,7 +1328,6 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
2480     if (clp == NULL)
2481     return NULL;
2482    
2483     - INIT_LIST_HEAD(&clp->cl_sessions);
2484     ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2485     if (ret) {
2486     spin_lock(&nn->client_lock);
2487     @@ -1323,20 +1335,9 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
2488     spin_unlock(&nn->client_lock);
2489     return NULL;
2490     }
2491     - idr_init(&clp->cl_stateids);
2492     - atomic_set(&clp->cl_refcount, 0);
2493     - clp->cl_cb_state = NFSD4_CB_UNKNOWN;
2494     - INIT_LIST_HEAD(&clp->cl_idhash);
2495     - INIT_LIST_HEAD(&clp->cl_openowners);
2496     - INIT_LIST_HEAD(&clp->cl_delegations);
2497     - INIT_LIST_HEAD(&clp->cl_lru);
2498     - INIT_LIST_HEAD(&clp->cl_callbacks);
2499     - INIT_LIST_HEAD(&clp->cl_revoked);
2500     - spin_lock_init(&clp->cl_lock);
2501     nfsd4_init_callback(&clp->cl_cb_null);
2502     clp->cl_time = get_seconds();
2503     clear_bit(0, &clp->cl_cb_slot_busy);
2504     - rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
2505     copy_verf(clp, verf);
2506     rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
2507     gen_confirm(clp);
2508     @@ -3598,9 +3599,16 @@ out:
2509     static __be32
2510     nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
2511     {
2512     - if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
2513     + struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
2514     +
2515     + if (check_for_locks(stp->st_file, lo))
2516     return nfserr_locks_held;
2517     - release_lock_stateid(stp);
2518     + /*
2519     + * Currently there's a 1-1 lock stateid<->lockowner
2520     + * correspondence, and we have to delete the lockowner when we
2521     + * delete the lock stateid:
2522     + */
2523     + unhash_lockowner(lo);
2524     return nfs_ok;
2525     }
2526    
2527     @@ -4044,6 +4052,10 @@ static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, c
2528    
2529     if (!same_owner_str(&lo->lo_owner, owner, clid))
2530     return false;
2531     + if (list_empty(&lo->lo_owner.so_stateids)) {
2532     + WARN_ON_ONCE(1);
2533     + return false;
2534     + }
2535     lst = list_first_entry(&lo->lo_owner.so_stateids,
2536     struct nfs4_ol_stateid, st_perstateowner);
2537     return lst->st_file->fi_inode == inode;
2538     diff --git a/fs/posix_acl.c b/fs/posix_acl.c
2539     index 8bd2135b7f82..3542f1f814e2 100644
2540     --- a/fs/posix_acl.c
2541     +++ b/fs/posix_acl.c
2542     @@ -158,6 +158,12 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
2543     umode_t mode = 0;
2544     int not_equiv = 0;
2545    
2546     + /*
2547     + * A null ACL can always be presented as mode bits.
2548     + */
2549     + if (!acl)
2550     + return 0;
2551     +
2552     FOREACH_ACL_ENTRY(pa, acl, pe) {
2553     switch (pa->e_tag) {
2554     case ACL_USER_OBJ:
2555     diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
2556     index 99d0fbcbaf79..7a13848d635c 100644
2557     --- a/include/linux/ftrace.h
2558     +++ b/include/linux/ftrace.h
2559     @@ -524,6 +524,7 @@ static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_a
2560     extern int ftrace_arch_read_dyn_info(char *buf, int size);
2561    
2562     extern int skip_trace(unsigned long ip);
2563     +extern void ftrace_module_init(struct module *mod);
2564    
2565     extern void ftrace_disable_daemon(void);
2566     extern void ftrace_enable_daemon(void);
2567     @@ -533,6 +534,7 @@ static inline int ftrace_force_update(void) { return 0; }
2568     static inline void ftrace_disable_daemon(void) { }
2569     static inline void ftrace_enable_daemon(void) { }
2570     static inline void ftrace_release_mod(struct module *mod) {}
2571     +static inline void ftrace_module_init(struct module *mod) {}
2572     static inline int register_ftrace_command(struct ftrace_func_command *cmd)
2573     {
2574     return -EINVAL;
2575     diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
2576     index c2559847d7ee..422eac8538fd 100644
2577     --- a/include/linux/hyperv.h
2578     +++ b/include/linux/hyperv.h
2579     @@ -483,15 +483,18 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
2580     * 0 . 13 (Windows Server 2008)
2581     * 1 . 1 (Windows 7)
2582     * 2 . 4 (Windows 8)
2583     + * 3 . 0 (Windows 8 R2)
2584     */
2585    
2586     #define VERSION_WS2008 ((0 << 16) | (13))
2587     #define VERSION_WIN7 ((1 << 16) | (1))
2588     #define VERSION_WIN8 ((2 << 16) | (4))
2589     +#define VERSION_WIN8_1 ((3 << 16) | (0))
2590     +
2591    
2592     #define VERSION_INVAL -1
2593    
2594     -#define VERSION_CURRENT VERSION_WIN8
2595     +#define VERSION_CURRENT VERSION_WIN8_1
2596    
2597     /* Make maximum size of pipe payload of 16K */
2598     #define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
2599     @@ -894,7 +897,7 @@ struct vmbus_channel_relid_released {
2600     struct vmbus_channel_initiate_contact {
2601     struct vmbus_channel_message_header header;
2602     u32 vmbus_version_requested;
2603     - u32 padding2;
2604     + u32 target_vcpu; /* The VCPU the host should respond to */
2605     u64 interrupt_page;
2606     u64 monitor_page1;
2607     u64 monitor_page2;
2608     diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
2609     index 5fa5afeeb759..6de0f2c14ec0 100644
2610     --- a/include/linux/interrupt.h
2611     +++ b/include/linux/interrupt.h
2612     @@ -239,7 +239,40 @@ static inline int check_wakeup_irqs(void) { return 0; }
2613    
2614     extern cpumask_var_t irq_default_affinity;
2615    
2616     -extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
2617     +/* Internal implementation. Use the helpers below */
2618     +extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
2619     + bool force);
2620     +
2621     +/**
2622     + * irq_set_affinity - Set the irq affinity of a given irq
2623     + * @irq: Interrupt to set affinity
2624     + * @mask: cpumask
2625     + *
2626     + * Fails if cpumask does not contain an online CPU
2627     + */
2628     +static inline int
2629     +irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
2630     +{
2631     + return __irq_set_affinity(irq, cpumask, false);
2632     +}
2633     +
2634     +/**
2635     + * irq_force_affinity - Force the irq affinity of a given irq
2636     + * @irq: Interrupt to set affinity
2637     + * @mask: cpumask
2638     + *
2639     + * Same as irq_set_affinity, but without checking the mask against
2640     + * online cpus.
2641     + *
2642     + * Solely for low level cpu hotplug code, where we need to make per
2643     + * cpu interrupts affine before the cpu becomes online.
2644     + */
2645     +static inline int
2646     +irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
2647     +{
2648     + return __irq_set_affinity(irq, cpumask, true);
2649     +}
2650     +
2651     extern int irq_can_set_affinity(unsigned int irq);
2652     extern int irq_select_affinity(unsigned int irq);
2653    
2654     @@ -275,6 +308,11 @@ static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
2655     return -EINVAL;
2656     }
2657    
2658     +static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
2659     +{
2660     + return 0;
2661     +}
2662     +
2663     static inline int irq_can_set_affinity(unsigned int irq)
2664     {
2665     return 0;
2666     diff --git a/include/linux/irq.h b/include/linux/irq.h
2667     index bc4e06611958..d591bfe1475b 100644
2668     --- a/include/linux/irq.h
2669     +++ b/include/linux/irq.h
2670     @@ -375,7 +375,8 @@ extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
2671    
2672     extern void irq_cpu_online(void);
2673     extern void irq_cpu_offline(void);
2674     -extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask);
2675     +extern int irq_set_affinity_locked(struct irq_data *data,
2676     + const struct cpumask *cpumask, bool force);
2677    
2678     #ifdef CONFIG_GENERIC_HARDIRQS
2679    
2680     diff --git a/include/trace/events/module.h b/include/trace/events/module.h
2681     index 161932737416..ca298c7157ae 100644
2682     --- a/include/trace/events/module.h
2683     +++ b/include/trace/events/module.h
2684     @@ -78,7 +78,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
2685    
2686     TP_fast_assign(
2687     __entry->ip = ip;
2688     - __entry->refcnt = __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs);
2689     + __entry->refcnt = __this_cpu_read(mod->refptr->incs) - __this_cpu_read(mod->refptr->decs);
2690     __assign_str(name, mod->name);
2691     ),
2692    
2693     diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
2694     index 6e132a2f7420..86b1f9942d0a 100644
2695     --- a/include/uapi/drm/tegra_drm.h
2696     +++ b/include/uapi/drm/tegra_drm.h
2697     @@ -103,7 +103,6 @@ struct drm_tegra_submit {
2698     __u32 num_waitchks;
2699     __u32 waitchk_mask;
2700     __u32 timeout;
2701     - __u32 pad;
2702     __u64 syncpts;
2703     __u64 cmdbufs;
2704     __u64 relocs;
2705     diff --git a/kernel/futex.c b/kernel/futex.c
2706     index 3bc18bf48d0c..625a4e659e7a 100644
2707     --- a/kernel/futex.c
2708     +++ b/kernel/futex.c
2709     @@ -592,6 +592,55 @@ void exit_pi_state_list(struct task_struct *curr)
2710     raw_spin_unlock_irq(&curr->pi_lock);
2711     }
2712    
2713     +/*
2714     + * We need to check the following states:
2715     + *
2716     + * Waiter | pi_state | pi->owner | uTID | uODIED | ?
2717     + *
2718     + * [1] NULL | --- | --- | 0 | 0/1 | Valid
2719     + * [2] NULL | --- | --- | >0 | 0/1 | Valid
2720     + *
2721     + * [3] Found | NULL | -- | Any | 0/1 | Invalid
2722     + *
2723     + * [4] Found | Found | NULL | 0 | 1 | Valid
2724     + * [5] Found | Found | NULL | >0 | 1 | Invalid
2725     + *
2726     + * [6] Found | Found | task | 0 | 1 | Valid
2727     + *
2728     + * [7] Found | Found | NULL | Any | 0 | Invalid
2729     + *
2730     + * [8] Found | Found | task | ==taskTID | 0/1 | Valid
2731     + * [9] Found | Found | task | 0 | 0 | Invalid
2732     + * [10] Found | Found | task | !=taskTID | 0/1 | Invalid
2733     + *
2734     + * [1] Indicates that the kernel can acquire the futex atomically. We
2735     + * came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
2736     + *
2737     + * [2] Valid, if TID does not belong to a kernel thread. If no matching
2738     + * thread is found then it indicates that the owner TID has died.
2739     + *
2740     + * [3] Invalid. The waiter is queued on a non PI futex
2741     + *
2742     + * [4] Valid state after exit_robust_list(), which sets the user space
2743     + * value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
2744     + *
2745     + * [5] The user space value got manipulated between exit_robust_list()
2746     + * and exit_pi_state_list()
2747     + *
2748     + * [6] Valid state after exit_pi_state_list() which sets the new owner in
2749     + * the pi_state but cannot access the user space value.
2750     + *
2751     + * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
2752     + *
2753     + * [8] Owner and user space value match
2754     + *
2755     + * [9] There is no transient state which sets the user space TID to 0
2756     + * except exit_robust_list(), but this is indicated by the
2757     + * FUTEX_OWNER_DIED bit. See [4]
2758     + *
2759     + * [10] There is no transient state which leaves owner and user space
2760     + * TID out of sync.
2761     + */
2762     static int
2763     lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
2764     union futex_key *key, struct futex_pi_state **ps)
2765     @@ -607,12 +656,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
2766     plist_for_each_entry_safe(this, next, head, list) {
2767     if (match_futex(&this->key, key)) {
2768     /*
2769     - * Another waiter already exists - bump up
2770     - * the refcount and return its pi_state:
2771     + * Sanity check the waiter before increasing
2772     + * the refcount and attaching to it.
2773     */
2774     pi_state = this->pi_state;
2775     /*
2776     - * Userspace might have messed up non-PI and PI futexes
2777     + * Userspace might have messed up non-PI and
2778     + * PI futexes [3]
2779     */
2780     if (unlikely(!pi_state))
2781     return -EINVAL;
2782     @@ -620,34 +670,70 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
2783     WARN_ON(!atomic_read(&pi_state->refcount));
2784    
2785     /*
2786     - * When pi_state->owner is NULL then the owner died
2787     - * and another waiter is on the fly. pi_state->owner
2788     - * is fixed up by the task which acquires
2789     - * pi_state->rt_mutex.
2790     - *
2791     - * We do not check for pid == 0 which can happen when
2792     - * the owner died and robust_list_exit() cleared the
2793     - * TID.
2794     + * Handle the owner died case:
2795     */
2796     - if (pid && pi_state->owner) {
2797     + if (uval & FUTEX_OWNER_DIED) {
2798     + /*
2799     + * exit_pi_state_list sets owner to NULL and
2800     + * wakes the topmost waiter. The task which
2801     + * acquires the pi_state->rt_mutex will fixup
2802     + * owner.
2803     + */
2804     + if (!pi_state->owner) {
2805     + /*
2806     + * No pi state owner, but the user
2807     + * space TID is not 0. Inconsistent
2808     + * state. [5]
2809     + */
2810     + if (pid)
2811     + return -EINVAL;
2812     + /*
2813     + * Take a ref on the state and
2814     + * return. [4]
2815     + */
2816     + goto out_state;
2817     + }
2818     +
2819     /*
2820     - * Bail out if user space manipulated the
2821     - * futex value.
2822     + * If TID is 0, then either the dying owner
2823     + * has not yet executed exit_pi_state_list()
2824     + * or some waiter acquired the rtmutex in the
2825     + * pi state, but did not yet fixup the TID in
2826     + * user space.
2827     + *
2828     + * Take a ref on the state and return. [6]
2829     */
2830     - if (pid != task_pid_vnr(pi_state->owner))
2831     + if (!pid)
2832     + goto out_state;
2833     + } else {
2834     + /*
2835     + * If the owner died bit is not set,
2836     + * then the pi_state must have an
2837     + * owner. [7]
2838     + */
2839     + if (!pi_state->owner)
2840     return -EINVAL;
2841     }
2842    
2843     + /*
2844     + * Bail out if user space manipulated the
2845     + * futex value. If pi state exists then the
2846     + * owner TID must be the same as the user
2847     + * space TID. [9/10]
2848     + */
2849     + if (pid != task_pid_vnr(pi_state->owner))
2850     + return -EINVAL;
2851     +
2852     + out_state:
2853     atomic_inc(&pi_state->refcount);
2854     *ps = pi_state;
2855     -
2856     return 0;
2857     }
2858     }
2859    
2860     /*
2861     * We are the first waiter - try to look up the real owner and attach
2862     - * the new pi_state to it, but bail out when TID = 0
2863     + * the new pi_state to it, but bail out when TID = 0 [1]
2864     */
2865     if (!pid)
2866     return -ESRCH;
2867     @@ -655,6 +741,11 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
2868     if (!p)
2869     return -ESRCH;
2870    
2871     + if (!p->mm) {
2872     + put_task_struct(p);
2873     + return -EPERM;
2874     + }
2875     +
2876     /*
2877     * We need to look at the task state flags to figure out,
2878     * whether the task is exiting. To protect against the do_exit
2879     @@ -675,6 +766,9 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
2880     return ret;
2881     }
2882    
2883     + /*
2884     + * No existing pi state. First waiter. [2]
2885     + */
2886     pi_state = alloc_pi_state();
2887    
2888     /*
2889     @@ -746,10 +840,18 @@ retry:
2890     return -EDEADLK;
2891    
2892     /*
2893     - * Surprise - we got the lock. Just return to userspace:
2894     + * Surprise - we got the lock, but we do not trust user space at all.
2895     */
2896     - if (unlikely(!curval))
2897     - return 1;
2898     + if (unlikely(!curval)) {
2899     + /*
2900     + * We verify whether there is kernel state for this
2901     + * futex. If not, we can safely assume that the 0 ->
2902     + * TID transition is correct. If state exists, we do
2903     + * not bother to fixup the user space state as it was
2904     + * corrupted already.
2905     + */
2906     + return futex_top_waiter(hb, key) ? -EINVAL : 1;
2907     + }
2908    
2909     uval = curval;
2910    
2911     @@ -879,6 +981,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
2912     struct task_struct *new_owner;
2913     struct futex_pi_state *pi_state = this->pi_state;
2914     u32 uninitialized_var(curval), newval;
2915     + int ret = 0;
2916    
2917     if (!pi_state)
2918     return -EINVAL;
2919     @@ -902,23 +1005,19 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
2920     new_owner = this->task;
2921    
2922     /*
2923     - * We pass it to the next owner. (The WAITERS bit is always
2924     - * kept enabled while there is PI state around. We must also
2925     - * preserve the owner died bit.)
2926     + * We pass it to the next owner. The WAITERS bit is always
2927     + * kept enabled while there is PI state around. We clean up the
2928     + * owner died bit, because we are the owner.
2929     */
2930     - if (!(uval & FUTEX_OWNER_DIED)) {
2931     - int ret = 0;
2932     + newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
2933    
2934     - newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
2935     -
2936     - if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
2937     - ret = -EFAULT;
2938     - else if (curval != uval)
2939     - ret = -EINVAL;
2940     - if (ret) {
2941     - raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
2942     - return ret;
2943     - }
2944     + if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
2945     + ret = -EFAULT;
2946     + else if (curval != uval)
2947     + ret = -EINVAL;
2948     + if (ret) {
2949     + raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
2950     + return ret;
2951     }
2952    
2953     raw_spin_lock_irq(&pi_state->owner->pi_lock);
2954     @@ -1197,7 +1296,7 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
2955     *
2956     * Return:
2957     * 0 - failed to acquire the lock atomically;
2958     - * 1 - acquired the lock;
2959     + * >0 - acquired the lock, return value is vpid of the top_waiter
2960     * <0 - error
2961     */
2962     static int futex_proxy_trylock_atomic(u32 __user *pifutex,
2963     @@ -1208,7 +1307,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
2964     {
2965     struct futex_q *top_waiter = NULL;
2966     u32 curval;
2967     - int ret;
2968     + int ret, vpid;
2969    
2970     if (get_futex_value_locked(&curval, pifutex))
2971     return -EFAULT;
2972     @@ -1236,11 +1335,13 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
2973     * the contended case or if set_waiters is 1. The pi_state is returned
2974     * in ps in contended cases.
2975     */
2976     + vpid = task_pid_vnr(top_waiter->task);
2977     ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
2978     set_waiters);
2979     - if (ret == 1)
2980     + if (ret == 1) {
2981     requeue_pi_wake_futex(top_waiter, key2, hb2);
2982     -
2983     + return vpid;
2984     + }
2985     return ret;
2986     }
2987    
2988     @@ -1272,10 +1373,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
2989     struct futex_hash_bucket *hb1, *hb2;
2990     struct plist_head *head1;
2991     struct futex_q *this, *next;
2992     - u32 curval2;
2993    
2994     if (requeue_pi) {
2995     /*
2996     + * Requeue PI only works on two distinct uaddrs. This
2997     + * check is only valid for private futexes. See below.
2998     + */
2999     + if (uaddr1 == uaddr2)
3000     + return -EINVAL;
3001     +
3002     + /*
3003     * requeue_pi requires a pi_state, try to allocate it now
3004     * without any locks in case it fails.
3005     */
3006     @@ -1313,6 +1420,15 @@ retry:
3007     if (unlikely(ret != 0))
3008     goto out_put_key1;
3009    
3010     + /*
3011     + * The check above which compares uaddrs is not sufficient for
3012     + * shared futexes. We need to compare the keys:
3013     + */
3014     + if (requeue_pi && match_futex(&key1, &key2)) {
3015     + ret = -EINVAL;
3016     + goto out_put_keys;
3017     + }
3018     +
3019     hb1 = hash_futex(&key1);
3020     hb2 = hash_futex(&key2);
3021    
3022     @@ -1358,16 +1474,25 @@ retry_private:
3023     * At this point the top_waiter has either taken uaddr2 or is
3024     * waiting on it. If the former, then the pi_state will not
3025     * exist yet, look it up one more time to ensure we have a
3026     - * reference to it.
3027     + * reference to it. If the lock was taken, ret contains the
3028     + * vpid of the top waiter task.
3029     */
3030     - if (ret == 1) {
3031     + if (ret > 0) {
3032     WARN_ON(pi_state);
3033     drop_count++;
3034     task_count++;
3035     - ret = get_futex_value_locked(&curval2, uaddr2);
3036     - if (!ret)
3037     - ret = lookup_pi_state(curval2, hb2, &key2,
3038     - &pi_state);
3039     + /*
3040     + * If we acquired the lock, then the user
3041     + * space value of uaddr2 should be vpid. It
3042     + * cannot be changed by the top waiter as it
3043     + * is blocked on hb2 lock if it tries to do
3044     + * so. If something fiddled with it behind our
3045     + * back the pi state lookup might unearth
3046     + * it. So we'd rather use the known value than
3047     + * reread it and hand potential crap to
3048     + * lookup_pi_state.
3049     + */
3050     + ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
3051     }
3052    
3053     switch (ret) {
3054     @@ -2137,9 +2262,10 @@ retry:
3055     /*
3056     * To avoid races, try to do the TID -> 0 atomic transition
3057     * again. If it succeeds then we can return without waking
3058     - * anyone else up:
3059     + * anyone else up. We only try this if neither the waiters nor
3060     + * the owner died bit are set.
3061     */
3062     - if (!(uval & FUTEX_OWNER_DIED) &&
3063     + if (!(uval & ~FUTEX_TID_MASK) &&
3064     cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
3065     goto pi_faulted;
3066     /*
3067     @@ -2171,11 +2297,9 @@ retry:
3068     /*
3069     * No waiters - kernel unlocks the futex:
3070     */
3071     - if (!(uval & FUTEX_OWNER_DIED)) {
3072     - ret = unlock_futex_pi(uaddr, uval);
3073     - if (ret == -EFAULT)
3074     - goto pi_faulted;
3075     - }
3076     + ret = unlock_futex_pi(uaddr, uval);
3077     + if (ret == -EFAULT)
3078     + goto pi_faulted;
3079    
3080     out_unlock:
3081     spin_unlock(&hb->lock);
3082     @@ -2334,6 +2458,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
3083     if (ret)
3084     goto out_key2;
3085    
3086     + /*
3087     + * The check above which compares uaddrs is not sufficient for
3088     + * shared futexes. We need to compare the keys:
3089     + */
3090     + if (match_futex(&q.key, &key2)) {
3091     + ret = -EINVAL;
3092     + goto out_put_keys;
3093     + }
3094     +
3095     /* Queue the futex_q, drop the hb lock, wait for wakeup. */
3096     futex_wait_queue_me(hb, &q, to);
3097    
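
The state table and checks above validate the user-space side of the PI futex protocol, in which the futex word holds the owner's TID and the kernel is only entered on contention. A hedged user-space sketch of that protocol follows (error handling omitted; this is illustrative, not a complete PI mutex implementation):

	#include <linux/futex.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <stdatomic.h>

	static void pi_lock(_Atomic unsigned int *futex_word)
	{
		unsigned int zero = 0;
		unsigned int tid = (unsigned int)syscall(SYS_gettid);

		/* Fast path: the 0 -> TID transition claims the lock. */
		if (atomic_compare_exchange_strong(futex_word, &zero, tid))
			return;
		/* Slow path: the kernel sets FUTEX_WAITERS and boosts the owner. */
		syscall(SYS_futex, futex_word, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
	}

	static void pi_unlock(_Atomic unsigned int *futex_word)
	{
		unsigned int tid = (unsigned int)syscall(SYS_gettid);

		/* Fast path: TID -> 0, only succeeds while no waiter or
		 * owner-died bits are set in the futex word. */
		if (atomic_compare_exchange_strong(futex_word, &tid, 0))
			return;
		syscall(SYS_futex, futex_word, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
	}
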
3098     diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
3099     index 2288fbdada16..aadf4b7a607c 100644
3100     --- a/kernel/hrtimer.c
3101     +++ b/kernel/hrtimer.c
3102     @@ -245,6 +245,11 @@ again:
3103     goto again;
3104     }
3105     timer->base = new_base;
3106     + } else {
3107     + if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
3108     + cpu = this_cpu;
3109     + goto again;
3110     + }
3111     }
3112     return new_base;
3113     }
3114     @@ -580,6 +585,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
3115    
3116     cpu_base->expires_next.tv64 = expires_next.tv64;
3117    
3118     + /*
3119     + * If a hang was detected in the last timer interrupt then we
3120     + * leave the hang delay active in the hardware. We want the
3121     + * system to make progress. That also prevents the following
3122     + * scenario:
3123     + * T1 expires 50ms from now
3124     + * T2 expires 5s from now
3125     + *
3126     + * T1 is removed, so this code is called and would reprogram
3127     + * the hardware to 5s from now. Any hrtimer_start after that
3128     + * will not reprogram the hardware due to hang_detected being
3129     + * set. So we'd effectively block all timers until the T2 event
3130     + * fires.
3131     + */
3132     + if (cpu_base->hang_detected)
3133     + return;
3134     +
3135     if (cpu_base->expires_next.tv64 != KTIME_MAX)
3136     tick_program_event(cpu_base->expires_next, 1);
3137     }
3138     @@ -977,11 +999,8 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
3139     /* Remove an active timer from the queue: */
3140     ret = remove_hrtimer(timer, base);
3141    
3142     - /* Switch the timer base, if necessary: */
3143     - new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
3144     -
3145     if (mode & HRTIMER_MODE_REL) {
3146     - tim = ktime_add_safe(tim, new_base->get_time());
3147     + tim = ktime_add_safe(tim, base->get_time());
3148     /*
3149     * CONFIG_TIME_LOW_RES is a temporary way for architectures
3150     * to signal that they simply return xtime in
3151     @@ -996,6 +1015,9 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
3152    
3153     hrtimer_set_expires_range_ns(timer, tim, delta_ns);
3154    
3155     + /* Switch the timer base, if necessary: */
3156     + new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
3157     +
3158     timer_stats_hrtimer_set_start_info(timer);
3159    
3160     leftmost = enqueue_hrtimer(timer, new_base);
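
For orientation, a small sketch of a typical hrtimer user (the 10ms period, callback and setup function are assumptions, not code from this patch). The relative expiry passed to hrtimer_start() is the value that __hrtimer_start_range_ns() above now converts to absolute time against the current base before the base switch happens:

	static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
	{
		/* Re-arm 10ms after the previous expiry and keep running. */
		hrtimer_forward_now(t, ktime_set(0, 10 * NSEC_PER_MSEC));
		return HRTIMER_RESTART;
	}

	static void example_timer_setup(struct hrtimer *t)
	{
		hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		t->function = example_timer_fn;
		/* The relative 10ms is added to base->get_time() inside
		 * __hrtimer_start_range_ns() before the base is switched. */
		hrtimer_start(t, ktime_set(0, 10 * NSEC_PER_MSEC),
			      HRTIMER_MODE_REL);
	}
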
3161     diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
3162     index 9bd5c8a6c8ee..8815abfdf2cb 100644
3163     --- a/kernel/irq/manage.c
3164     +++ b/kernel/irq/manage.c
3165     @@ -150,7 +150,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
3166     struct irq_chip *chip = irq_data_get_irq_chip(data);
3167     int ret;
3168    
3169     - ret = chip->irq_set_affinity(data, mask, false);
3170     + ret = chip->irq_set_affinity(data, mask, force);
3171     switch (ret) {
3172     case IRQ_SET_MASK_OK:
3173     cpumask_copy(data->affinity, mask);
3174     @@ -162,7 +162,8 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
3175     return ret;
3176     }
3177    
3178     -int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
3179     +int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
3180     + bool force)
3181     {
3182     struct irq_chip *chip = irq_data_get_irq_chip(data);
3183     struct irq_desc *desc = irq_data_to_desc(data);
3184     @@ -172,7 +173,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
3185     return -EINVAL;
3186    
3187     if (irq_can_move_pcntxt(data)) {
3188     - ret = irq_do_set_affinity(data, mask, false);
3189     + ret = irq_do_set_affinity(data, mask, force);
3190     } else {
3191     irqd_set_move_pending(data);
3192     irq_copy_pending(desc, mask);
3193     @@ -187,13 +188,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
3194     return ret;
3195     }
3196    
3197     -/**
3198     - * irq_set_affinity - Set the irq affinity of a given irq
3199     - * @irq: Interrupt to set affinity
3200     - * @mask: cpumask
3201     - *
3202     - */
3203     -int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
3204     +int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
3205     {
3206     struct irq_desc *desc = irq_to_desc(irq);
3207     unsigned long flags;
3208     @@ -203,7 +198,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
3209     return -EINVAL;
3210    
3211     raw_spin_lock_irqsave(&desc->lock, flags);
3212     - ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
3213     + ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
3214     raw_spin_unlock_irqrestore(&desc->lock, flags);
3215     return ret;
3216     }
3217     diff --git a/kernel/module.c b/kernel/module.c
3218     index fa53db8aadeb..10a3af821d28 100644
3219     --- a/kernel/module.c
3220     +++ b/kernel/module.c
3221     @@ -3279,6 +3279,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
3222    
3223     dynamic_debug_setup(info->debug, info->num_debug);
3224    
3225     + /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
3226     + ftrace_module_init(mod);
3227     +
3228     /* Finally it's fully formed, ready to start executing. */
3229     err = complete_formation(mod, info);
3230     if (err)
3231     diff --git a/kernel/timer.c b/kernel/timer.c
3232     index 15bc1b41021d..20f45ea6f5a4 100644
3233     --- a/kernel/timer.c
3234     +++ b/kernel/timer.c
3235     @@ -822,7 +822,7 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
3236    
3237     bit = find_last_bit(&mask, BITS_PER_LONG);
3238    
3239     - mask = (1 << bit) - 1;
3240     + mask = (1UL << bit) - 1;
3241    
3242     expires_limit = expires_limit & ~(mask);
3243    
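
To see why the 1UL matters: find_last_bit() operates on an unsigned long, so on 64-bit kernels it can return an index of 32 or more, and "1 << bit" is then a shift by at least the width of int, which is undefined (and even for bit 31 the intermediate value is a signed int). A small illustrative snippet, with an assumed bit value:

	static void slack_mask_example(void)
	{
		unsigned int bit = 32;
		unsigned long bad  = (1   << bit) - 1; /* undefined: shift count >= width of int */
		unsigned long good = (1UL << bit) - 1; /* 0x00000000ffffffff as intended */

		(void)bad;
		(void)good;
	}
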
3244     diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
3245     index 4b93b8412252..797d3b91a30b 100644
3246     --- a/kernel/trace/ftrace.c
3247     +++ b/kernel/trace/ftrace.c
3248     @@ -4222,16 +4222,11 @@ static void ftrace_init_module(struct module *mod,
3249     ftrace_process_locs(mod, start, end);
3250     }
3251    
3252     -static int ftrace_module_notify_enter(struct notifier_block *self,
3253     - unsigned long val, void *data)
3254     +void ftrace_module_init(struct module *mod)
3255     {
3256     - struct module *mod = data;
3257     -
3258     - if (val == MODULE_STATE_COMING)
3259     - ftrace_init_module(mod, mod->ftrace_callsites,
3260     - mod->ftrace_callsites +
3261     - mod->num_ftrace_callsites);
3262     - return 0;
3263     + ftrace_init_module(mod, mod->ftrace_callsites,
3264     + mod->ftrace_callsites +
3265     + mod->num_ftrace_callsites);
3266     }
3267    
3268     static int ftrace_module_notify_exit(struct notifier_block *self,
3269     @@ -4245,11 +4240,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
3270     return 0;
3271     }
3272     #else
3273     -static int ftrace_module_notify_enter(struct notifier_block *self,
3274     - unsigned long val, void *data)
3275     -{
3276     - return 0;
3277     -}
3278     static int ftrace_module_notify_exit(struct notifier_block *self,
3279     unsigned long val, void *data)
3280     {
3281     @@ -4257,11 +4247,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
3282     }
3283     #endif /* CONFIG_MODULES */
3284    
3285     -struct notifier_block ftrace_module_enter_nb = {
3286     - .notifier_call = ftrace_module_notify_enter,
3287     - .priority = INT_MAX, /* Run before anything that can use kprobes */
3288     -};
3289     -
3290     struct notifier_block ftrace_module_exit_nb = {
3291     .notifier_call = ftrace_module_notify_exit,
3292     .priority = INT_MIN, /* Run after anything that can remove kprobes */
3293     @@ -4298,10 +4283,6 @@ void __init ftrace_init(void)
3294     __start_mcount_loc,
3295     __stop_mcount_loc);
3296    
3297     - ret = register_module_notifier(&ftrace_module_enter_nb);
3298     - if (ret)
3299     - pr_warning("Failed to register trace ftrace module enter notifier\n");
3300     -
3301     ret = register_module_notifier(&ftrace_module_exit_nb);
3302     if (ret)
3303     pr_warning("Failed to register trace ftrace module exit notifier\n");
3304     diff --git a/kernel/workqueue.c b/kernel/workqueue.c
3305     index db7a6ac7c0a8..652f36dd40de 100644
3306     --- a/kernel/workqueue.c
3307     +++ b/kernel/workqueue.c
3308     @@ -1881,6 +1881,12 @@ static void send_mayday(struct work_struct *work)
3309    
3310     /* mayday mayday mayday */
3311     if (list_empty(&pwq->mayday_node)) {
3312     + /*
3313     + * If @pwq is for an unbound wq, its base ref may be put at
3314     + * any time due to an attribute change. Pin @pwq until the
3315     + * rescuer is done with it.
3316     + */
3317     + get_pwq(pwq);
3318     list_add_tail(&pwq->mayday_node, &wq->maydays);
3319     wake_up_process(wq->rescuer->task);
3320     }
3321     @@ -2356,6 +2362,7 @@ static int rescuer_thread(void *__rescuer)
3322     struct worker *rescuer = __rescuer;
3323     struct workqueue_struct *wq = rescuer->rescue_wq;
3324     struct list_head *scheduled = &rescuer->scheduled;
3325     + bool should_stop;
3326    
3327     set_user_nice(current, RESCUER_NICE_LEVEL);
3328    
3329     @@ -2367,11 +2374,15 @@ static int rescuer_thread(void *__rescuer)
3330     repeat:
3331     set_current_state(TASK_INTERRUPTIBLE);
3332    
3333     - if (kthread_should_stop()) {
3334     - __set_current_state(TASK_RUNNING);
3335     - rescuer->task->flags &= ~PF_WQ_WORKER;
3336     - return 0;
3337     - }
3338     + /*
3339     + * By the time the rescuer is requested to stop, the workqueue
3340     + * shouldn't have any work pending, but @wq->maydays may still have
3341     + * pwq(s) queued. This can happen by non-rescuer workers consuming
3342     + * all the work items before the rescuer got to them. Go through
3343     + * @wq->maydays processing before acting on should_stop so that the
3344     + * list is always empty on exit.
3345     + */
3346     + should_stop = kthread_should_stop();
3347    
3348     /* see whether any pwq is asking for help */
3349     spin_lock_irq(&wq_mayday_lock);
3350     @@ -2403,6 +2414,12 @@ repeat:
3351     process_scheduled_works(rescuer);
3352    
3353     /*
3354     + * Put the reference grabbed by send_mayday(). @pool won't
3355     + * go away while we're holding its lock.
3356     + */
3357     + put_pwq(pwq);
3358     +
3359     + /*
3360     * Leave this pool. If keep_working() is %true, notify a
3361     * regular worker; otherwise, we end up with 0 concurrency
3362     * and stalling the execution.
3363     @@ -2417,6 +2434,12 @@ repeat:
3364    
3365     spin_unlock_irq(&wq_mayday_lock);
3366    
3367     + if (should_stop) {
3368     + __set_current_state(TASK_RUNNING);
3369     + rescuer->task->flags &= ~PF_WQ_WORKER;
3370     + return 0;
3371     + }
3372     +
3373     /* rescuers should never participate in concurrency management */
3374     WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
3375     schedule();
3376     @@ -4043,7 +4066,8 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
3377     if (!pwq) {
3378     pr_warning("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
3379     wq->name);
3380     - goto out_unlock;
3381     + mutex_lock(&wq->mutex);
3382     + goto use_dfl_pwq;
3383     }
3384    
3385     /*
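
The mayday/rescuer path patched above only exists for workqueues created with WQ_MEM_RECLAIM, which allocates a dedicated rescuer thread. A minimal sketch of such a workqueue (the names are illustrative assumptions):

	static struct workqueue_struct *example_wq;

	static int example_init(void)
	{
		example_wq = alloc_workqueue("example_reclaim",
					     WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
		if (!example_wq)
			return -ENOMEM;
		return 0;
	}

	static void example_exit(void)
	{
		/* destroy_workqueue() stops the rescuer; the put_pwq() added
		 * above ensures @wq->maydays is drained before it exits. */
		destroy_workqueue(example_wq);
	}
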
3386     diff --git a/mm/memory-failure.c b/mm/memory-failure.c
3387     index 59c62fa75c5a..4254eb021583 100644
3388     --- a/mm/memory-failure.c
3389     +++ b/mm/memory-failure.c
3390     @@ -1083,15 +1083,16 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
3391     return 0;
3392     } else if (PageHuge(hpage)) {
3393     /*
3394     - * Check "just unpoisoned", "filter hit", and
3395     - * "race with other subpage."
3396     + * Check "filter hit" and "race with other subpage."
3397     */
3398     lock_page(hpage);
3399     - if (!PageHWPoison(hpage)
3400     - || (hwpoison_filter(p) && TestClearPageHWPoison(p))
3401     - || (p != hpage && TestSetPageHWPoison(hpage))) {
3402     - atomic_long_sub(nr_pages, &num_poisoned_pages);
3403     - return 0;
3404     + if (PageHWPoison(hpage)) {
3405     + if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
3406     + || (p != hpage && TestSetPageHWPoison(hpage))) {
3407     + atomic_long_sub(nr_pages, &num_poisoned_pages);
3408     + unlock_page(hpage);
3409     + return 0;
3410     + }
3411     }
3412     set_page_hwpoison_huge_page(hpage);
3413     res = dequeue_hwpoisoned_huge_page(hpage);
3414     diff --git a/mm/memory.c b/mm/memory.c
3415     index 4b60011907d7..ebe0f285c0e7 100644
3416     --- a/mm/memory.c
3417     +++ b/mm/memory.c
3418     @@ -1937,12 +1937,17 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
3419     unsigned long address, unsigned int fault_flags)
3420     {
3421     struct vm_area_struct *vma;
3422     + vm_flags_t vm_flags;
3423     int ret;
3424    
3425     vma = find_extend_vma(mm, address);
3426     if (!vma || address < vma->vm_start)
3427     return -EFAULT;
3428    
3429     + vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
3430     + if (!(vm_flags & vma->vm_flags))
3431     + return -EFAULT;
3432     +
3433     ret = handle_mm_fault(mm, vma, address, fault_flags);
3434     if (ret & VM_FAULT_ERROR) {
3435     if (ret & VM_FAULT_OOM)
3436     diff --git a/mm/mremap.c b/mm/mremap.c
3437     index 463a25705ac6..2201d060c31b 100644
3438     --- a/mm/mremap.c
3439     +++ b/mm/mremap.c
3440     @@ -175,10 +175,17 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
3441     break;
3442     if (pmd_trans_huge(*old_pmd)) {
3443     int err = 0;
3444     - if (extent == HPAGE_PMD_SIZE)
3445     + if (extent == HPAGE_PMD_SIZE) {
3446     + VM_BUG_ON(vma->vm_file || !vma->anon_vma);
3447     + /* See comment in move_ptes() */
3448     + if (need_rmap_locks)
3449     + anon_vma_lock_write(vma->anon_vma);
3450     err = move_huge_pmd(vma, new_vma, old_addr,
3451     new_addr, old_end,
3452     old_pmd, new_pmd);
3453     + if (need_rmap_locks)
3454     + anon_vma_unlock_write(vma->anon_vma);
3455     + }
3456     if (err > 0) {
3457     need_flush = true;
3458     continue;
3459     diff --git a/mm/percpu.c b/mm/percpu.c
3460     index 8c8e08f3a692..25e2ea52db82 100644
3461     --- a/mm/percpu.c
3462     +++ b/mm/percpu.c
3463     @@ -612,7 +612,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
3464     chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
3465     sizeof(chunk->map[0]));
3466     if (!chunk->map) {
3467     - kfree(chunk);
3468     + pcpu_mem_free(chunk, pcpu_chunk_struct_size);
3469     return NULL;
3470     }
3471    
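
The kfree() replaced above was wrong because, to my understanding, pcpu_mem_zalloc() picks its backing allocator by size (slab for small requests, vmalloc space for larger ones), so the release path must make the same choice. A hypothetical sketch of that pairing (mem_zalloc/mem_free are illustrative names, not the actual percpu helpers):

	static void *mem_zalloc(size_t size)
	{
		if (size <= PAGE_SIZE)
			return kzalloc(size, GFP_KERNEL);
		return vzalloc(size);
	}

	static void mem_free(void *ptr, size_t size)
	{
		/* Freeing must mirror the allocation decision; an
		 * unconditional kfree() corrupts vmalloc'ed memory. */
		if (size <= PAGE_SIZE)
			kfree(ptr);
		else
			vfree(ptr);
	}
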
3472     diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
3473     index 6c7f36379722..4c51c055d00f 100644
3474     --- a/net/bluetooth/hci_conn.c
3475     +++ b/net/bluetooth/hci_conn.c
3476     @@ -652,14 +652,17 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
3477     if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3478     struct hci_cp_auth_requested cp;
3479    
3480     - /* encrypt must be pending if auth is also pending */
3481     - set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3482     -
3483     cp.handle = cpu_to_le16(conn->handle);
3484     hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
3485     sizeof(cp), &cp);
3486     +
3487     + /* If we're already encrypted set the REAUTH_PEND flag,
3488     + * otherwise set the ENCRYPT_PEND.
3489     + */
3490     if (conn->key_type != 0xff)
3491     set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
3492     + else
3493     + set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3494     }
3495    
3496     return 0;
3497     diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
3498     index cfca44f8d048..ab2ec7c414cb 100644
3499     --- a/net/bluetooth/hci_event.c
3500     +++ b/net/bluetooth/hci_event.c
3501     @@ -3051,6 +3051,12 @@ static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3502     if (!conn)
3503     goto unlock;
3504    
3505     + /* For BR/EDR the necessary steps are taken through the
3506     + * auth_complete event.
3507     + */
3508     + if (conn->type != LE_LINK)
3509     + goto unlock;
3510     +
3511     if (!ev->status)
3512     conn->sec_level = conn->pending_sec_level;
3513    
3514     diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
3515     index eb0a46a49bd4..b9d7df175700 100644
3516     --- a/net/ceph/messenger.c
3517     +++ b/net/ceph/messenger.c
3518     @@ -556,7 +556,7 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
3519     return r;
3520     }
3521    
3522     -static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
3523     +static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
3524     int offset, size_t size, bool more)
3525     {
3526     int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
3527     @@ -569,6 +569,24 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
3528     return ret;
3529     }
3530    
3531     +static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
3532     + int offset, size_t size, bool more)
3533     +{
3534     + int ret;
3535     + struct kvec iov;
3536     +
3537     + /* sendpage cannot properly handle pages with page_count == 0,
3538     + * we need to fall back to sendmsg if that's the case */
3539     + if (page_count(page) >= 1)
3540     + return __ceph_tcp_sendpage(sock, page, offset, size, more);
3541     +
3542     + iov.iov_base = kmap(page) + offset;
3543     + iov.iov_len = size;
3544     + ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
3545     + kunmap(page);
3546     +
3547     + return ret;
3548     +}
3549    
3550     /*
3551     * Shutdown/close the socket for the given connection.
3552     diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
3553     index 92ef04c72c51..845563b81a0f 100644
3554     --- a/net/mac80211/ieee80211_i.h
3555     +++ b/net/mac80211/ieee80211_i.h
3556     @@ -311,6 +311,7 @@ struct ieee80211_roc_work {
3557    
3558     bool started, abort, hw_begun, notified;
3559     bool to_be_freed;
3560     + bool on_channel;
3561    
3562     unsigned long hw_start_time;
3563    
3564     @@ -1270,6 +1271,7 @@ void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
3565     void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata);
3566     void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata,
3567     __le16 fc, bool acked);
3568     +void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata);
3569     void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
3570    
3571     /* IBSS code */
3572     diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
3573     index 49bc2246bd86..fc94937cd7b3 100644
3574     --- a/net/mac80211/mlme.c
3575     +++ b/net/mac80211/mlme.c
3576     @@ -3754,6 +3754,32 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
3577     }
3578    
3579     #ifdef CONFIG_PM
3580     +void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
3581     +{
3582     + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3583     + u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
3584     +
3585     + mutex_lock(&ifmgd->mtx);
3586     +
3587     + if (ifmgd->auth_data) {
3588     + /*
3589     + * If we are trying to authenticate while suspending, cfg80211
3590     + * won't know and won't actually abort those attempts, thus we
3591     + * need to do that ourselves.
3592     + */
3593     + ieee80211_send_deauth_disassoc(sdata,
3594     + ifmgd->auth_data->bss->bssid,
3595     + IEEE80211_STYPE_DEAUTH,
3596     + WLAN_REASON_DEAUTH_LEAVING,
3597     + false, frame_buf);
3598     + ieee80211_destroy_auth_data(sdata, false);
3599     + cfg80211_send_deauth(sdata->dev, frame_buf,
3600     + IEEE80211_DEAUTH_FRAME_LEN);
3601     + }
3602     +
3603     + mutex_unlock(&ifmgd->mtx);
3604     +}
3605     +
3606     void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
3607     {
3608     struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3609     diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
3610     index 11d3f227e11e..0427a58b4397 100644
3611     --- a/net/mac80211/offchannel.c
3612     +++ b/net/mac80211/offchannel.c
3613     @@ -333,7 +333,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
3614     container_of(work, struct ieee80211_roc_work, work.work);
3615     struct ieee80211_sub_if_data *sdata = roc->sdata;
3616     struct ieee80211_local *local = sdata->local;
3617     - bool started;
3618     + bool started, on_channel;
3619    
3620     mutex_lock(&local->mtx);
3621    
3622     @@ -354,14 +354,24 @@ void ieee80211_sw_roc_work(struct work_struct *work)
3623     if (!roc->started) {
3624     struct ieee80211_roc_work *dep;
3625    
3626     - /* start this ROC */
3627     - ieee80211_offchannel_stop_vifs(local);
3628     + WARN_ON(local->use_chanctx);
3629     +
3630     + /* If actually operating on the desired channel (with at least
3631     + * 20 MHz channel width) don't stop all the operations but still
3632     + * treat it as though the ROC operation started properly, so
3633     + * other ROC operations won't interfere with this one.
3634     + */
3635     + roc->on_channel = roc->chan == local->_oper_chandef.chan;
3636    
3637     - /* switch channel etc */
3638     + /* start this ROC */
3639     ieee80211_recalc_idle(local);
3640    
3641     - local->tmp_channel = roc->chan;
3642     - ieee80211_hw_config(local, 0);
3643     + if (!roc->on_channel) {
3644     + ieee80211_offchannel_stop_vifs(local);
3645     +
3646     + local->tmp_channel = roc->chan;
3647     + ieee80211_hw_config(local, 0);
3648     + }
3649    
3650     /* tell userspace or send frame */
3651     ieee80211_handle_roc_started(roc);
3652     @@ -380,9 +390,10 @@ void ieee80211_sw_roc_work(struct work_struct *work)
3653     finish:
3654     list_del(&roc->list);
3655     started = roc->started;
3656     + on_channel = roc->on_channel;
3657     ieee80211_roc_notify_destroy(roc, !roc->abort);
3658    
3659     - if (started) {
3660     + if (started && !on_channel) {
3661     ieee80211_flush_queues(local, NULL);
3662    
3663     local->tmp_channel = NULL;
3664     diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
3665     index 340126204343..efb510e6f206 100644
3666     --- a/net/mac80211/pm.c
3667     +++ b/net/mac80211/pm.c
3668     @@ -101,10 +101,18 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
3669    
3670     /* remove all interfaces that were created in the driver */
3671     list_for_each_entry(sdata, &local->interfaces, list) {
3672     - if (!ieee80211_sdata_running(sdata) ||
3673     - sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
3674     - sdata->vif.type == NL80211_IFTYPE_MONITOR)
3675     + if (!ieee80211_sdata_running(sdata))
3676     continue;
3677     + switch (sdata->vif.type) {
3678     + case NL80211_IFTYPE_AP_VLAN:
3679     + case NL80211_IFTYPE_MONITOR:
3680     + continue;
3681     + case NL80211_IFTYPE_STATION:
3682     + ieee80211_mgd_quiesce(sdata);
3683     + break;
3684     + default:
3685     + break;
3686     + }
3687    
3688     drv_remove_interface(local, sdata);
3689     }
3690     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
3691     index 5f055d7ee85b..1800db643a16 100644
3692     --- a/sound/pci/hda/hda_intel.c
3693     +++ b/sound/pci/hda/hda_intel.c
3694     @@ -3856,6 +3856,9 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
3695     /* Lynx Point */
3696     { PCI_DEVICE(0x8086, 0x8c20),
3697     .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
3698     + /* 9 Series */
3699     + { PCI_DEVICE(0x8086, 0x8ca0),
3700     + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
3701     /* Wellsburg */
3702     { PCI_DEVICE(0x8086, 0x8d20),
3703     .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
3704     diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
3705     index e3cd86514cea..1ae1f8bd9c36 100644
3706     --- a/sound/soc/codecs/wm8962.c
3707     +++ b/sound/soc/codecs/wm8962.c
3708     @@ -153,6 +153,7 @@ static struct reg_default wm8962_reg[] = {
3709     { 40, 0x0000 }, /* R40 - SPKOUTL volume */
3710     { 41, 0x0000 }, /* R41 - SPKOUTR volume */
3711    
3712     + { 49, 0x0010 }, /* R49 - Class D Control 1 */
3713     { 51, 0x0003 }, /* R51 - Class D Control 2 */
3714    
3715     { 56, 0x0506 }, /* R56 - Clocking 4 */
3716     @@ -794,7 +795,6 @@ static bool wm8962_volatile_register(struct device *dev, unsigned int reg)
3717     case WM8962_ALC2:
3718     case WM8962_THERMAL_SHUTDOWN_STATUS:
3719     case WM8962_ADDITIONAL_CONTROL_4:
3720     - case WM8962_CLASS_D_CONTROL_1:
3721     case WM8962_DC_SERVO_6:
3722     case WM8962_INTERRUPT_STATUS_1:
3723     case WM8962_INTERRUPT_STATUS_2:
3724     @@ -2901,13 +2901,22 @@ static int wm8962_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
3725     static int wm8962_mute(struct snd_soc_dai *dai, int mute)
3726     {
3727     struct snd_soc_codec *codec = dai->codec;
3728     - int val;
3729     + int val, ret;
3730    
3731     if (mute)
3732     - val = WM8962_DAC_MUTE;
3733     + val = WM8962_DAC_MUTE | WM8962_DAC_MUTE_ALT;
3734     else
3735     val = 0;
3736    
3737     + /*
3738     + * The DAC mute bit is mirrored in two registers, update both to keep
3739     + * the register cache consistent.
3740     + */
3741     + ret = snd_soc_update_bits(codec, WM8962_CLASS_D_CONTROL_1,
3742     + WM8962_DAC_MUTE_ALT, val);
3743     + if (ret < 0)
3744     + return ret;
3745     +
3746     return snd_soc_update_bits(codec, WM8962_ADC_DAC_CONTROL_1,
3747     WM8962_DAC_MUTE, val);
3748     }
3749     diff --git a/sound/soc/codecs/wm8962.h b/sound/soc/codecs/wm8962.h
3750     index a1a5d5294c19..910aafd09d21 100644
3751     --- a/sound/soc/codecs/wm8962.h
3752     +++ b/sound/soc/codecs/wm8962.h
3753     @@ -1954,6 +1954,10 @@
3754     #define WM8962_SPKOUTL_ENA_MASK 0x0040 /* SPKOUTL_ENA */
3755     #define WM8962_SPKOUTL_ENA_SHIFT 6 /* SPKOUTL_ENA */
3756     #define WM8962_SPKOUTL_ENA_WIDTH 1 /* SPKOUTL_ENA */
3757     +#define WM8962_DAC_MUTE_ALT 0x0010 /* DAC_MUTE */
3758     +#define WM8962_DAC_MUTE_ALT_MASK 0x0010 /* DAC_MUTE */
3759     +#define WM8962_DAC_MUTE_ALT_SHIFT 4 /* DAC_MUTE */
3760     +#define WM8962_DAC_MUTE_ALT_WIDTH 1 /* DAC_MUTE */
3761     #define WM8962_SPKOUTL_PGA_MUTE 0x0002 /* SPKOUTL_PGA_MUTE */
3762     #define WM8962_SPKOUTL_PGA_MUTE_MASK 0x0002 /* SPKOUTL_PGA_MUTE */
3763     #define WM8962_SPKOUTL_PGA_MUTE_SHIFT 1 /* SPKOUTL_PGA_MUTE */
3764     diff --git a/sound/usb/card.h b/sound/usb/card.h
3765     index bf2889a2cae5..82c2d80c8228 100644
3766     --- a/sound/usb/card.h
3767     +++ b/sound/usb/card.h
3768     @@ -90,6 +90,7 @@ struct snd_usb_endpoint {
3769     unsigned int curframesize; /* current packet size in frames (for capture) */
3770     unsigned int syncmaxsize; /* sync endpoint packet size */
3771     unsigned int fill_max:1; /* fill max packet size always */
3772     + unsigned int udh01_fb_quirk:1; /* corrupted feedback data */
3773     unsigned int datainterval; /* log_2 of data packet interval */
3774     unsigned int syncinterval; /* P for adaptive mode, 0 otherwise */
3775     unsigned char silence_value;
3776     diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
3777     index 659950e5b94f..308c02b2a597 100644
3778     --- a/sound/usb/endpoint.c
3779     +++ b/sound/usb/endpoint.c
3780     @@ -467,6 +467,10 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
3781     ep->syncinterval = 3;
3782    
3783     ep->syncmaxsize = le16_to_cpu(get_endpoint(alts, 1)->wMaxPacketSize);
3784     +
3785     + if (chip->usb_id == USB_ID(0x0644, 0x8038) /* TEAC UD-H01 */ &&
3786     + ep->syncmaxsize == 4)
3787     + ep->udh01_fb_quirk = 1;
3788     }
3789    
3790     list_add_tail(&ep->list, &chip->ep_list);
3791     @@ -1075,7 +1079,16 @@ void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
3792     if (f == 0)
3793     return;
3794    
3795     - if (unlikely(ep->freqshift == INT_MIN)) {
3796     + if (unlikely(sender->udh01_fb_quirk)) {
3797     + /*
3798     + * The TEAC UD-H01 firmware sometimes changes the feedback value
3799     + * by +/- 0x1.0000.
3800     + */
3801     + if (f < ep->freqn - 0x8000)
3802     + f += 0x10000;
3803     + else if (f > ep->freqn + 0x8000)
3804     + f -= 0x10000;
3805     + } else if (unlikely(ep->freqshift == INT_MIN)) {
3806     /*
3807     * The first time we see a feedback value, determine its format
3808     * by shifting it left or right until it matches the nominal