Magellan Linux

Annotation of /trunk/kernel-alx/patches-3.14/0105-3.14.6-all-fixes.patch

Revision 2506
Fri Oct 17 07:55:45 2014 UTC by niro
File size: 320683 bytes
-patches for 3.14
1 niro 2506 diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt
2     index 9fbbdb783a72..68ff2137bae7 100644
3     --- a/Documentation/devicetree/bindings/dma/ti-edma.txt
4     +++ b/Documentation/devicetree/bindings/dma/ti-edma.txt
5     @@ -29,6 +29,6 @@ edma: edma@49000000 {
6     dma-channels = <64>;
7     ti,edma-regions = <4>;
8     ti,edma-slots = <256>;
9     - ti,edma-xbar-event-map = <1 12
10     - 2 13>;
11     + ti,edma-xbar-event-map = /bits/ 16 <1 12
12     + 2 13>;
13     };
14     diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
15     index aaaf069306a3..adf5e33e8312 100644
16     --- a/Documentation/i2c/busses/i2c-i801
17     +++ b/Documentation/i2c/busses/i2c-i801
18     @@ -26,6 +26,7 @@ Supported adapters:
19     * Intel Wellsburg (PCH)
20     * Intel Coleto Creek (PCH)
21     * Intel Wildcat Point-LP (PCH)
22     + * Intel BayTrail (SOC)
23     Datasheets: Publicly available at the Intel website
24    
25     On Intel Patsburg and later chipsets, both the normal host SMBus controller
26     diff --git a/Documentation/input/elantech.txt b/Documentation/input/elantech.txt
27     index 5602eb71ad5d..e1ae127ed099 100644
28     --- a/Documentation/input/elantech.txt
29     +++ b/Documentation/input/elantech.txt
30     @@ -504,9 +504,12 @@ byte 5:
31     * reg_10
32    
33     bit 7 6 5 4 3 2 1 0
34     - 0 0 0 0 0 0 0 A
35     + 0 0 0 0 R F T A
36    
37     A: 1 = enable absolute tracking
38     + T: 1 = enable two finger mode auto correct
39     + F: 1 = disable ABS Position Filter
40     + R: 1 = enable real hardware resolution
41    
42     6.2 Native absolute mode 6 byte packet format
43     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
44     diff --git a/Documentation/ja_JP/HOWTO b/Documentation/ja_JP/HOWTO
45     index 0091a8215ac1..b61885c35ce1 100644
46     --- a/Documentation/ja_JP/HOWTO
47     +++ b/Documentation/ja_JP/HOWTO
48     @@ -315,7 +315,7 @@ Andrew Morton が Linux-kernel メーリングリストにカーネルリリー
49     もし、3.x.y カーネルが存在しない場合には、番号が一番大きい 3.x が
50     最新の安定版カーネルです。
51    
52     -3.x.y は "stable" チーム <stable@kernel.org> でメンテされており、必
53     +3.x.y は "stable" チーム <stable@vger.kernel.org> でメンテされており、必
54     要に応じてリリースされます。通常のリリース期間は 2週間毎ですが、差し迫っ
55     た問題がなければもう少し長くなることもあります。セキュリティ関連の問題
56     の場合はこれに対してだいたいの場合、すぐにリリースがされます。
57     diff --git a/Documentation/ja_JP/stable_kernel_rules.txt b/Documentation/ja_JP/stable_kernel_rules.txt
58     index 14265837c4ce..9dbda9b5d21e 100644
59     --- a/Documentation/ja_JP/stable_kernel_rules.txt
60     +++ b/Documentation/ja_JP/stable_kernel_rules.txt
61     @@ -50,16 +50,16 @@ linux-2.6.29/Documentation/stable_kernel_rules.txt
62    
63     -stable ツリーにパッチを送付する手続き-
64    
65     - - 上記の規則に従っているかを確認した後に、stable@kernel.org にパッチ
66     + - 上記の規則に従っているかを確認した後に、stable@vger.kernel.org にパッチ
67     を送る。
68     - 送信者はパッチがキューに受け付けられた際には ACK を、却下された場合
69     には NAK を受け取る。この反応は開発者たちのスケジュールによって、数
70     日かかる場合がある。
71     - もし受け取られたら、パッチは他の開発者たちと関連するサブシステムの
72     メンテナーによるレビューのために -stable キューに追加される。
73     - - パッチに stable@kernel.org のアドレスが付加されているときには、それ
74     + - パッチに stable@vger.kernel.org のアドレスが付加されているときには、それ
75     が Linus のツリーに入る時に自動的に stable チームに email される。
76     - - セキュリティパッチはこのエイリアス (stable@kernel.org) に送られるべ
77     + - セキュリティパッチはこのエイリアス (stable@vger.kernel.org) に送られるべ
78     きではなく、代わりに security@kernel.org のアドレスに送られる。
79    
80     レビューサイクル-
81     diff --git a/Documentation/zh_CN/HOWTO b/Documentation/zh_CN/HOWTO
82     index 6c914aa87e71..54ea24ff63c7 100644
83     --- a/Documentation/zh_CN/HOWTO
84     +++ b/Documentation/zh_CN/HOWTO
85     @@ -237,7 +237,7 @@ kernel.org网站的pub/linux/kernel/v2.6/目录下找到它。它的开发遵循
86     如果没有2.6.x.y版本内核存在,那么最新的2.6.x版本内核就相当于是当前的稳定
87     版内核。
88    
89     -2.6.x.y版本由“稳定版”小组(邮件地址<stable@kernel.org>)维护,一般隔周发
90     +2.6.x.y版本由“稳定版”小组(邮件地址<stable@vger.kernel.org>)维护,一般隔周发
91     布新版本。
92    
93     内核源码中的Documentation/stable_kernel_rules.txt文件具体描述了可被稳定
94     diff --git a/Documentation/zh_CN/stable_kernel_rules.txt b/Documentation/zh_CN/stable_kernel_rules.txt
95     index b5b9b0ab02fd..26ea5ed7cd9c 100644
96     --- a/Documentation/zh_CN/stable_kernel_rules.txt
97     +++ b/Documentation/zh_CN/stable_kernel_rules.txt
98     @@ -42,7 +42,7 @@ Documentation/stable_kernel_rules.txt 的中文翻译
99    
100     向稳定版代码树提交补丁的过程:
101    
102     - - 在确认了补丁符合以上的规则后,将补丁发送到stable@kernel.org。
103     + - 在确认了补丁符合以上的规则后,将补丁发送到stable@vger.kernel.org。
104     - 如果补丁被接受到队列里,发送者会收到一个ACK回复,如果没有被接受,收
105     到的是NAK回复。回复需要几天的时间,这取决于开发者的时间安排。
106     - 被接受的补丁会被加到稳定版本队列里,等待其他开发者的审查。
107     diff --git a/Makefile b/Makefile
108     index fa77b0bed2a2..0d499e6896cd 100644
109     --- a/Makefile
110     +++ b/Makefile
111     @@ -1,6 +1,6 @@
112     VERSION = 3
113     PATCHLEVEL = 14
114     -SUBLEVEL = 5
115     +SUBLEVEL = 6
116     EXTRAVERSION =
117     NAME = Remembering Coco
118    
119     diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
120     index 79087ccf64bc..ab01d0f98d1a 100644
121     --- a/arch/arm/boot/dts/am33xx.dtsi
122     +++ b/arch/arm/boot/dts/am33xx.dtsi
123     @@ -140,7 +140,7 @@
124     compatible = "ti,edma3";
125     ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
126     reg = <0x49000000 0x10000>,
127     - <0x44e10f90 0x10>;
128     + <0x44e10f90 0x40>;
129     interrupts = <12 13 14>;
130     #dma-cells = <1>;
131     dma-channels = <64>;
132     diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts
133     index bcf6d79a57ec..8c2fe44e4dfe 100644
134     --- a/arch/arm/boot/dts/armada-xp-db.dts
135     +++ b/arch/arm/boot/dts/armada-xp-db.dts
136     @@ -40,7 +40,7 @@
137     /* Device Bus parameters are required */
138    
139     /* Read parameters */
140     - devbus,bus-width = <8>;
141     + devbus,bus-width = <16>;
142     devbus,turn-off-ps = <60000>;
143     devbus,badr-skew-ps = <0>;
144     devbus,acc-first-ps = <124000>;
145     diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
146     index 274e2ad5f51c..df5c34284dbd 100644
147     --- a/arch/arm/boot/dts/armada-xp-gp.dts
148     +++ b/arch/arm/boot/dts/armada-xp-gp.dts
149     @@ -49,7 +49,7 @@
150     /* Device Bus parameters are required */
151    
152     /* Read parameters */
153     - devbus,bus-width = <8>;
154     + devbus,bus-width = <16>;
155     devbus,turn-off-ps = <60000>;
156     devbus,badr-skew-ps = <0>;
157     devbus,acc-first-ps = <124000>;
158     diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
159     index 99bcf76e6953..b33e10ea2ea0 100644
160     --- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
161     +++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
162     @@ -37,7 +37,7 @@
163     /* Device Bus parameters are required */
164    
165     /* Read parameters */
166     - devbus,bus-width = <8>;
167     + devbus,bus-width = <16>;
168     devbus,turn-off-ps = <60000>;
169     devbus,badr-skew-ps = <0>;
170     devbus,acc-first-ps = <124000>;
171     diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
172     index 4307e80b2d2e..dc72353de0b3 100644
173     --- a/arch/arm/boot/dts/imx53.dtsi
174     +++ b/arch/arm/boot/dts/imx53.dtsi
175     @@ -87,7 +87,7 @@
176     ipu: ipu@18000000 {
177     #crtc-cells = <1>;
178     compatible = "fsl,imx53-ipu";
179     - reg = <0x18000000 0x080000000>;
180     + reg = <0x18000000 0x08000000>;
181     interrupts = <11 10>;
182     clocks = <&clks 59>, <&clks 110>, <&clks 61>;
183     clock-names = "bus", "di0", "di1";
184     diff --git a/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts b/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
185     index dc86429756d7..4651672bda89 100644
186     --- a/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
187     +++ b/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
188     @@ -30,6 +30,16 @@
189     bootargs = "console=ttyS0,115200n8 earlyprintk";
190     };
191    
192     + mbus {
193     + pcie-controller {
194     + status = "okay";
195     +
196     + pcie@1,0 {
197     + status = "okay";
198     + };
199     + };
200     + };
201     +
202     ocp@f1000000 {
203     pinctrl@10000 {
204     pmx_usb_led: pmx-usb-led {
205     @@ -73,14 +83,6 @@
206     ehci@50000 {
207     status = "okay";
208     };
209     -
210     - pcie-controller {
211     - status = "okay";
212     -
213     - pcie@1,0 {
214     - status = "okay";
215     - };
216     - };
217     };
218    
219     gpio-leds {
220     diff --git a/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi b/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi
221     index aa78c2d11fe7..e2cc85cc3b87 100644
222     --- a/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi
223     +++ b/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi
224     @@ -4,6 +4,16 @@
225     / {
226     model = "ZyXEL NSA310";
227    
228     + mbus {
229     + pcie-controller {
230     + status = "okay";
231     +
232     + pcie@1,0 {
233     + status = "okay";
234     + };
235     + };
236     + };
237     +
238     ocp@f1000000 {
239     pinctrl: pinctrl@10000 {
240    
241     @@ -26,14 +36,6 @@
242     status = "okay";
243     nr-ports = <2>;
244     };
245     -
246     - pcie-controller {
247     - status = "okay";
248     -
249     - pcie@1,0 {
250     - status = "okay";
251     - };
252     - };
253     };
254    
255     gpio_poweroff {
256     diff --git a/arch/arm/boot/dts/ste-ccu8540.dts b/arch/arm/boot/dts/ste-ccu8540.dts
257     index 7f3baf51a3a9..32dd55e5f4e6 100644
258     --- a/arch/arm/boot/dts/ste-ccu8540.dts
259     +++ b/arch/arm/boot/dts/ste-ccu8540.dts
260     @@ -18,6 +18,7 @@
261     compatible = "st-ericsson,ccu8540", "st-ericsson,u8540";
262    
263     memory@0 {
264     + device_type = "memory";
265     reg = <0x20000000 0x1f000000>, <0xc0000000 0x3f000000>;
266     };
267    
268     diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
269     index 41bca32409fc..5339009b3c0c 100644
270     --- a/arch/arm/common/edma.c
271     +++ b/arch/arm/common/edma.c
272     @@ -1423,55 +1423,38 @@ EXPORT_SYMBOL(edma_clear_event);
273    
274     #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES)
275    
276     -static int edma_of_read_u32_to_s16_array(const struct device_node *np,
277     - const char *propname, s16 *out_values,
278     - size_t sz)
279     +static int edma_xbar_event_map(struct device *dev, struct device_node *node,
280     + struct edma_soc_info *pdata, size_t sz)
281     {
282     - int ret;
283     -
284     - ret = of_property_read_u16_array(np, propname, out_values, sz);
285     - if (ret)
286     - return ret;
287     -
288     - /* Terminate it */
289     - *out_values++ = -1;
290     - *out_values++ = -1;
291     -
292     - return 0;
293     -}
294     -
295     -static int edma_xbar_event_map(struct device *dev,
296     - struct device_node *node,
297     - struct edma_soc_info *pdata, int len)
298     -{
299     - int ret, i;
300     + const char pname[] = "ti,edma-xbar-event-map";
301     struct resource res;
302     void __iomem *xbar;
303     - const s16 (*xbar_chans)[2];
304     + s16 (*xbar_chans)[2];
305     + size_t nelm = sz / sizeof(s16);
306     u32 shift, offset, mux;
307     + int ret, i;
308    
309     - xbar_chans = devm_kzalloc(dev,
310     - len/sizeof(s16) + 2*sizeof(s16),
311     - GFP_KERNEL);
312     + xbar_chans = devm_kzalloc(dev, (nelm + 2) * sizeof(s16), GFP_KERNEL);
313     if (!xbar_chans)
314     return -ENOMEM;
315    
316     ret = of_address_to_resource(node, 1, &res);
317     if (ret)
318     - return -EIO;
319     + return -ENOMEM;
320    
321     xbar = devm_ioremap(dev, res.start, resource_size(&res));
322     if (!xbar)
323     return -ENOMEM;
324    
325     - ret = edma_of_read_u32_to_s16_array(node,
326     - "ti,edma-xbar-event-map",
327     - (s16 *)xbar_chans,
328     - len/sizeof(u32));
329     + ret = of_property_read_u16_array(node, pname, (u16 *)xbar_chans, nelm);
330     if (ret)
331     return -EIO;
332    
333     - for (i = 0; xbar_chans[i][0] != -1; i++) {
334     + /* Invalidate last entry for the other user of this mess */
335     + nelm >>= 1;
336     + xbar_chans[nelm][0] = xbar_chans[nelm][1] = -1;
337     +
338     + for (i = 0; i < nelm; i++) {
339     shift = (xbar_chans[i][1] & 0x03) << 3;
340     offset = xbar_chans[i][1] & 0xfffffffc;
341     mux = readl(xbar + offset);
342     @@ -1480,8 +1463,7 @@ static int edma_xbar_event_map(struct device *dev,
343     writel(mux, (xbar + offset));
344     }
345    
346     - pdata->xbar_chans = xbar_chans;
347     -
348     + pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
349     return 0;
350     }
351    
352     diff --git a/arch/arm/kernel/crash_dump.c b/arch/arm/kernel/crash_dump.c
353     index 90c50d4b43f7..5d1286d51154 100644
354     --- a/arch/arm/kernel/crash_dump.c
355     +++ b/arch/arm/kernel/crash_dump.c
356     @@ -39,7 +39,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
357     if (!csize)
358     return 0;
359    
360     - vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
361     + vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE);
362     if (!vaddr)
363     return -ENOMEM;
364    
365     diff --git a/arch/arm/mach-mvebu/mvebu-soc-id.c b/arch/arm/mach-mvebu/mvebu-soc-id.c
366     index f3b325f6cbd4..0e1547a186b0 100644
367     --- a/arch/arm/mach-mvebu/mvebu-soc-id.c
368     +++ b/arch/arm/mach-mvebu/mvebu-soc-id.c
369     @@ -107,7 +107,18 @@ static int __init mvebu_soc_id_init(void)
370     iounmap(pci_base);
371    
372     res_ioremap:
373     - clk_disable_unprepare(clk);
374     + /*
375     + * If the PCIe unit is actually enabled and we have PCI
376     + * support in the kernel, we intentionally do not release the
377     + * reference to the clock. We want to keep it running since
378     + * the bootloader does some PCIe link configuration that the
379     + * kernel is for now unable to do, and gating the clock would
380     + * make us loose this precious configuration.
381     + */
382     + if (!of_device_is_available(child) || !IS_ENABLED(CONFIG_PCI_MVEBU)) {
383     + clk_disable_unprepare(clk);
384     + clk_put(clk);
385     + }
386    
387     clk_err:
388     of_node_put(child);
389     diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
390     index f565f9944af2..7548db2bfb8a 100644
391     --- a/arch/arm/mach-orion5x/common.h
392     +++ b/arch/arm/mach-orion5x/common.h
393     @@ -21,7 +21,7 @@ struct mv_sata_platform_data;
394     #define ORION_MBUS_DEVBUS_BOOT_ATTR 0x0f
395     #define ORION_MBUS_DEVBUS_TARGET(cs) 0x01
396     #define ORION_MBUS_DEVBUS_ATTR(cs) (~(1 << cs))
397     -#define ORION_MBUS_SRAM_TARGET 0x00
398     +#define ORION_MBUS_SRAM_TARGET 0x09
399     #define ORION_MBUS_SRAM_ATTR 0x00
400    
401     /*
402     diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
403     index 473e5dbf8f39..0f08dfd69ebc 100644
404     --- a/arch/arm64/kernel/irq.c
405     +++ b/arch/arm64/kernel/irq.c
406     @@ -97,11 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
407     if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
408     return false;
409    
410     - if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
411     - affinity = cpu_online_mask;
412     + if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
413     ret = true;
414     - }
415    
416     + /*
417     + * when using forced irq_set_affinity we must ensure that the cpu
418     + * being offlined is not present in the affinity mask, it may be
419     + * selected as the target CPU otherwise
420     + */
421     + affinity = cpu_online_mask;
422     c = irq_data_get_irq_chip(d);
423     if (!c->irq_set_affinity)
424     pr_debug("IRQ%u: unable to set affinity\n", d->irq);
425     diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
426     index 5e9aec358306..31eb959e9aa8 100644
427     --- a/arch/arm64/mm/hugetlbpage.c
428     +++ b/arch/arm64/mm/hugetlbpage.c
429     @@ -51,7 +51,11 @@ int pmd_huge(pmd_t pmd)
430    
431     int pud_huge(pud_t pud)
432     {
433     +#ifndef __PAGETABLE_PMD_FOLDED
434     return !(pud_val(pud) & PUD_TABLE_BIT);
435     +#else
436     + return 0;
437     +#endif
438     }
439    
440     int pmd_huge_support(void)
441     diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
442     index 5d6b4b407dda..2d6f0de77325 100644
443     --- a/arch/metag/include/asm/barrier.h
444     +++ b/arch/metag/include/asm/barrier.h
445     @@ -15,6 +15,7 @@ static inline void wr_fence(void)
446     volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_FENCE;
447     barrier();
448     *flushptr = 0;
449     + barrier();
450     }
451    
452     #else /* CONFIG_METAG_META21 */
453     @@ -35,6 +36,7 @@ static inline void wr_fence(void)
454     *flushptr = 0;
455     *flushptr = 0;
456     *flushptr = 0;
457     + barrier();
458     }
459    
460     #endif /* !CONFIG_METAG_META21 */
461     @@ -68,6 +70,7 @@ static inline void fence(void)
462     volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK;
463     barrier();
464     *flushptr = 0;
465     + barrier();
466     }
467     #define smp_mb() fence()
468     #define smp_rmb() fence()
469     diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
470     index f16477d1f571..3be8581af495 100644
471     --- a/arch/metag/include/asm/processor.h
472     +++ b/arch/metag/include/asm/processor.h
473     @@ -22,6 +22,8 @@
474     /* Add an extra page of padding at the top of the stack for the guard page. */
475     #define STACK_TOP (TASK_SIZE - PAGE_SIZE)
476     #define STACK_TOP_MAX STACK_TOP
477     +/* Maximum virtual space for stack */
478     +#define STACK_SIZE_MAX (1 << 28) /* 256 MB */
479    
480     /* This decides where the kernel will search for a free chunk of vm
481     * space during mmap's.
482     diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
483     index c2bb4f896ce7..3aa5b46b2d40 100644
484     --- a/arch/mips/cavium-octeon/octeon-irq.c
485     +++ b/arch/mips/cavium-octeon/octeon-irq.c
486     @@ -635,7 +635,7 @@ static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
487     cpumask_clear(&new_affinity);
488     cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
489     }
490     - __irq_set_affinity_locked(data, &new_affinity);
491     + irq_set_affinity_locked(data, &new_affinity, false);
492     }
493    
494     static int octeon_irq_ciu_set_affinity(struct irq_data *data,
495     diff --git a/arch/mips/lantiq/dts/easy50712.dts b/arch/mips/lantiq/dts/easy50712.dts
496     index fac1f5b178eb..143b8a37b5e4 100644
497     --- a/arch/mips/lantiq/dts/easy50712.dts
498     +++ b/arch/mips/lantiq/dts/easy50712.dts
499     @@ -8,6 +8,7 @@
500     };
501    
502     memory@0 {
503     + device_type = "memory";
504     reg = <0x0 0x2000000>;
505     };
506    
507     diff --git a/arch/mips/loongson/lemote-2f/clock.c b/arch/mips/loongson/lemote-2f/clock.c
508     index aed32b88576c..7d8c9cce3009 100644
509     --- a/arch/mips/loongson/lemote-2f/clock.c
510     +++ b/arch/mips/loongson/lemote-2f/clock.c
511     @@ -91,6 +91,7 @@ EXPORT_SYMBOL(clk_put);
512    
513     int clk_set_rate(struct clk *clk, unsigned long rate)
514     {
515     + unsigned int rate_khz = rate / 1000;
516     int ret = 0;
517     int regval;
518     int i;
519     @@ -111,10 +112,10 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
520     if (loongson2_clockmod_table[i].frequency ==
521     CPUFREQ_ENTRY_INVALID)
522     continue;
523     - if (rate == loongson2_clockmod_table[i].frequency)
524     + if (rate_khz == loongson2_clockmod_table[i].frequency)
525     break;
526     }
527     - if (rate != loongson2_clockmod_table[i].frequency)
528     + if (rate_khz != loongson2_clockmod_table[i].frequency)
529     return -ENOTSUPP;
530    
531     clk->rate = rate;
532     diff --git a/arch/mips/ralink/dts/mt7620a_eval.dts b/arch/mips/ralink/dts/mt7620a_eval.dts
533     index 35eb874ab7f1..709f58132f5c 100644
534     --- a/arch/mips/ralink/dts/mt7620a_eval.dts
535     +++ b/arch/mips/ralink/dts/mt7620a_eval.dts
536     @@ -7,6 +7,7 @@
537     model = "Ralink MT7620A evaluation board";
538    
539     memory@0 {
540     + device_type = "memory";
541     reg = <0x0 0x2000000>;
542     };
543    
544     diff --git a/arch/mips/ralink/dts/rt2880_eval.dts b/arch/mips/ralink/dts/rt2880_eval.dts
545     index 322d7002595b..0a685db093d4 100644
546     --- a/arch/mips/ralink/dts/rt2880_eval.dts
547     +++ b/arch/mips/ralink/dts/rt2880_eval.dts
548     @@ -7,6 +7,7 @@
549     model = "Ralink RT2880 evaluation board";
550    
551     memory@0 {
552     + device_type = "memory";
553     reg = <0x8000000 0x2000000>;
554     };
555    
556     diff --git a/arch/mips/ralink/dts/rt3052_eval.dts b/arch/mips/ralink/dts/rt3052_eval.dts
557     index 0ac73ea28198..ec9e9a035541 100644
558     --- a/arch/mips/ralink/dts/rt3052_eval.dts
559     +++ b/arch/mips/ralink/dts/rt3052_eval.dts
560     @@ -7,6 +7,7 @@
561     model = "Ralink RT3052 evaluation board";
562    
563     memory@0 {
564     + device_type = "memory";
565     reg = <0x0 0x2000000>;
566     };
567    
568     diff --git a/arch/mips/ralink/dts/rt3883_eval.dts b/arch/mips/ralink/dts/rt3883_eval.dts
569     index 2fa6b330bf4f..e8df21a5d10d 100644
570     --- a/arch/mips/ralink/dts/rt3883_eval.dts
571     +++ b/arch/mips/ralink/dts/rt3883_eval.dts
572     @@ -7,6 +7,7 @@
573     model = "Ralink RT3883 evaluation board";
574    
575     memory@0 {
576     + device_type = "memory";
577     reg = <0x0 0x2000000>;
578     };
579    
580     diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
581     index bb2a8ec440e7..a8f4a70c0742 100644
582     --- a/arch/parisc/Kconfig
583     +++ b/arch/parisc/Kconfig
584     @@ -22,6 +22,7 @@ config PARISC
585     select GENERIC_SMP_IDLE_THREAD
586     select GENERIC_STRNCPY_FROM_USER
587     select SYSCTL_ARCH_UNALIGN_ALLOW
588     + select SYSCTL_EXCEPTION_TRACE
589     select HAVE_MOD_ARCH_SPECIFIC
590     select VIRT_TO_BUS
591     select MODULES_USE_ELF_RELA
592     diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
593     index 198a86feb574..86522ef09d52 100644
594     --- a/arch/parisc/include/asm/processor.h
595     +++ b/arch/parisc/include/asm/processor.h
596     @@ -55,6 +55,8 @@
597     #define STACK_TOP TASK_SIZE
598     #define STACK_TOP_MAX DEFAULT_TASK_SIZE
599    
600     +#define STACK_SIZE_MAX (1 << 30) /* 1 GB */
601     +
602     #endif
603    
604     #ifndef __ASSEMBLY__
605     diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
606     index a63bb179f79a..838786011037 100644
607     --- a/arch/parisc/kernel/syscall.S
608     +++ b/arch/parisc/kernel/syscall.S
609     @@ -589,10 +589,13 @@ cas_nocontend:
610     # endif
611     /* ENABLE_LWS_DEBUG */
612    
613     + rsm PSW_SM_I, %r0 /* Disable interrupts */
614     + /* COW breaks can cause contention on UP systems */
615     LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */
616     cmpb,<>,n %r0, %r28, cas_action /* Did we get it? */
617     cas_wouldblock:
618     ldo 2(%r0), %r28 /* 2nd case */
619     + ssm PSW_SM_I, %r0
620     b lws_exit /* Contended... */
621     ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
622    
623     @@ -619,15 +622,17 @@ cas_action:
624     stw %r1, 4(%sr2,%r20)
625     #endif
626     /* The load and store could fail */
627     -1: ldw 0(%sr3,%r26), %r28
628     +1: ldw,ma 0(%sr3,%r26), %r28
629     sub,<> %r28, %r25, %r0
630     -2: stw %r24, 0(%sr3,%r26)
631     +2: stw,ma %r24, 0(%sr3,%r26)
632     /* Free lock */
633     - stw %r20, 0(%sr2,%r20)
634     + stw,ma %r20, 0(%sr2,%r20)
635     #if ENABLE_LWS_DEBUG
636     /* Clear thread register indicator */
637     stw %r0, 4(%sr2,%r20)
638     #endif
639     + /* Enable interrupts */
640     + ssm PSW_SM_I, %r0
641     /* Return to userspace, set no error */
642     b lws_exit
643     copy %r0, %r21
644     @@ -639,6 +644,7 @@ cas_action:
645     #if ENABLE_LWS_DEBUG
646     stw %r0, 4(%sr2,%r20)
647     #endif
648     + ssm PSW_SM_I, %r0
649     b lws_exit
650     ldo -EFAULT(%r0),%r21 /* set errno */
651     nop
652     diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
653     index 1cd1d0c83b6d..47ee620d15d2 100644
654     --- a/arch/parisc/kernel/traps.c
655     +++ b/arch/parisc/kernel/traps.c
656     @@ -25,6 +25,7 @@
657     #include <linux/interrupt.h>
658     #include <linux/console.h>
659     #include <linux/bug.h>
660     +#include <linux/ratelimit.h>
661    
662     #include <asm/assembly.h>
663     #include <asm/uaccess.h>
664     @@ -42,9 +43,6 @@
665    
666     #include "../math-emu/math-emu.h" /* for handle_fpe() */
667    
668     -#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
669     - /* dumped to the console via printk) */
670     -
671     #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
672     DEFINE_SPINLOCK(pa_dbit_lock);
673     #endif
674     @@ -160,6 +158,17 @@ void show_regs(struct pt_regs *regs)
675     }
676     }
677    
678     +static DEFINE_RATELIMIT_STATE(_hppa_rs,
679     + DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
680     +
681     +#define parisc_printk_ratelimited(critical, regs, fmt, ...) { \
682     + if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \
683     + printk(fmt, ##__VA_ARGS__); \
684     + show_regs(regs); \
685     + } \
686     +}
687     +
688     +
689     static void do_show_stack(struct unwind_frame_info *info)
690     {
691     int i = 1;
692     @@ -229,12 +238,10 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
693     if (err == 0)
694     return; /* STFU */
695    
696     - printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
697     + parisc_printk_ratelimited(1, regs,
698     + KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
699     current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
700     -#ifdef PRINT_USER_FAULTS
701     - /* XXX for debugging only */
702     - show_regs(regs);
703     -#endif
704     +
705     return;
706     }
707    
708     @@ -321,14 +328,11 @@ static void handle_break(struct pt_regs *regs)
709     (tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
710     }
711    
712     -#ifdef PRINT_USER_FAULTS
713     - if (unlikely(iir != GDB_BREAK_INSN)) {
714     - printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
715     + if (unlikely(iir != GDB_BREAK_INSN))
716     + parisc_printk_ratelimited(0, regs,
717     + KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
718     iir & 31, (iir>>13) & ((1<<13)-1),
719     task_pid_nr(current), current->comm);
720     - show_regs(regs);
721     - }
722     -#endif
723    
724     /* send standard GDB signal */
725     handle_gdb_break(regs, TRAP_BRKPT);
726     @@ -758,11 +762,9 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
727    
728     default:
729     if (user_mode(regs)) {
730     -#ifdef PRINT_USER_FAULTS
731     - printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
732     - task_pid_nr(current), current->comm);
733     - show_regs(regs);
734     -#endif
735     + parisc_printk_ratelimited(0, regs, KERN_DEBUG
736     + "handle_interruption() pid=%d command='%s'\n",
737     + task_pid_nr(current), current->comm);
738     /* SIGBUS, for lack of a better one. */
739     si.si_signo = SIGBUS;
740     si.si_code = BUS_OBJERR;
741     @@ -779,16 +781,10 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
742    
743     if (user_mode(regs)) {
744     if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
745     -#ifdef PRINT_USER_FAULTS
746     - if (fault_space == 0)
747     - printk(KERN_DEBUG "User Fault on Kernel Space ");
748     - else
749     - printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
750     - code);
751     - printk(KERN_CONT "pid=%d command='%s'\n",
752     - task_pid_nr(current), current->comm);
753     - show_regs(regs);
754     -#endif
755     + parisc_printk_ratelimited(0, regs, KERN_DEBUG
756     + "User fault %d on space 0x%08lx, pid=%d command='%s'\n",
757     + code, fault_space,
758     + task_pid_nr(current), current->comm);
759     si.si_signo = SIGSEGV;
760     si.si_errno = 0;
761     si.si_code = SEGV_MAPERR;
762     diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
763     index 9d08c71a967e..d72197f0ddb8 100644
764     --- a/arch/parisc/mm/fault.c
765     +++ b/arch/parisc/mm/fault.c
766     @@ -19,10 +19,6 @@
767     #include <asm/uaccess.h>
768     #include <asm/traps.h>
769    
770     -#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
771     - /* dumped to the console via printk) */
772     -
773     -
774     /* Various important other fields */
775     #define bit22set(x) (x & 0x00000200)
776     #define bits23_25set(x) (x & 0x000001c0)
777     @@ -34,6 +30,8 @@
778    
779     DEFINE_PER_CPU(struct exception_data, exception_data);
780    
781     +int show_unhandled_signals = 1;
782     +
783     /*
784     * parisc_acctyp(unsigned int inst) --
785     * Given a PA-RISC memory access instruction, determine if the
786     @@ -173,6 +171,32 @@ int fixup_exception(struct pt_regs *regs)
787     return 0;
788     }
789    
790     +/*
791     + * Print out info about fatal segfaults, if the show_unhandled_signals
792     + * sysctl is set:
793     + */
794     +static inline void
795     +show_signal_msg(struct pt_regs *regs, unsigned long code,
796     + unsigned long address, struct task_struct *tsk,
797     + struct vm_area_struct *vma)
798     +{
799     + if (!unhandled_signal(tsk, SIGSEGV))
800     + return;
801     +
802     + if (!printk_ratelimit())
803     + return;
804     +
805     + pr_warn("\n");
806     + pr_warn("do_page_fault() command='%s' type=%lu address=0x%08lx",
807     + tsk->comm, code, address);
808     + print_vma_addr(KERN_CONT " in ", regs->iaoq[0]);
809     + if (vma)
810     + pr_warn(" vm_start = 0x%08lx, vm_end = 0x%08lx\n",
811     + vma->vm_start, vma->vm_end);
812     +
813     + show_regs(regs);
814     +}
815     +
816     void do_page_fault(struct pt_regs *regs, unsigned long code,
817     unsigned long address)
818     {
819     @@ -270,16 +294,8 @@ bad_area:
820     if (user_mode(regs)) {
821     struct siginfo si;
822    
823     -#ifdef PRINT_USER_FAULTS
824     - printk(KERN_DEBUG "\n");
825     - printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n",
826     - task_pid_nr(tsk), tsk->comm, code, address);
827     - if (vma) {
828     - printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n",
829     - vma->vm_start, vma->vm_end);
830     - }
831     - show_regs(regs);
832     -#endif
833     + show_signal_msg(regs, code, address, tsk, vma);
834     +
835     switch (code) {
836     case 15: /* Data TLB miss fault/Data page fault */
837     /* send SIGSEGV when outside of vma */
838     diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
839     index 0f4344e6fbca..8289e2b241a2 100644
840     --- a/arch/powerpc/Makefile
841     +++ b/arch/powerpc/Makefile
842     @@ -149,7 +149,9 @@ endif
843    
844     CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell)
845    
846     -KBUILD_CPPFLAGS += -Iarch/$(ARCH)
847     +asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
848     +
849     +KBUILD_CPPFLAGS += -Iarch/$(ARCH) $(asinstr)
850     KBUILD_AFLAGS += -Iarch/$(ARCH)
851     KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
852     CPP = $(CC) -E $(KBUILD_CFLAGS)
853     diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
854     index 6586a40a46ce..cded7c1278ef 100644
855     --- a/arch/powerpc/include/asm/ppc_asm.h
856     +++ b/arch/powerpc/include/asm/ppc_asm.h
857     @@ -318,11 +318,16 @@ n:
858     addi reg,reg,(name - 0b)@l;
859    
860     #ifdef __powerpc64__
861     +#ifdef HAVE_AS_ATHIGH
862     +#define __AS_ATHIGH high
863     +#else
864     +#define __AS_ATHIGH h
865     +#endif
866     #define LOAD_REG_IMMEDIATE(reg,expr) \
867     lis reg,(expr)@highest; \
868     ori reg,reg,(expr)@higher; \
869     rldicr reg,reg,32,31; \
870     - oris reg,reg,(expr)@h; \
871     + oris reg,reg,(expr)@__AS_ATHIGH; \
872     ori reg,reg,(expr)@l;
873    
874     #define LOAD_REG_ADDR(reg,name) \
875     diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
876     index 59d229a2a3e0..879b3aacac32 100644
877     --- a/arch/powerpc/kernel/machine_kexec_64.c
878     +++ b/arch/powerpc/kernel/machine_kexec_64.c
879     @@ -237,7 +237,7 @@ static void wake_offline_cpus(void)
880     if (!cpu_online(cpu)) {
881     printk(KERN_INFO "kexec: Waking offline cpu %d.\n",
882     cpu);
883     - cpu_up(cpu);
884     + WARN_ON(cpu_up(cpu));
885     }
886     }
887     }
888     diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
889     index b3dab20acf34..57d4bada19bd 100644
890     --- a/arch/powerpc/kernel/time.c
891     +++ b/arch/powerpc/kernel/time.c
892     @@ -805,9 +805,6 @@ static void __init clocksource_init(void)
893     static int decrementer_set_next_event(unsigned long evt,
894     struct clock_event_device *dev)
895     {
896     - /* Don't adjust the decrementer if some irq work is pending */
897     - if (test_irq_work_pending())
898     - return 0;
899     __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
900     set_dec(evt);
901    
902     diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
903     index 253fefe3d1a0..5b51079f3e3b 100644
904     --- a/arch/powerpc/platforms/powernv/eeh-ioda.c
905     +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
906     @@ -549,7 +549,8 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
907     ret = ioda_eeh_phb_reset(hose, option);
908     } else {
909     bus = eeh_pe_bus_get(pe);
910     - if (pci_is_root_bus(bus))
911     + if (pci_is_root_bus(bus) ||
912     + pci_is_root_bus(bus->parent))
913     ret = ioda_eeh_root_reset(hose, option);
914     else
915     ret = ioda_eeh_bridge_reset(hose, bus->self, option);
916     diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
917     index cf3c0089bef2..23223cd63e54 100644
918     --- a/arch/s390/crypto/aes_s390.c
919     +++ b/arch/s390/crypto/aes_s390.c
920     @@ -820,6 +820,9 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
921     else
922     memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
923     spin_unlock(&ctrblk_lock);
924     + } else {
925     + if (!nbytes)
926     + memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
927     }
928     /*
929     * final block may be < AES_BLOCK_SIZE, copy only nbytes
930     diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
931     index 0a5aac8a9412..7acb77f7ef1a 100644
932     --- a/arch/s390/crypto/des_s390.c
933     +++ b/arch/s390/crypto/des_s390.c
934     @@ -429,6 +429,9 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
935     else
936     memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
937     spin_unlock(&ctrblk_lock);
938     + } else {
939     + if (!nbytes)
940     + memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
941     }
942     /* final block may be < DES_BLOCK_SIZE, copy only nbytes */
943     if (nbytes) {
944     diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
945     index a8091216963b..68c05398bba9 100644
946     --- a/arch/x86/include/asm/hugetlb.h
947     +++ b/arch/x86/include/asm/hugetlb.h
948     @@ -52,6 +52,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
949     static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
950     unsigned long addr, pte_t *ptep)
951     {
952     + ptep_clear_flush(vma, addr, ptep);
953     }
954    
955     static inline int huge_pte_none(pte_t pte)
956     diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
957     index af1d14a9ebda..dcbbaa165bde 100644
958     --- a/arch/x86/kernel/ldt.c
959     +++ b/arch/x86/kernel/ldt.c
960     @@ -20,6 +20,8 @@
961     #include <asm/mmu_context.h>
962     #include <asm/syscalls.h>
963    
964     +int sysctl_ldt16 = 0;
965     +
966     #ifdef CONFIG_SMP
967     static void flush_ldt(void *current_mm)
968     {
969     @@ -234,7 +236,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
970     * IRET leaking the high bits of the kernel stack address.
971     */
972     #ifdef CONFIG_X86_64
973     - if (!ldt_info.seg_32bit) {
974     + if (!ldt_info.seg_32bit && !sysctl_ldt16) {
975     error = -EINVAL;
976     goto out_unlock;
977     }
978     diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
979     index d6bfb876cfb0..f1d633a43f8e 100644
980     --- a/arch/x86/vdso/vdso32-setup.c
981     +++ b/arch/x86/vdso/vdso32-setup.c
982     @@ -41,6 +41,7 @@ enum {
983     #ifdef CONFIG_X86_64
984     #define vdso_enabled sysctl_vsyscall32
985     #define arch_setup_additional_pages syscall32_setup_pages
986     +extern int sysctl_ldt16;
987     #endif
988    
989     /*
990     @@ -380,6 +381,13 @@ static struct ctl_table abi_table2[] = {
991     .mode = 0644,
992     .proc_handler = proc_dointvec
993     },
994     + {
995     + .procname = "ldt16",
996     + .data = &sysctl_ldt16,
997     + .maxlen = sizeof(int),
998     + .mode = 0644,
999     + .proc_handler = proc_dointvec
1000     + },
1001     {}
1002     };
1003    
1004     diff --git a/crypto/crypto_wq.c b/crypto/crypto_wq.c
1005     index adad92a44ba2..2f1b8d12952a 100644
1006     --- a/crypto/crypto_wq.c
1007     +++ b/crypto/crypto_wq.c
1008     @@ -33,7 +33,7 @@ static void __exit crypto_wq_exit(void)
1009     destroy_workqueue(kcrypto_wq);
1010     }
1011    
1012     -module_init(crypto_wq_init);
1013     +subsys_initcall(crypto_wq_init);
1014     module_exit(crypto_wq_exit);
1015    
1016     MODULE_LICENSE("GPL");
1017     diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
1018     index 4770de5707b9..561bf25ac9f9 100644
1019     --- a/drivers/acpi/Kconfig
1020     +++ b/drivers/acpi/Kconfig
1021     @@ -56,6 +56,23 @@ config ACPI_PROCFS
1022    
1023     Say N to delete /proc/acpi/ files that have moved to /sys/
1024    
1025     +config ACPI_PROCFS_POWER
1026     + bool "Deprecated power /proc/acpi directories"
1027     + depends on PROC_FS
1028     + help
1029     + For backwards compatibility, this option allows
1030     + deprecated power /proc/acpi/ directories to exist, even when
1031     + they have been replaced by functions in /sys.
1032     + The deprecated directories (and their replacements) include:
1033     + /proc/acpi/battery/* (/sys/class/power_supply/*)
1034     + /proc/acpi/ac_adapter/* (sys/class/power_supply/*)
1035     + This option has no effect on /proc/acpi/ directories
1036     + and functions, which do not yet exist in /sys
1037     + This option, together with the proc directories, will be
1038     + deleted in 2.6.39.
1039     +
1040     + Say N to delete power /proc/acpi/ directories that have moved to /sys/
1041     +
1042     config ACPI_EC_DEBUGFS
1043     tristate "EC read/write access through /sys/kernel/debug/ec"
1044     default n
1045     diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
1046     index 0331f91d56e6..bce34afadcd0 100644
1047     --- a/drivers/acpi/Makefile
1048     +++ b/drivers/acpi/Makefile
1049     @@ -47,6 +47,7 @@ acpi-y += sysfs.o
1050     acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
1051     acpi-$(CONFIG_DEBUG_FS) += debugfs.o
1052     acpi-$(CONFIG_ACPI_NUMA) += numa.o
1053     +acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
1054     ifdef CONFIG_ACPI_VIDEO
1055     acpi-y += video_detect.o
1056     endif
1057     diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
1058     index 6f190bc2b8b7..7c1f8452918a 100644
1059     --- a/drivers/acpi/ac.c
1060     +++ b/drivers/acpi/ac.c
1061     @@ -51,11 +51,39 @@ MODULE_AUTHOR("Paul Diefenbaugh");
1062     MODULE_DESCRIPTION("ACPI AC Adapter Driver");
1063     MODULE_LICENSE("GPL");
1064    
1065     +static int acpi_ac_add(struct acpi_device *device);
1066     +static int acpi_ac_remove(struct acpi_device *device);
1067     +static void acpi_ac_notify(struct acpi_device *device, u32 event);
1068     +
1069     +static const struct acpi_device_id ac_device_ids[] = {
1070     + {"ACPI0003", 0},
1071     + {"", 0},
1072     +};
1073     +MODULE_DEVICE_TABLE(acpi, ac_device_ids);
1074     +
1075     +#ifdef CONFIG_PM_SLEEP
1076     +static int acpi_ac_resume(struct device *dev);
1077     +#endif
1078     +static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
1079     +
1080     static int ac_sleep_before_get_state_ms;
1081    
1082     +static struct acpi_driver acpi_ac_driver = {
1083     + .name = "ac",
1084     + .class = ACPI_AC_CLASS,
1085     + .ids = ac_device_ids,
1086     + .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
1087     + .ops = {
1088     + .add = acpi_ac_add,
1089     + .remove = acpi_ac_remove,
1090     + .notify = acpi_ac_notify,
1091     + },
1092     + .drv.pm = &acpi_ac_pm,
1093     +};
1094     +
1095     struct acpi_ac {
1096     struct power_supply charger;
1097     - struct platform_device *pdev;
1098     + struct acpi_device * device;
1099     unsigned long long state;
1100     };
1101    
1102     @@ -67,10 +95,12 @@ struct acpi_ac {
1103    
1104     static int acpi_ac_get_state(struct acpi_ac *ac)
1105     {
1106     - acpi_status status;
1107     - acpi_handle handle = ACPI_HANDLE(&ac->pdev->dev);
1108     + acpi_status status = AE_OK;
1109     +
1110     + if (!ac)
1111     + return -EINVAL;
1112    
1113     - status = acpi_evaluate_integer(handle, "_PSR", NULL,
1114     + status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL,
1115     &ac->state);
1116     if (ACPI_FAILURE(status)) {
1117     ACPI_EXCEPTION((AE_INFO, status,
1118     @@ -115,10 +145,9 @@ static enum power_supply_property ac_props[] = {
1119     Driver Model
1120     -------------------------------------------------------------------------- */
1121    
1122     -static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
1123     +static void acpi_ac_notify(struct acpi_device *device, u32 event)
1124     {
1125     - struct acpi_ac *ac = data;
1126     - struct acpi_device *adev;
1127     + struct acpi_ac *ac = acpi_driver_data(device);
1128    
1129     if (!ac)
1130     return;
1131     @@ -141,11 +170,10 @@ static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
1132     msleep(ac_sleep_before_get_state_ms);
1133    
1134     acpi_ac_get_state(ac);
1135     - adev = ACPI_COMPANION(&ac->pdev->dev);
1136     - acpi_bus_generate_netlink_event(adev->pnp.device_class,
1137     - dev_name(&ac->pdev->dev),
1138     - event, (u32) ac->state);
1139     - acpi_notifier_call_chain(adev, event, (u32) ac->state);
1140     + acpi_bus_generate_netlink_event(device->pnp.device_class,
1141     + dev_name(&device->dev), event,
1142     + (u32) ac->state);
1143     + acpi_notifier_call_chain(device, event, (u32) ac->state);
1144     kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
1145     }
1146    
1147     @@ -170,49 +198,39 @@ static struct dmi_system_id ac_dmi_table[] = {
1148     {},
1149     };
1150    
1151     -static int acpi_ac_probe(struct platform_device *pdev)
1152     +static int acpi_ac_add(struct acpi_device *device)
1153     {
1154     int result = 0;
1155     struct acpi_ac *ac = NULL;
1156     - struct acpi_device *adev;
1157    
1158     - if (!pdev)
1159     - return -EINVAL;
1160    
1161     - adev = ACPI_COMPANION(&pdev->dev);
1162     - if (!adev)
1163     - return -ENODEV;
1164     + if (!device)
1165     + return -EINVAL;
1166    
1167     ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL);
1168     if (!ac)
1169     return -ENOMEM;
1170    
1171     - strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME);
1172     - strcpy(acpi_device_class(adev), ACPI_AC_CLASS);
1173     - ac->pdev = pdev;
1174     - platform_set_drvdata(pdev, ac);
1175     + ac->device = device;
1176     + strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME);
1177     + strcpy(acpi_device_class(device), ACPI_AC_CLASS);
1178     + device->driver_data = ac;
1179    
1180     result = acpi_ac_get_state(ac);
1181     if (result)
1182     goto end;
1183    
1184     - ac->charger.name = acpi_device_bid(adev);
1185     + ac->charger.name = acpi_device_bid(device);
1186     ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
1187     ac->charger.properties = ac_props;
1188     ac->charger.num_properties = ARRAY_SIZE(ac_props);
1189     ac->charger.get_property = get_ac_property;
1190     - result = power_supply_register(&pdev->dev, &ac->charger);
1191     + result = power_supply_register(&ac->device->dev, &ac->charger);
1192     if (result)
1193     goto end;
1194    
1195     - result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
1196     - ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac);
1197     - if (result) {
1198     - power_supply_unregister(&ac->charger);
1199     - goto end;
1200     - }
1201     printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
1202     - acpi_device_name(adev), acpi_device_bid(adev),
1203     + acpi_device_name(device), acpi_device_bid(device),
1204     ac->state ? "on-line" : "off-line");
1205    
1206     end:
1207     @@ -232,7 +250,7 @@ static int acpi_ac_resume(struct device *dev)
1208     if (!dev)
1209     return -EINVAL;
1210    
1211     - ac = platform_get_drvdata(to_platform_device(dev));
1212     + ac = acpi_driver_data(to_acpi_device(dev));
1213     if (!ac)
1214     return -EINVAL;
1215    
1216     @@ -246,19 +264,17 @@ static int acpi_ac_resume(struct device *dev)
1217     #else
1218     #define acpi_ac_resume NULL
1219     #endif
1220     -static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume);
1221    
1222     -static int acpi_ac_remove(struct platform_device *pdev)
1223     +static int acpi_ac_remove(struct acpi_device *device)
1224     {
1225     - struct acpi_ac *ac;
1226     + struct acpi_ac *ac = NULL;
1227     +
1228    
1229     - if (!pdev)
1230     + if (!device || !acpi_driver_data(device))
1231     return -EINVAL;
1232    
1233     - acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
1234     - ACPI_ALL_NOTIFY, acpi_ac_notify_handler);
1235     + ac = acpi_driver_data(device);
1236    
1237     - ac = platform_get_drvdata(pdev);
1238     if (ac->charger.dev)
1239     power_supply_unregister(&ac->charger);
1240    
1241     @@ -267,23 +283,6 @@ static int acpi_ac_remove(struct platform_device *pdev)
1242     return 0;
1243     }
1244    
1245     -static const struct acpi_device_id acpi_ac_match[] = {
1246     - { "ACPI0003", 0 },
1247     - { }
1248     -};
1249     -MODULE_DEVICE_TABLE(acpi, acpi_ac_match);
1250     -
1251     -static struct platform_driver acpi_ac_driver = {
1252     - .probe = acpi_ac_probe,
1253     - .remove = acpi_ac_remove,
1254     - .driver = {
1255     - .name = "acpi-ac",
1256     - .owner = THIS_MODULE,
1257     - .pm = &acpi_ac_pm_ops,
1258     - .acpi_match_table = ACPI_PTR(acpi_ac_match),
1259     - },
1260     -};
1261     -
1262     static int __init acpi_ac_init(void)
1263     {
1264     int result;
1265     @@ -291,7 +290,7 @@ static int __init acpi_ac_init(void)
1266     if (acpi_disabled)
1267     return -ENODEV;
1268    
1269     - result = platform_driver_register(&acpi_ac_driver);
1270     + result = acpi_bus_register_driver(&acpi_ac_driver);
1271     if (result < 0)
1272     return -ENODEV;
1273    
1274     @@ -300,7 +299,7 @@ static int __init acpi_ac_init(void)
1275    
1276     static void __exit acpi_ac_exit(void)
1277     {
1278     - platform_driver_unregister(&acpi_ac_driver);
1279     + acpi_bus_unregister_driver(&acpi_ac_driver);
1280     }
1281     module_init(acpi_ac_init);
1282     module_exit(acpi_ac_exit);
1283     diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
1284     index dbfe49e5fd63..1d4950388fa1 100644
1285     --- a/drivers/acpi/acpi_platform.c
1286     +++ b/drivers/acpi/acpi_platform.c
1287     @@ -29,7 +29,6 @@ ACPI_MODULE_NAME("platform");
1288     static const struct acpi_device_id acpi_platform_device_ids[] = {
1289    
1290     { "PNP0D40" },
1291     - { "ACPI0003" },
1292     { "VPC2004" },
1293     { "BCM4752" },
1294    
1295     diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
1296     index b06f5f55ada9..52c81c49cc7d 100644
1297     --- a/drivers/acpi/acpi_processor.c
1298     +++ b/drivers/acpi/acpi_processor.c
1299     @@ -405,7 +405,6 @@ static int acpi_processor_add(struct acpi_device *device,
1300     goto err;
1301    
1302     pr->dev = dev;
1303     - dev->offline = pr->flags.need_hotplug_init;
1304    
1305     /* Trigger the processor driver's .probe() if present. */
1306     if (device_attach(dev) >= 0)
1307     diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
1308     index 4ed1aa384df2..514f34033f96 100644
1309     --- a/drivers/acpi/acpica/acglobal.h
1310     +++ b/drivers/acpi/acpica/acglobal.h
1311     @@ -132,9 +132,9 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_do_not_use_xsdt, FALSE);
1312     * address. Although ACPICA adheres to the ACPI specification which
1313     * requires the use of the corresponding 64-bit address if it is non-zero,
1314     * some machines have been found to have a corrupted non-zero 64-bit
1315     - * address. Default is FALSE, do not favor the 32-bit addresses.
1316     + * address. Default is TRUE, favor the 32-bit addresses.
1317     */
1318     -u8 ACPI_INIT_GLOBAL(acpi_gbl_use32_bit_fadt_addresses, FALSE);
1319     +u8 ACPI_INIT_GLOBAL(acpi_gbl_use32_bit_fadt_addresses, TRUE);
1320    
1321     /*
1322     * Optionally truncate I/O addresses to 16 bits. Provides compatibility
1323     diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
1324     index 6412d3c301cb..1bc879ec83d4 100644
1325     --- a/drivers/acpi/acpica/tbutils.c
1326     +++ b/drivers/acpi/acpica/tbutils.c
1327     @@ -461,6 +461,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
1328     u32 table_count;
1329     struct acpi_table_header *table;
1330     acpi_physical_address address;
1331     + acpi_physical_address rsdt_address;
1332     u32 length;
1333     u8 *table_entry;
1334     acpi_status status;
1335     @@ -488,11 +489,14 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
1336     * as per the ACPI specification.
1337     */
1338     address = (acpi_physical_address) rsdp->xsdt_physical_address;
1339     + rsdt_address =
1340     + (acpi_physical_address) rsdp->rsdt_physical_address;
1341     table_entry_size = ACPI_XSDT_ENTRY_SIZE;
1342     } else {
1343     /* Root table is an RSDT (32-bit physical addresses) */
1344    
1345     address = (acpi_physical_address) rsdp->rsdt_physical_address;
1346     + rsdt_address = address;
1347     table_entry_size = ACPI_RSDT_ENTRY_SIZE;
1348     }
1349    
1350     @@ -515,8 +519,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
1351    
1352     /* Fall back to the RSDT */
1353    
1354     - address =
1355     - (acpi_physical_address) rsdp->rsdt_physical_address;
1356     + address = rsdt_address;
1357     table_entry_size = ACPI_RSDT_ENTRY_SIZE;
1358     }
1359     }
1360     diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
1361     index 797a6938d051..efa71d66e8b0 100644
1362     --- a/drivers/acpi/battery.c
1363     +++ b/drivers/acpi/battery.c
1364     @@ -36,6 +36,12 @@
1365     #include <linux/suspend.h>
1366     #include <asm/unaligned.h>
1367    
1368     +#ifdef CONFIG_ACPI_PROCFS_POWER
1369     +#include <linux/proc_fs.h>
1370     +#include <linux/seq_file.h>
1371     +#include <asm/uaccess.h>
1372     +#endif
1373     +
1374     #include <linux/acpi.h>
1375     #include <linux/power_supply.h>
1376    
1377     @@ -66,6 +72,19 @@ static unsigned int cache_time = 1000;
1378     module_param(cache_time, uint, 0644);
1379     MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
1380    
1381     +#ifdef CONFIG_ACPI_PROCFS_POWER
1382     +extern struct proc_dir_entry *acpi_lock_battery_dir(void);
1383     +extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
1384     +
1385     +enum acpi_battery_files {
1386     + info_tag = 0,
1387     + state_tag,
1388     + alarm_tag,
1389     + ACPI_BATTERY_NUMFILES,
1390     +};
1391     +
1392     +#endif
1393     +
1394     static const struct acpi_device_id battery_device_ids[] = {
1395     {"PNP0C0A", 0},
1396     {"", 0},
1397     @@ -301,6 +320,14 @@ static enum power_supply_property energy_battery_props[] = {
1398     POWER_SUPPLY_PROP_SERIAL_NUMBER,
1399     };
1400    
1401     +#ifdef CONFIG_ACPI_PROCFS_POWER
1402     +inline char *acpi_battery_units(struct acpi_battery *battery)
1403     +{
1404     + return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
1405     + "mA" : "mW";
1406     +}
1407     +#endif
1408     +
1409     /* --------------------------------------------------------------------------
1410     Battery Management
1411     -------------------------------------------------------------------------- */
1412     @@ -719,6 +746,279 @@ static void acpi_battery_refresh(struct acpi_battery *battery)
1413     }
1414    
1415     /* --------------------------------------------------------------------------
1416     + FS Interface (/proc)
1417     + -------------------------------------------------------------------------- */
1418     +
1419     +#ifdef CONFIG_ACPI_PROCFS_POWER
1420     +static struct proc_dir_entry *acpi_battery_dir;
1421     +
1422     +static int acpi_battery_print_info(struct seq_file *seq, int result)
1423     +{
1424     + struct acpi_battery *battery = seq->private;
1425     +
1426     + if (result)
1427     + goto end;
1428     +
1429     + seq_printf(seq, "present: %s\n",
1430     + acpi_battery_present(battery) ? "yes" : "no");
1431     + if (!acpi_battery_present(battery))
1432     + goto end;
1433     + if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
1434     + seq_printf(seq, "design capacity: unknown\n");
1435     + else
1436     + seq_printf(seq, "design capacity: %d %sh\n",
1437     + battery->design_capacity,
1438     + acpi_battery_units(battery));
1439     +
1440     + if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
1441     + seq_printf(seq, "last full capacity: unknown\n");
1442     + else
1443     + seq_printf(seq, "last full capacity: %d %sh\n",
1444     + battery->full_charge_capacity,
1445     + acpi_battery_units(battery));
1446     +
1447     + seq_printf(seq, "battery technology: %srechargeable\n",
1448     + (!battery->technology)?"non-":"");
1449     +
1450     + if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
1451     + seq_printf(seq, "design voltage: unknown\n");
1452     + else
1453     + seq_printf(seq, "design voltage: %d mV\n",
1454     + battery->design_voltage);
1455     + seq_printf(seq, "design capacity warning: %d %sh\n",
1456     + battery->design_capacity_warning,
1457     + acpi_battery_units(battery));
1458     + seq_printf(seq, "design capacity low: %d %sh\n",
1459     + battery->design_capacity_low,
1460     + acpi_battery_units(battery));
1461     + seq_printf(seq, "cycle count: %i\n", battery->cycle_count);
1462     + seq_printf(seq, "capacity granularity 1: %d %sh\n",
1463     + battery->capacity_granularity_1,
1464     + acpi_battery_units(battery));
1465     + seq_printf(seq, "capacity granularity 2: %d %sh\n",
1466     + battery->capacity_granularity_2,
1467     + acpi_battery_units(battery));
1468     + seq_printf(seq, "model number: %s\n", battery->model_number);
1469     + seq_printf(seq, "serial number: %s\n", battery->serial_number);
1470     + seq_printf(seq, "battery type: %s\n", battery->type);
1471     + seq_printf(seq, "OEM info: %s\n", battery->oem_info);
1472     + end:
1473     + if (result)
1474     + seq_printf(seq, "ERROR: Unable to read battery info\n");
1475     + return result;
1476     +}
1477     +
1478     +static int acpi_battery_print_state(struct seq_file *seq, int result)
1479     +{
1480     + struct acpi_battery *battery = seq->private;
1481     +
1482     + if (result)
1483     + goto end;
1484     +
1485     + seq_printf(seq, "present: %s\n",
1486     + acpi_battery_present(battery) ? "yes" : "no");
1487     + if (!acpi_battery_present(battery))
1488     + goto end;
1489     +
1490     + seq_printf(seq, "capacity state: %s\n",
1491     + (battery->state & 0x04) ? "critical" : "ok");
1492     + if ((battery->state & 0x01) && (battery->state & 0x02))
1493     + seq_printf(seq,
1494     + "charging state: charging/discharging\n");
1495     + else if (battery->state & 0x01)
1496     + seq_printf(seq, "charging state: discharging\n");
1497     + else if (battery->state & 0x02)
1498     + seq_printf(seq, "charging state: charging\n");
1499     + else
1500     + seq_printf(seq, "charging state: charged\n");
1501     +
1502     + if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
1503     + seq_printf(seq, "present rate: unknown\n");
1504     + else
1505     + seq_printf(seq, "present rate: %d %s\n",
1506     + battery->rate_now, acpi_battery_units(battery));
1507     +
1508     + if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
1509     + seq_printf(seq, "remaining capacity: unknown\n");
1510     + else
1511     + seq_printf(seq, "remaining capacity: %d %sh\n",
1512     + battery->capacity_now, acpi_battery_units(battery));
1513     + if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
1514     + seq_printf(seq, "present voltage: unknown\n");
1515     + else
1516     + seq_printf(seq, "present voltage: %d mV\n",
1517     + battery->voltage_now);
1518     + end:
1519     + if (result)
1520     + seq_printf(seq, "ERROR: Unable to read battery state\n");
1521     +
1522     + return result;
1523     +}
1524     +
1525     +static int acpi_battery_print_alarm(struct seq_file *seq, int result)
1526     +{
1527     + struct acpi_battery *battery = seq->private;
1528     +
1529     + if (result)
1530     + goto end;
1531     +
1532     + if (!acpi_battery_present(battery)) {
1533     + seq_printf(seq, "present: no\n");
1534     + goto end;
1535     + }
1536     + seq_printf(seq, "alarm: ");
1537     + if (!battery->alarm)
1538     + seq_printf(seq, "unsupported\n");
1539     + else
1540     + seq_printf(seq, "%u %sh\n", battery->alarm,
1541     + acpi_battery_units(battery));
1542     + end:
1543     + if (result)
1544     + seq_printf(seq, "ERROR: Unable to read battery alarm\n");
1545     + return result;
1546     +}
1547     +
1548     +static ssize_t acpi_battery_write_alarm(struct file *file,
1549     + const char __user * buffer,
1550     + size_t count, loff_t * ppos)
1551     +{
1552     + int result = 0;
1553     + char alarm_string[12] = { '\0' };
1554     + struct seq_file *m = file->private_data;
1555     + struct acpi_battery *battery = m->private;
1556     +
1557     + if (!battery || (count > sizeof(alarm_string) - 1))
1558     + return -EINVAL;
1559     + if (!acpi_battery_present(battery)) {
1560     + result = -ENODEV;
1561     + goto end;
1562     + }
1563     + if (copy_from_user(alarm_string, buffer, count)) {
1564     + result = -EFAULT;
1565     + goto end;
1566     + }
1567     + alarm_string[count] = '\0';
1568     + battery->alarm = simple_strtol(alarm_string, NULL, 0);
1569     + result = acpi_battery_set_alarm(battery);
1570     + end:
1571     + if (!result)
1572     + return count;
1573     + return result;
1574     +}
1575     +
1576     +typedef int(*print_func)(struct seq_file *seq, int result);
1577     +
1578     +static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = {
1579     + acpi_battery_print_info,
1580     + acpi_battery_print_state,
1581     + acpi_battery_print_alarm,
1582     +};
1583     +
1584     +static int acpi_battery_read(int fid, struct seq_file *seq)
1585     +{
1586     + struct acpi_battery *battery = seq->private;
1587     + int result = acpi_battery_update(battery);
1588     + return acpi_print_funcs[fid](seq, result);
1589     +}
1590     +
1591     +#define DECLARE_FILE_FUNCTIONS(_name) \
1592     +static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \
1593     +{ \
1594     + return acpi_battery_read(_name##_tag, seq); \
1595     +} \
1596     +static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \
1597     +{ \
1598     + return single_open(file, acpi_battery_read_##_name, PDE_DATA(inode)); \
1599     +}
1600     +
1601     +DECLARE_FILE_FUNCTIONS(info);
1602     +DECLARE_FILE_FUNCTIONS(state);
1603     +DECLARE_FILE_FUNCTIONS(alarm);
1604     +
1605     +#undef DECLARE_FILE_FUNCTIONS
1606     +
1607     +#define FILE_DESCRIPTION_RO(_name) \
1608     + { \
1609     + .name = __stringify(_name), \
1610     + .mode = S_IRUGO, \
1611     + .ops = { \
1612     + .open = acpi_battery_##_name##_open_fs, \
1613     + .read = seq_read, \
1614     + .llseek = seq_lseek, \
1615     + .release = single_release, \
1616     + .owner = THIS_MODULE, \
1617     + }, \
1618     + }
1619     +
1620     +#define FILE_DESCRIPTION_RW(_name) \
1621     + { \
1622     + .name = __stringify(_name), \
1623     + .mode = S_IFREG | S_IRUGO | S_IWUSR, \
1624     + .ops = { \
1625     + .open = acpi_battery_##_name##_open_fs, \
1626     + .read = seq_read, \
1627     + .llseek = seq_lseek, \
1628     + .write = acpi_battery_write_##_name, \
1629     + .release = single_release, \
1630     + .owner = THIS_MODULE, \
1631     + }, \
1632     + }
1633     +
1634     +static const struct battery_file {
1635     + struct file_operations ops;
1636     + umode_t mode;
1637     + const char *name;
1638     +} acpi_battery_file[] = {
1639     + FILE_DESCRIPTION_RO(info),
1640     + FILE_DESCRIPTION_RO(state),
1641     + FILE_DESCRIPTION_RW(alarm),
1642     +};
1643     +
1644     +#undef FILE_DESCRIPTION_RO
1645     +#undef FILE_DESCRIPTION_RW
1646     +
1647     +static int acpi_battery_add_fs(struct acpi_device *device)
1648     +{
1649     + struct proc_dir_entry *entry = NULL;
1650     + int i;
1651     +
1652     + printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
1653     + " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
1654     + if (!acpi_device_dir(device)) {
1655     + acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
1656     + acpi_battery_dir);
1657     + if (!acpi_device_dir(device))
1658     + return -ENODEV;
1659     + }
1660     +
1661     + for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
1662     + entry = proc_create_data(acpi_battery_file[i].name,
1663     + acpi_battery_file[i].mode,
1664     + acpi_device_dir(device),
1665     + &acpi_battery_file[i].ops,
1666     + acpi_driver_data(device));
1667     + if (!entry)
1668     + return -ENODEV;
1669     + }
1670     + return 0;
1671     +}
1672     +
1673     +static void acpi_battery_remove_fs(struct acpi_device *device)
1674     +{
1675     + int i;
1676     + if (!acpi_device_dir(device))
1677     + return;
1678     + for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i)
1679     + remove_proc_entry(acpi_battery_file[i].name,
1680     + acpi_device_dir(device));
1681     +
1682     + remove_proc_entry(acpi_device_bid(device), acpi_battery_dir);
1683     + acpi_device_dir(device) = NULL;
1684     +}
1685     +
1686     +#endif
1687     +
1688     +/* --------------------------------------------------------------------------
1689     Driver Interface
1690     -------------------------------------------------------------------------- */
1691    
1692     @@ -791,6 +1091,15 @@ static int acpi_battery_add(struct acpi_device *device)
1693     result = acpi_battery_update(battery);
1694     if (result)
1695     goto fail;
1696     +#ifdef CONFIG_ACPI_PROCFS_POWER
1697     + result = acpi_battery_add_fs(device);
1698     +#endif
1699     + if (result) {
1700     +#ifdef CONFIG_ACPI_PROCFS_POWER
1701     + acpi_battery_remove_fs(device);
1702     +#endif
1703     + goto fail;
1704     + }
1705    
1706     printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
1707     ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
1708     @@ -817,6 +1126,9 @@ static int acpi_battery_remove(struct acpi_device *device)
1709     return -EINVAL;
1710     battery = acpi_driver_data(device);
1711     unregister_pm_notifier(&battery->pm_nb);
1712     +#ifdef CONFIG_ACPI_PROCFS_POWER
1713     + acpi_battery_remove_fs(device);
1714     +#endif
1715     sysfs_remove_battery(battery);
1716     mutex_destroy(&battery->lock);
1717     mutex_destroy(&battery->sysfs_lock);
1718     @@ -867,7 +1179,19 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
1719    
1720     if (dmi_check_system(bat_dmi_table))
1721     battery_bix_broken_package = 1;
1722     - acpi_bus_register_driver(&acpi_battery_driver);
1723     +
1724     +#ifdef CONFIG_ACPI_PROCFS_POWER
1725     + acpi_battery_dir = acpi_lock_battery_dir();
1726     + if (!acpi_battery_dir)
1727     + return;
1728     +#endif
1729     + if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
1730     +#ifdef CONFIG_ACPI_PROCFS_POWER
1731     + acpi_unlock_battery_dir(acpi_battery_dir);
1732     +#endif
1733     + return;
1734     + }
1735     + return;
1736     }
1737    
1738     static int __init acpi_battery_init(void)
1739     @@ -879,6 +1203,9 @@ static int __init acpi_battery_init(void)
1740     static void __exit acpi_battery_exit(void)
1741     {
1742     acpi_bus_unregister_driver(&acpi_battery_driver);
1743     +#ifdef CONFIG_ACPI_PROCFS_POWER
1744     + acpi_unlock_battery_dir(acpi_battery_dir);
1745     +#endif
1746     }
1747    
1748     module_init(acpi_battery_init);
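
The procfs interface restored above is built on the standard seq_file helpers: a show callback prints the data, single_open() wires it into the open handler, and proc_create_data() attaches the per-device pointer that later comes back via PDE_DATA(). A minimal sketch of that pattern for a 3.x kernel follows; the demo_* names and the /proc/demo entry are illustrative only and are not part of this patch.

    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static char demo_data[] = "hello";

    static int demo_show(struct seq_file *seq, void *offset)
    {
            /* seq->private carries the pointer handed to proc_create_data() */
            seq_printf(seq, "value: %s\n", (char *)seq->private);
            return 0;
    }

    static int demo_open(struct inode *inode, struct file *file)
    {
            /* PDE_DATA() recovers the data pointer stored at creation time */
            return single_open(file, demo_show, PDE_DATA(inode));
    }

    static const struct file_operations demo_fops = {
            .owner   = THIS_MODULE,
            .open    = demo_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    static int __init demo_init(void)
    {
            if (!proc_create_data("demo", 0444, NULL, &demo_fops, demo_data))
                    return -ENOMEM;
            return 0;
    }

    static void __exit demo_exit(void)
    {
            remove_proc_entry("demo", NULL);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

The DECLARE_FILE_FUNCTIONS and FILE_DESCRIPTION_* macros in the battery.c hunk above generate exactly this boilerplate once per proc file.
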
1749     diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
1750     index afec4526c48a..3d8413d02a97 100644
1751     --- a/drivers/acpi/blacklist.c
1752     +++ b/drivers/acpi/blacklist.c
1753     @@ -314,6 +314,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
1754     DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
1755     },
1756     },
1757     + {
1758     + .callback = dmi_disable_osi_win8,
1759     + .ident = "Dell Inspiron 7737",
1760     + .matches = {
1761     + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1762     + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"),
1763     + },
1764     + },
1765    
1766     /*
1767     * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
1768     @@ -374,6 +382,19 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
1769     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"),
1770     },
1771     },
1772     + /*
1773     + * Without this, this EEEpc exports a non-working WMI interface; with
1774     + * this it exports a working "good old" eeepc_laptop interface, fixing
1775     + * both brightness control and rfkill not working.
1776     + */
1777     + {
1778     + .callback = dmi_enable_osi_linux,
1779     + .ident = "Asus EEE PC 1015PX",
1780     + .matches = {
1781     + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
1782     + DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
1783     + },
1784     + },
1785     {}
1786     };
1787    
1788     diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
1789     new file mode 100644
1790     index 000000000000..6c9ee68e46fb
1791     --- /dev/null
1792     +++ b/drivers/acpi/cm_sbs.c
1793     @@ -0,0 +1,105 @@
1794     +/*
1795     + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1796     + *
1797     + * This program is free software; you can redistribute it and/or modify
1798     + * it under the terms of the GNU General Public License as published by
1799     + * the Free Software Foundation; either version 2 of the License, or (at
1800     + * your option) any later version.
1801     + *
1802     + * This program is distributed in the hope that it will be useful, but
1803     + * WITHOUT ANY WARRANTY; without even the implied warranty of
1804     + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1805     + * General Public License for more details.
1806     + *
1807     + * You should have received a copy of the GNU General Public License along
1808     + * with this program; if not, write to the Free Software Foundation, Inc.,
1809     + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
1810     + *
1811     + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1812     + */
1813     +
1814     +#include <linux/kernel.h>
1815     +#include <linux/module.h>
1816     +#include <linux/init.h>
1817     +#include <linux/acpi.h>
1818     +#include <linux/types.h>
1819     +#include <linux/proc_fs.h>
1820     +#include <linux/seq_file.h>
1821     +#include <acpi/acpi_bus.h>
1822     +#include <acpi/acpi_drivers.h>
1823     +
1824     +#define PREFIX "ACPI: "
1825     +
1826     +ACPI_MODULE_NAME("cm_sbs");
1827     +#define ACPI_AC_CLASS "ac_adapter"
1828     +#define ACPI_BATTERY_CLASS "battery"
1829     +#define _COMPONENT ACPI_SBS_COMPONENT
1830     +static struct proc_dir_entry *acpi_ac_dir;
1831     +static struct proc_dir_entry *acpi_battery_dir;
1832     +
1833     +static DEFINE_MUTEX(cm_sbs_mutex);
1834     +
1835     +static int lock_ac_dir_cnt;
1836     +static int lock_battery_dir_cnt;
1837     +
1838     +struct proc_dir_entry *acpi_lock_ac_dir(void)
1839     +{
1840     + mutex_lock(&cm_sbs_mutex);
1841     + if (!acpi_ac_dir)
1842     + acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir);
1843     + if (acpi_ac_dir) {
1844     + lock_ac_dir_cnt++;
1845     + } else {
1846     + printk(KERN_ERR PREFIX
1847     + "Cannot create %s\n", ACPI_AC_CLASS);
1848     + }
1849     + mutex_unlock(&cm_sbs_mutex);
1850     + return acpi_ac_dir;
1851     +}
1852     +EXPORT_SYMBOL(acpi_lock_ac_dir);
1853     +
1854     +void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param)
1855     +{
1856     + mutex_lock(&cm_sbs_mutex);
1857     + if (acpi_ac_dir_param)
1858     + lock_ac_dir_cnt--;
1859     + if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) {
1860     + remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir);
1861     + acpi_ac_dir = NULL;
1862     + }
1863     + mutex_unlock(&cm_sbs_mutex);
1864     +}
1865     +EXPORT_SYMBOL(acpi_unlock_ac_dir);
1866     +
1867     +struct proc_dir_entry *acpi_lock_battery_dir(void)
1868     +{
1869     + mutex_lock(&cm_sbs_mutex);
1870     + if (!acpi_battery_dir) {
1871     + acpi_battery_dir =
1872     + proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir);
1873     + }
1874     + if (acpi_battery_dir) {
1875     + lock_battery_dir_cnt++;
1876     + } else {
1877     + printk(KERN_ERR PREFIX
1878     + "Cannot create %s\n", ACPI_BATTERY_CLASS);
1879     + }
1880     + mutex_unlock(&cm_sbs_mutex);
1881     + return acpi_battery_dir;
1882     +}
1883     +EXPORT_SYMBOL(acpi_lock_battery_dir);
1884     +
1885     +void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param)
1886     +{
1887     + mutex_lock(&cm_sbs_mutex);
1888     + if (acpi_battery_dir_param)
1889     + lock_battery_dir_cnt--;
1890     + if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param
1891     + && acpi_battery_dir) {
1892     + remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir);
1893     + acpi_battery_dir = NULL;
1894     + }
1895     + mutex_unlock(&cm_sbs_mutex);
1896     + return;
1897     +}
1898     +EXPORT_SYMBOL(acpi_unlock_battery_dir);
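
cm_sbs.c implements a small refcounted singleton: the first locker creates the shared proc directory, later lockers only bump a count, and the last unlocker removes it, all under one mutex. The same shape in plain C, reduced to its essentials (get_shared_dir/put_shared_dir and the pthread scaffolding are illustrative stand-ins, not kernel code):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_mutex_t dir_lock = PTHREAD_MUTEX_INITIALIZER;
    static int dir_refcount;
    static char *shared_dir;                /* stands in for the proc_dir_entry */

    static char *get_shared_dir(void)
    {
            pthread_mutex_lock(&dir_lock);
            if (!shared_dir)
                    shared_dir = strdup("battery");  /* created on first use */
            if (shared_dir)
                    dir_refcount++;                  /* count only successful users */
            pthread_mutex_unlock(&dir_lock);
            return shared_dir;
    }

    static void put_shared_dir(char *dir)
    {
            pthread_mutex_lock(&dir_lock);
            if (dir)
                    dir_refcount--;
            if (dir && dir_refcount == 0 && shared_dir) {
                    free(shared_dir);                /* last user tears it down */
                    shared_dir = NULL;
            }
            pthread_mutex_unlock(&dir_lock);
    }

acpi_lock_battery_dir()/acpi_unlock_battery_dir() above follow the same create-on-first-use, remove-on-last-release discipline, which is what lets the battery and sbs drivers share one /proc/acpi/battery directory.
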
1899     diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
1900     index d7d32c28829b..ad11ba4a412d 100644
1901     --- a/drivers/acpi/ec.c
1902     +++ b/drivers/acpi/ec.c
1903     @@ -206,13 +206,13 @@ unlock:
1904     spin_unlock_irqrestore(&ec->lock, flags);
1905     }
1906    
1907     -static int acpi_ec_sync_query(struct acpi_ec *ec);
1908     +static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
1909    
1910     static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
1911     {
1912     if (state & ACPI_EC_FLAG_SCI) {
1913     if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
1914     - return acpi_ec_sync_query(ec);
1915     + return acpi_ec_sync_query(ec, NULL);
1916     }
1917     return 0;
1918     }
1919     @@ -443,10 +443,8 @@ acpi_handle ec_get_handle(void)
1920    
1921     EXPORT_SYMBOL(ec_get_handle);
1922    
1923     -static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
1924     -
1925     /*
1926     - * Clears stale _Q events that might have accumulated in the EC.
1927     + * Process _Q events that might have accumulated in the EC.
1928     * Run with locked ec mutex.
1929     */
1930     static void acpi_ec_clear(struct acpi_ec *ec)
1931     @@ -455,7 +453,7 @@ static void acpi_ec_clear(struct acpi_ec *ec)
1932     u8 value = 0;
1933    
1934     for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
1935     - status = acpi_ec_query_unlocked(ec, &value);
1936     + status = acpi_ec_sync_query(ec, &value);
1937     if (status || !value)
1938     break;
1939     }
1940     @@ -582,13 +580,18 @@ static void acpi_ec_run(void *cxt)
1941     kfree(handler);
1942     }
1943    
1944     -static int acpi_ec_sync_query(struct acpi_ec *ec)
1945     +static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
1946     {
1947     u8 value = 0;
1948     int status;
1949     struct acpi_ec_query_handler *handler, *copy;
1950     - if ((status = acpi_ec_query_unlocked(ec, &value)))
1951     +
1952     + status = acpi_ec_query_unlocked(ec, &value);
1953     + if (data)
1954     + *data = value;
1955     + if (status)
1956     return status;
1957     +
1958     list_for_each_entry(handler, &ec->list, node) {
1959     if (value == handler->query_bit) {
1960     /* have custom handler for this bit */
1961     @@ -612,7 +615,7 @@ static void acpi_ec_gpe_query(void *ec_cxt)
1962     if (!ec)
1963     return;
1964     mutex_lock(&ec->mutex);
1965     - acpi_ec_sync_query(ec);
1966     + acpi_ec_sync_query(ec, NULL);
1967     mutex_unlock(&ec->mutex);
1968     }
1969    
1970     diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
1971     index b6ba88ed31ae..bb0b90461a6b 100644
1972     --- a/drivers/acpi/video.c
1973     +++ b/drivers/acpi/video.c
1974     @@ -459,10 +459,10 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
1975     },
1976     {
1977     .callback = video_set_use_native_backlight,
1978     - .ident = "ThinkPad T430s",
1979     + .ident = "ThinkPad T430 and T430s",
1980     .matches = {
1981     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1982     - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"),
1983     + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430"),
1984     },
1985     },
1986     {
1987     @@ -474,7 +474,7 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
1988     },
1989     },
1990     {
1991     - .callback = video_set_use_native_backlight,
1992     + .callback = video_set_use_native_backlight,
1993     .ident = "ThinkPad X1 Carbon",
1994     .matches = {
1995     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1996     @@ -494,7 +494,7 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
1997     .ident = "Dell Inspiron 7520",
1998     .matches = {
1999     DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
2000     - DMI_MATCH(DMI_PRODUCT_VERSION, "Inspiron 7520"),
2001     + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
2002     },
2003     },
2004     {
2005     @@ -507,6 +507,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
2006     },
2007     {
2008     .callback = video_set_use_native_backlight,
2009     + .ident = "Acer Aspire 5742G",
2010     + .matches = {
2011     + DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2012     + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5742G"),
2013     + },
2014     + },
2015     + {
2016     + .callback = video_set_use_native_backlight,
2017     .ident = "Acer Aspire V5-431",
2018     .matches = {
2019     DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2020     diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
2021     index 0a79c540169c..bb26636f10c8 100644
2022     --- a/drivers/ata/libata-core.c
2023     +++ b/drivers/ata/libata-core.c
2024     @@ -6329,6 +6329,8 @@ int ata_host_activate(struct ata_host *host, int irq,
2025     static void ata_port_detach(struct ata_port *ap)
2026     {
2027     unsigned long flags;
2028     + struct ata_link *link;
2029     + struct ata_device *dev;
2030    
2031     if (!ap->ops->error_handler)
2032     goto skip_eh;
2033     @@ -6348,6 +6350,13 @@ static void ata_port_detach(struct ata_port *ap)
2034     cancel_delayed_work_sync(&ap->hotplug_task);
2035    
2036     skip_eh:
2037     + /* clean up zpodd on port removal */
2038     + ata_for_each_link(link, ap, HOST_FIRST) {
2039     + ata_for_each_dev(dev, link, ALL) {
2040     + if (zpodd_dev_enabled(dev))
2041     + zpodd_exit(dev);
2042     + }
2043     + }
2044     if (ap->pmp_link) {
2045     int i;
2046     for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
2047     diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
2048     index d63ee8f41a4f..e3a49df958a3 100644
2049     --- a/drivers/ata/pata_at91.c
2050     +++ b/drivers/ata/pata_at91.c
2051     @@ -408,12 +408,13 @@ static int pata_at91_probe(struct platform_device *pdev)
2052    
2053     host->private_data = info;
2054    
2055     - return ata_host_activate(host, gpio_is_valid(irq) ? gpio_to_irq(irq) : 0,
2056     - gpio_is_valid(irq) ? ata_sff_interrupt : NULL,
2057     - irq_flags, &pata_at91_sht);
2058     + ret = ata_host_activate(host, gpio_is_valid(irq) ? gpio_to_irq(irq) : 0,
2059     + gpio_is_valid(irq) ? ata_sff_interrupt : NULL,
2060     + irq_flags, &pata_at91_sht);
2061     + if (ret)
2062     + goto err_put;
2063    
2064     - if (!ret)
2065     - return 0;
2066     + return 0;
2067    
2068     err_put:
2069     clk_put(info->mck);
2070     diff --git a/drivers/base/dd.c b/drivers/base/dd.c
2071     index 06051767393f..8a8d611f2021 100644
2072     --- a/drivers/base/dd.c
2073     +++ b/drivers/base/dd.c
2074     @@ -52,6 +52,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);
2075     static LIST_HEAD(deferred_probe_pending_list);
2076     static LIST_HEAD(deferred_probe_active_list);
2077     static struct workqueue_struct *deferred_wq;
2078     +static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
2079    
2080     /**
2081     * deferred_probe_work_func() - Retry probing devices in the active list.
2082     @@ -135,6 +136,17 @@ static bool driver_deferred_probe_enable = false;
2083     * This functions moves all devices from the pending list to the active
2084     * list and schedules the deferred probe workqueue to process them. It
2085     * should be called anytime a driver is successfully bound to a device.
2086     + *
2087     + * Note, there is a race condition in multi-threaded probe. In the case where
2088     + * more than one device is probing at the same time, it is possible for one
2089     + * probe to complete successfully while another is about to defer. If the second
2090     + * depends on the first, then it will get put on the pending list after the
2091     + * trigger event has already occurred and will be stuck there.
2092     + *
2093     + * The atomic 'deferred_trigger_count' is used to determine if a successful
2094     + * trigger has occurred in the midst of probing a driver. If the trigger count
2095     + * changes in the midst of a probe, then deferred processing should be triggered
2096     + * again.
2097     */
2098     static void driver_deferred_probe_trigger(void)
2099     {
2100     @@ -147,6 +159,7 @@ static void driver_deferred_probe_trigger(void)
2101     * into the active list so they can be retried by the workqueue
2102     */
2103     mutex_lock(&deferred_probe_mutex);
2104     + atomic_inc(&deferred_trigger_count);
2105     list_splice_tail_init(&deferred_probe_pending_list,
2106     &deferred_probe_active_list);
2107     mutex_unlock(&deferred_probe_mutex);
2108     @@ -265,6 +278,7 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
2109     static int really_probe(struct device *dev, struct device_driver *drv)
2110     {
2111     int ret = 0;
2112     + int local_trigger_count = atomic_read(&deferred_trigger_count);
2113    
2114     atomic_inc(&probe_count);
2115     pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
2116     @@ -310,6 +324,9 @@ probe_failed:
2117     /* Driver requested deferred probing */
2118     dev_info(dev, "Driver %s requests probe deferral\n", drv->name);
2119     driver_deferred_probe_add(dev);
2120     + /* Did a trigger occur while probing? Need to re-trigger if yes */
2121     + if (local_trigger_count != atomic_read(&deferred_trigger_count))
2122     + driver_deferred_probe_trigger();
2123     } else if (ret != -ENODEV && ret != -ENXIO) {
2124     /* driver matched but the probe failed */
2125     printk(KERN_WARNING
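
The comment added above describes a lost-trigger race and its fix: sample a generation counter before probing, and if the counter moved by the time the probe decides to defer, fire the trigger again yourself. Stripped of the driver-core machinery, the pattern looks roughly like the sketch below; do_probe(), add_to_pending_list() and the C11-atomics scaffolding are stand-ins, not the actual drivers/base code.

    #include <stdatomic.h>
    #include <stdio.h>

    #define EPROBE_DEFER 517                /* value used by the kernel */

    static atomic_int trigger_count;        /* bumped on every successful trigger */

    /* Stand-ins for the real driver-core machinery. */
    static int do_probe(void) { return -EPROBE_DEFER; }
    static void add_to_pending_list(void) { }

    static void retrigger_deferred_probes(void)
    {
            atomic_fetch_add(&trigger_count, 1);
            /* ...splice the pending list onto the active list, kick the workqueue... */
    }

    static void probe_one(void)
    {
            int snapshot = atomic_load(&trigger_count);

            if (do_probe() == -EPROBE_DEFER) {
                    add_to_pending_list();
                    /*
                     * A trigger may have fired between the snapshot and the
                     * defer; without this check the device would sit on the
                     * pending list until some unrelated probe succeeded.
                     */
                    if (atomic_load(&trigger_count) != snapshot)
                            retrigger_deferred_probes();
            }
    }

    int main(void)
    {
            probe_one();
            printf("triggers so far: %d\n", atomic_load(&trigger_count));
            return 0;
    }
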
2126     diff --git a/drivers/base/platform.c b/drivers/base/platform.c
2127     index bc78848dd59a..3c51eb0bd659 100644
2128     --- a/drivers/base/platform.c
2129     +++ b/drivers/base/platform.c
2130     @@ -13,6 +13,7 @@
2131     #include <linux/string.h>
2132     #include <linux/platform_device.h>
2133     #include <linux/of_device.h>
2134     +#include <linux/of_irq.h>
2135     #include <linux/module.h>
2136     #include <linux/init.h>
2137     #include <linux/dma-mapping.h>
2138     @@ -87,7 +88,11 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
2139     return -ENXIO;
2140     return dev->archdata.irqs[num];
2141     #else
2142     - struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num);
2143     + struct resource *r;
2144     + if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
2145     + return of_irq_get(dev->dev.of_node, num);
2146     +
2147     + r = platform_get_resource(dev, IORESOURCE_IRQ, num);
2148    
2149     return r ? r->start : -ENXIO;
2150     #endif
2151     diff --git a/drivers/base/topology.c b/drivers/base/topology.c
2152     index 94ffee378f10..37a5661ca5f9 100644
2153     --- a/drivers/base/topology.c
2154     +++ b/drivers/base/topology.c
2155     @@ -40,8 +40,7 @@
2156     static ssize_t show_##name(struct device *dev, \
2157     struct device_attribute *attr, char *buf) \
2158     { \
2159     - unsigned int cpu = dev->id; \
2160     - return sprintf(buf, "%d\n", topology_##name(cpu)); \
2161     + return sprintf(buf, "%d\n", topology_##name(dev->id)); \
2162     }
2163    
2164     #if defined(topology_thread_cpumask) || defined(topology_core_cpumask) || \
2165     diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
2166     index 106d1d8e16ad..b11949c5b029 100644
2167     --- a/drivers/bluetooth/ath3k.c
2168     +++ b/drivers/bluetooth/ath3k.c
2169     @@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = {
2170     { USB_DEVICE(0x04CA, 0x3004) },
2171     { USB_DEVICE(0x04CA, 0x3005) },
2172     { USB_DEVICE(0x04CA, 0x3006) },
2173     + { USB_DEVICE(0x04CA, 0x3007) },
2174     { USB_DEVICE(0x04CA, 0x3008) },
2175     { USB_DEVICE(0x04CA, 0x300b) },
2176     { USB_DEVICE(0x13d3, 0x3362) },
2177     @@ -127,6 +128,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
2178     { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
2179     { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
2180     { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
2181     + { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
2182     { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
2183     { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
2184     { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
2185     diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
2186     index baeaaed299e4..1c7b5040d921 100644
2187     --- a/drivers/bluetooth/btusb.c
2188     +++ b/drivers/bluetooth/btusb.c
2189     @@ -149,6 +149,7 @@ static const struct usb_device_id blacklist_table[] = {
2190     { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
2191     { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
2192     { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
2193     + { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
2194     { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
2195     { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
2196     { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
2197     @@ -1478,10 +1479,8 @@ static int btusb_probe(struct usb_interface *intf,
2198     if (id->driver_info & BTUSB_BCM92035)
2199     hdev->setup = btusb_setup_bcm92035;
2200    
2201     - if (id->driver_info & BTUSB_INTEL) {
2202     - usb_enable_autosuspend(data->udev);
2203     + if (id->driver_info & BTUSB_INTEL)
2204     hdev->setup = btusb_setup_intel;
2205     - }
2206    
2207     /* Interface numbers are hardcoded in the specification */
2208     data->isoc = usb_ifnum_to_if(data->udev, 1);
2209     diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
2210     index 725c46162bbd..372ae72cce34 100644
2211     --- a/drivers/bus/mvebu-mbus.c
2212     +++ b/drivers/bus/mvebu-mbus.c
2213     @@ -222,12 +222,6 @@ static int mvebu_mbus_window_conflicts(struct mvebu_mbus_state *mbus,
2214     */
2215     if ((u64)base < wend && end > wbase)
2216     return 0;
2217     -
2218     - /*
2219     - * Check if target/attribute conflicts
2220     - */
2221     - if (target == wtarget && attr == wattr)
2222     - return 0;
2223     }
2224    
2225     return 1;
2226     diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
2227     index 6a4bdc18955a..8c25f596808a 100644
2228     --- a/drivers/char/ipmi/ipmi_kcs_sm.c
2229     +++ b/drivers/char/ipmi/ipmi_kcs_sm.c
2230     @@ -251,8 +251,9 @@ static inline int check_obf(struct si_sm_data *kcs, unsigned char status,
2231     if (!GET_STATUS_OBF(status)) {
2232     kcs->obf_timeout -= time;
2233     if (kcs->obf_timeout < 0) {
2234     - start_error_recovery(kcs, "OBF not ready in time");
2235     - return 1;
2236     + kcs->obf_timeout = OBF_RETRY_TIMEOUT;
2237     + start_error_recovery(kcs, "OBF not ready in time");
2238     + return 1;
2239     }
2240     return 0;
2241     }
2242     diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
2243     index 03f41896d090..8b4fa2c4e661 100644
2244     --- a/drivers/char/ipmi/ipmi_si_intf.c
2245     +++ b/drivers/char/ipmi/ipmi_si_intf.c
2246     @@ -249,6 +249,9 @@ struct smi_info {
2247     /* The timer for this si. */
2248     struct timer_list si_timer;
2249    
2250     + /* This flag is set, if the timer is running (timer_pending() isn't enough) */
2251     + bool timer_running;
2252     +
2253     /* The time (in jiffies) the last timeout occurred at. */
2254     unsigned long last_timeout_jiffies;
2255    
2256     @@ -435,6 +438,13 @@ static void start_clear_flags(struct smi_info *smi_info)
2257     smi_info->si_state = SI_CLEARING_FLAGS;
2258     }
2259    
2260     +static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
2261     +{
2262     + smi_info->last_timeout_jiffies = jiffies;
2263     + mod_timer(&smi_info->si_timer, new_val);
2264     + smi_info->timer_running = true;
2265     +}
2266     +
2267     /*
2268     * When we have a situation where we run out of memory and cannot
2269     * allocate messages, we just leave them in the BMC and run the system
2270     @@ -447,8 +457,7 @@ static inline void disable_si_irq(struct smi_info *smi_info)
2271     start_disable_irq(smi_info);
2272     smi_info->interrupt_disabled = 1;
2273     if (!atomic_read(&smi_info->stop_operation))
2274     - mod_timer(&smi_info->si_timer,
2275     - jiffies + SI_TIMEOUT_JIFFIES);
2276     + smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
2277     }
2278     }
2279    
2280     @@ -908,15 +917,7 @@ static void sender(void *send_info,
2281     list_add_tail(&msg->link, &smi_info->xmit_msgs);
2282    
2283     if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
2284     - /*
2285     - * last_timeout_jiffies is updated here to avoid
2286     - * smi_timeout() handler passing very large time_diff
2287     - * value to smi_event_handler() that causes
2288     - * the send command to abort.
2289     - */
2290     - smi_info->last_timeout_jiffies = jiffies;
2291     -
2292     - mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
2293     + smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
2294    
2295     if (smi_info->thread)
2296     wake_up_process(smi_info->thread);
2297     @@ -1005,6 +1006,17 @@ static int ipmi_thread(void *data)
2298    
2299     spin_lock_irqsave(&(smi_info->si_lock), flags);
2300     smi_result = smi_event_handler(smi_info, 0);
2301     +
2302     + /*
2303     + * If the driver is doing something, there is a possible
2304     + * race with the timer. If the timer handler sees idle,
2305     + * and the thread here sees something else, the timer
2306     + * handler won't restart the timer even though it is
2307     + * required. So start it here if necessary.
2308     + */
2309     + if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
2310     + smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
2311     +
2312     spin_unlock_irqrestore(&(smi_info->si_lock), flags);
2313     busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
2314     &busy_until);
2315     @@ -1074,10 +1086,6 @@ static void smi_timeout(unsigned long data)
2316     * SI_USEC_PER_JIFFY);
2317     smi_result = smi_event_handler(smi_info, time_diff);
2318    
2319     - spin_unlock_irqrestore(&(smi_info->si_lock), flags);
2320     -
2321     - smi_info->last_timeout_jiffies = jiffies_now;
2322     -
2323     if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
2324     /* Running with interrupts, only do long timeouts. */
2325     timeout = jiffies + SI_TIMEOUT_JIFFIES;
2326     @@ -1099,7 +1107,10 @@ static void smi_timeout(unsigned long data)
2327    
2328     do_mod_timer:
2329     if (smi_result != SI_SM_IDLE)
2330     - mod_timer(&(smi_info->si_timer), timeout);
2331     + smi_mod_timer(smi_info, timeout);
2332     + else
2333     + smi_info->timer_running = false;
2334     + spin_unlock_irqrestore(&(smi_info->si_lock), flags);
2335     }
2336    
2337     static irqreturn_t si_irq_handler(int irq, void *data)
2338     @@ -1147,8 +1158,7 @@ static int smi_start_processing(void *send_info,
2339    
2340     /* Set up the timer that drives the interface. */
2341     setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
2342     - new_smi->last_timeout_jiffies = jiffies;
2343     - mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
2344     + smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
2345    
2346     /*
2347     * Check if the user forcefully enabled the daemon.
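
The ipmi_si change above funnels all timer arming through smi_mod_timer() and records a timer_running flag under the interface lock, because timer_pending() alone cannot tell the polling thread whether the timer handler has just decided to go idle. A reduced sketch of that idea, with mydev_* names standing in for the real smi_info structures:

    #include <linux/jiffies.h>
    #include <linux/spinlock.h>
    #include <linux/timer.h>
    #include <linux/types.h>

    struct mydev {
            spinlock_t lock;
            struct timer_list poll_timer;
            bool timer_running;     /* tracked explicitly; timer_pending() misses
                                     * the window while the handler is running */
    };

    static bool mydev_has_work(struct mydev *dev)
    {
            return false;           /* hardware-specific in a real driver */
    }

    static void mydev_mod_timer(struct mydev *dev, unsigned long expires)
    {
            /* caller holds dev->lock */
            mod_timer(&dev->poll_timer, expires);
            dev->timer_running = true;
    }

    static void mydev_timer_fn(unsigned long data)
    {
            struct mydev *dev = (struct mydev *)data;
            unsigned long flags;

            spin_lock_irqsave(&dev->lock, flags);
            if (mydev_has_work(dev))
                    mydev_mod_timer(dev, jiffies + HZ / 10);
            else
                    dev->timer_running = false;     /* handler goes idle */
            spin_unlock_irqrestore(&dev->lock, flags);
    }

    /* Polling thread: restart the timer if work shows up while it is idle. */
    static void mydev_poll_once(struct mydev *dev)
    {
            unsigned long flags;

            spin_lock_irqsave(&dev->lock, flags);
            if (mydev_has_work(dev) && !dev->timer_running)
                    mydev_mod_timer(dev, jiffies + HZ / 10);
            spin_unlock_irqrestore(&dev->lock, flags);
    }

    static void mydev_init(struct mydev *dev)
    {
            spin_lock_init(&dev->lock);
            setup_timer(&dev->poll_timer, mydev_timer_fn, (unsigned long)dev);
    }
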
2348     diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
2349     index b3ea223585bd..61dcc8011ec7 100644
2350     --- a/drivers/char/tpm/tpm_ppi.c
2351     +++ b/drivers/char/tpm/tpm_ppi.c
2352     @@ -328,13 +328,11 @@ int tpm_add_ppi(struct kobject *parent)
2353     /* Cache TPM ACPI handle and version string */
2354     acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
2355     ppi_callback, NULL, NULL, &tpm_ppi_handle);
2356     - if (tpm_ppi_handle == NULL)
2357     - return -ENODEV;
2358     -
2359     - return sysfs_create_group(parent, &ppi_attr_grp);
2360     + return tpm_ppi_handle ? sysfs_create_group(parent, &ppi_attr_grp) : 0;
2361     }
2362    
2363     void tpm_remove_ppi(struct kobject *parent)
2364     {
2365     - sysfs_remove_group(parent, &ppi_attr_grp);
2366     + if (tpm_ppi_handle)
2367     + sysfs_remove_group(parent, &ppi_attr_grp);
2368     }
2369     diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
2370     index c42e608af6bb..b94a311e5ab6 100644
2371     --- a/drivers/clk/clk.c
2372     +++ b/drivers/clk/clk.c
2373     @@ -1977,9 +1977,28 @@ struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
2374     }
2375     EXPORT_SYMBOL_GPL(__clk_register);
2376    
2377     -static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
2378     +/**
2379     + * clk_register - allocate a new clock, register it and return an opaque cookie
2380     + * @dev: device that is registering this clock
2381     + * @hw: link to hardware-specific clock data
2382     + *
2383     + * clk_register is the primary interface for populating the clock tree with new
2384     + * clock nodes. It returns a pointer to the newly allocated struct clk which
2385     + * cannot be dereferenced by driver code but may be used in conjunction with the
2386     + * rest of the clock API. In the event of an error clk_register will return an
2387     + * error code; drivers must test for an error code after calling clk_register.
2388     + */
2389     +struct clk *clk_register(struct device *dev, struct clk_hw *hw)
2390     {
2391     int i, ret;
2392     + struct clk *clk;
2393     +
2394     + clk = kzalloc(sizeof(*clk), GFP_KERNEL);
2395     + if (!clk) {
2396     + pr_err("%s: could not allocate clk\n", __func__);
2397     + ret = -ENOMEM;
2398     + goto fail_out;
2399     + }
2400    
2401     clk->name = kstrdup(hw->init->name, GFP_KERNEL);
2402     if (!clk->name) {
2403     @@ -2019,7 +2038,7 @@ static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
2404    
2405     ret = __clk_init(dev, clk);
2406     if (!ret)
2407     - return 0;
2408     + return clk;
2409    
2410     fail_parent_names_copy:
2411     while (--i >= 0)
2412     @@ -2028,36 +2047,6 @@ fail_parent_names_copy:
2413     fail_parent_names:
2414     kfree(clk->name);
2415     fail_name:
2416     - return ret;
2417     -}
2418     -
2419     -/**
2420     - * clk_register - allocate a new clock, register it and return an opaque cookie
2421     - * @dev: device that is registering this clock
2422     - * @hw: link to hardware-specific clock data
2423     - *
2424     - * clk_register is the primary interface for populating the clock tree with new
2425     - * clock nodes. It returns a pointer to the newly allocated struct clk which
2426     - * cannot be dereferenced by driver code but may be used in conjuction with the
2427     - * rest of the clock API. In the event of an error clk_register will return an
2428     - * error code; drivers must test for an error code after calling clk_register.
2429     - */
2430     -struct clk *clk_register(struct device *dev, struct clk_hw *hw)
2431     -{
2432     - int ret;
2433     - struct clk *clk;
2434     -
2435     - clk = kzalloc(sizeof(*clk), GFP_KERNEL);
2436     - if (!clk) {
2437     - pr_err("%s: could not allocate clk\n", __func__);
2438     - ret = -ENOMEM;
2439     - goto fail_out;
2440     - }
2441     -
2442     - ret = _clk_register(dev, hw, clk);
2443     - if (!ret)
2444     - return clk;
2445     -
2446     kfree(clk);
2447     fail_out:
2448     return ERR_PTR(ret);
2449     @@ -2144,9 +2133,10 @@ void clk_unregister(struct clk *clk)
2450    
2451     if (!hlist_empty(&clk->children)) {
2452     struct clk *child;
2453     + struct hlist_node *t;
2454    
2455     /* Reparent all children to the orphan list. */
2456     - hlist_for_each_entry(child, &clk->children, child_node)
2457     + hlist_for_each_entry_safe(child, t, &clk->children, child_node)
2458     clk_set_parent(child, NULL);
2459     }
2460    
2461     @@ -2166,7 +2156,7 @@ EXPORT_SYMBOL_GPL(clk_unregister);
2462    
2463     static void devm_clk_release(struct device *dev, void *res)
2464     {
2465     - clk_unregister(res);
2466     + clk_unregister(*(struct clk **)res);
2467     }
2468    
2469     /**
2470     @@ -2181,18 +2171,18 @@ static void devm_clk_release(struct device *dev, void *res)
2471     struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
2472     {
2473     struct clk *clk;
2474     - int ret;
2475     + struct clk **clkp;
2476    
2477     - clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL);
2478     - if (!clk)
2479     + clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
2480     + if (!clkp)
2481     return ERR_PTR(-ENOMEM);
2482    
2483     - ret = _clk_register(dev, hw, clk);
2484     - if (!ret) {
2485     - devres_add(dev, clk);
2486     + clk = clk_register(dev, hw);
2487     + if (!IS_ERR(clk)) {
2488     + *clkp = clk;
2489     + devres_add(dev, clkp);
2490     } else {
2491     - devres_free(clk);
2492     - clk = ERR_PTR(ret);
2493     + devres_free(clkp);
2494     }
2495    
2496     return clk;
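
With the refactor above, clk_register() allocates the struct clk itself and returns either a usable pointer or an ERR_PTR()-encoded error, and devm_clk_register() simply wraps it with a devres-managed pointer. Callers therefore check the result with IS_ERR(), roughly as in the hedged sketch below; my_clk_probe, my_clk_ops and the init data are illustrative, not taken from the patch.

    #include <linux/clk-provider.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static const struct clk_ops my_clk_ops = {
            /* .recalc_rate, .enable, ... as the hardware requires */
    };

    static const struct clk_init_data my_init = {
            .name = "my_clk",
            .ops  = &my_clk_ops,
    };

    static struct clk_hw my_hw = {
            .init = &my_init,
    };

    static int my_clk_probe(struct platform_device *pdev)
    {
            struct clk *clk;

            /* Returns a pointer or an ERR_PTR(); never a status integer. */
            clk = devm_clk_register(&pdev->dev, &my_hw);
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            platform_set_drvdata(pdev, clk);
            return 0;
    }

Because devm_clk_release() now stores a struct clk * in the devres area, the managed unregister path operates on the same pointer the driver received.
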
2497     diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
2498     index 0d20241e0770..e1769addf435 100644
2499     --- a/drivers/clk/tegra/clk-pll.c
2500     +++ b/drivers/clk/tegra/clk-pll.c
2501     @@ -1718,7 +1718,7 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name,
2502     "pll_re_vco");
2503     } else {
2504     val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL);
2505     - pll_writel(val, pll_params->aux_reg, pll);
2506     + pll_writel(val_aux, pll_params->aux_reg, pll);
2507     }
2508    
2509     clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
2510     diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
2511     index 2dc8b41a339d..a535c7bf8574 100644
2512     --- a/drivers/clk/versatile/clk-vexpress-osc.c
2513     +++ b/drivers/clk/versatile/clk-vexpress-osc.c
2514     @@ -102,7 +102,7 @@ void __init vexpress_osc_of_setup(struct device_node *node)
2515    
2516     osc = kzalloc(sizeof(*osc), GFP_KERNEL);
2517     if (!osc)
2518     - goto error;
2519     + return;
2520    
2521     osc->func = vexpress_config_func_get_by_node(node);
2522     if (!osc->func) {
2523     diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
2524     index 48f76bc05da0..e252939b9ee1 100644
2525     --- a/drivers/clocksource/exynos_mct.c
2526     +++ b/drivers/clocksource/exynos_mct.c
2527     @@ -418,8 +418,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
2528     evt->set_mode = exynos4_tick_set_mode;
2529     evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
2530     evt->rating = 450;
2531     - clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
2532     - 0xf, 0x7fffffff);
2533    
2534     exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
2535    
2536     @@ -432,9 +430,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
2537     evt->irq);
2538     return -EIO;
2539     }
2540     + irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu));
2541     } else {
2542     enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
2543     }
2544     + clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
2545     + 0xf, 0x7fffffff);
2546    
2547     return 0;
2548     }
2549     @@ -452,7 +453,6 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
2550     unsigned long action, void *hcpu)
2551     {
2552     struct mct_clock_event_device *mevt;
2553     - unsigned int cpu;
2554    
2555     /*
2556     * Grab cpu pointer in each case to avoid spurious
2557     @@ -463,12 +463,6 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
2558     mevt = this_cpu_ptr(&percpu_mct_tick);
2559     exynos4_local_timer_setup(&mevt->evt);
2560     break;
2561     - case CPU_ONLINE:
2562     - cpu = (unsigned long)hcpu;
2563     - if (mct_int_type == MCT_INT_SPI)
2564     - irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
2565     - cpumask_of(cpu));
2566     - break;
2567     case CPU_DYING:
2568     mevt = this_cpu_ptr(&percpu_mct_tick);
2569     exynos4_local_timer_stop(&mevt->evt);
2570     diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
2571     index 2cd36b9297f3..9ac378380677 100644
2572     --- a/drivers/cpufreq/intel_pstate.c
2573     +++ b/drivers/cpufreq/intel_pstate.c
2574     @@ -37,6 +37,7 @@
2575     #define BYT_RATIOS 0x66a
2576     #define BYT_VIDS 0x66b
2577     #define BYT_TURBO_RATIOS 0x66c
2578     +#define BYT_TURBO_VIDS 0x66d
2579    
2580    
2581     #define FRAC_BITS 6
2582     @@ -70,8 +71,9 @@ struct pstate_data {
2583     };
2584    
2585     struct vid_data {
2586     - int32_t min;
2587     - int32_t max;
2588     + int min;
2589     + int max;
2590     + int turbo;
2591     int32_t ratio;
2592     };
2593    
2594     @@ -360,14 +362,14 @@ static int byt_get_min_pstate(void)
2595     {
2596     u64 value;
2597     rdmsrl(BYT_RATIOS, value);
2598     - return (value >> 8) & 0xFF;
2599     + return (value >> 8) & 0x3F;
2600     }
2601    
2602     static int byt_get_max_pstate(void)
2603     {
2604     u64 value;
2605     rdmsrl(BYT_RATIOS, value);
2606     - return (value >> 16) & 0xFF;
2607     + return (value >> 16) & 0x3F;
2608     }
2609    
2610     static int byt_get_turbo_pstate(void)
2611     @@ -394,6 +396,9 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
2612     vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
2613     vid = fp_toint(vid_fp);
2614    
2615     + if (pstate > cpudata->pstate.max_pstate)
2616     + vid = cpudata->vid.turbo;
2617     +
2618     val |= vid;
2619    
2620     wrmsrl(MSR_IA32_PERF_CTL, val);
2621     @@ -403,13 +408,17 @@ static void byt_get_vid(struct cpudata *cpudata)
2622     {
2623     u64 value;
2624    
2625     +
2626     rdmsrl(BYT_VIDS, value);
2627     - cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
2628     - cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
2629     + cpudata->vid.min = int_tofp((value >> 8) & 0x3f);
2630     + cpudata->vid.max = int_tofp((value >> 16) & 0x3f);
2631     cpudata->vid.ratio = div_fp(
2632     cpudata->vid.max - cpudata->vid.min,
2633     int_tofp(cpudata->pstate.max_pstate -
2634     cpudata->pstate.min_pstate));
2635     +
2636     + rdmsrl(BYT_TURBO_VIDS, value);
2637     + cpudata->vid.turbo = value & 0x7f;
2638     }
2639    
2640    
2641     @@ -546,12 +555,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
2642    
2643     if (pstate_funcs.get_vid)
2644     pstate_funcs.get_vid(cpu);
2645     -
2646     - /*
2647     - * goto max pstate so we don't slow up boot if we are built-in if we are
2648     - * a module we will take care of it during normal operation
2649     - */
2650     - intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
2651     + intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
2652     }
2653    
2654     static inline void intel_pstate_calc_busy(struct cpudata *cpu,
2655     @@ -697,11 +701,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
2656     cpu = all_cpu_data[cpunum];
2657    
2658     intel_pstate_get_cpu_pstates(cpu);
2659     - if (!cpu->pstate.current_pstate) {
2660     - all_cpu_data[cpunum] = NULL;
2661     - kfree(cpu);
2662     - return -ENODATA;
2663     - }
2664    
2665     cpu->cpu = cpunum;
2666    
2667     @@ -712,7 +711,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
2668     cpu->timer.expires = jiffies + HZ/100;
2669     intel_pstate_busy_pid_reset(cpu);
2670     intel_pstate_sample(cpu);
2671     - intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
2672    
2673     add_timer_on(&cpu->timer, cpunum);
2674    
2675     diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
2676     index 8dead6fb28e8..7231967f51b4 100644
2677     --- a/drivers/cpufreq/loongson2_cpufreq.c
2678     +++ b/drivers/cpufreq/loongson2_cpufreq.c
2679     @@ -62,7 +62,7 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
2680     set_cpus_allowed_ptr(current, &cpus_allowed);
2681    
2682     /* setting the cpu frequency */
2683     - clk_set_rate(policy->clk, freq);
2684     + clk_set_rate(policy->clk, freq * 1000);
2685    
2686     return 0;
2687     }
2688     @@ -92,7 +92,7 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
2689     i++)
2690     loongson2_clockmod_table[i].frequency = (rate * i) / 8;
2691    
2692     - ret = clk_set_rate(cpuclk, rate);
2693     + ret = clk_set_rate(cpuclk, rate * 1000);
2694     if (ret) {
2695     clk_put(cpuclk);
2696     return ret;
2697     diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
2698     index 9f25f5296029..0eabd81e1a90 100644
2699     --- a/drivers/crypto/caam/error.c
2700     +++ b/drivers/crypto/caam/error.c
2701     @@ -16,9 +16,13 @@
2702     char *tmp; \
2703     \
2704     tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \
2705     - sprintf(tmp, format, param); \
2706     - strcat(str, tmp); \
2707     - kfree(tmp); \
2708     + if (likely(tmp)) { \
2709     + sprintf(tmp, format, param); \
2710     + strcat(str, tmp); \
2711     + kfree(tmp); \
2712     + } else { \
2713     + strcat(str, "kmalloc failure in SPRINTFCAT"); \
2714     + } \
2715     }
2716    
2717     static void report_jump_idx(u32 status, char *outstr)
2718     diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
2719     index ed610b497518..a4068e2d2b5d 100644
2720     --- a/drivers/dma/dmaengine.c
2721     +++ b/drivers/dma/dmaengine.c
2722     @@ -1014,6 +1014,7 @@ static void dmaengine_unmap(struct kref *kref)
2723     dma_unmap_page(dev, unmap->addr[i], unmap->len,
2724     DMA_BIDIRECTIONAL);
2725     }
2726     + cnt = unmap->map_cnt;
2727     mempool_free(unmap, __get_unmap_pool(cnt)->pool);
2728     }
2729    
2730     @@ -1079,6 +1080,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
2731     memset(unmap, 0, sizeof(*unmap));
2732     kref_init(&unmap->kref);
2733     unmap->dev = dev;
2734     + unmap->map_cnt = nr;
2735    
2736     return unmap;
2737     }
2738     diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
2739     index 13ac3f240e79..01a200cd0189 100644
2740     --- a/drivers/dma/dw/core.c
2741     +++ b/drivers/dma/dw/core.c
2742     @@ -1545,11 +1545,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
2743     /* Disable BLOCK interrupts as well */
2744     channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
2745    
2746     - err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
2747     - IRQF_SHARED, "dw_dmac", dw);
2748     - if (err)
2749     - return err;
2750     -
2751     /* Create a pool of consistent memory blocks for hardware descriptors */
2752     dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
2753     sizeof(struct dw_desc), 4, 0);
2754     @@ -1560,6 +1555,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
2755    
2756     tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
2757    
2758     + err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
2759     + "dw_dmac", dw);
2760     + if (err)
2761     + return err;
2762     +
2763     INIT_LIST_HEAD(&dw->dma.channels);
2764     for (i = 0; i < nr_channels; i++) {
2765     struct dw_dma_chan *dwc = &dw->chan[i];
2766     @@ -1664,6 +1664,7 @@ int dw_dma_remove(struct dw_dma_chip *chip)
2767     dw_dma_off(dw);
2768     dma_async_device_unregister(&dw->dma);
2769    
2770     + free_irq(chip->irq, dw);
2771     tasklet_kill(&dw->tasklet);
2772    
2773     list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
2774     diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
2775     index 766b68ed505c..394cbc5c93e3 100644
2776     --- a/drivers/dma/mv_xor.c
2777     +++ b/drivers/dma/mv_xor.c
2778     @@ -191,12 +191,10 @@ static void mv_set_mode(struct mv_xor_chan *chan,
2779    
2780     static void mv_chan_activate(struct mv_xor_chan *chan)
2781     {
2782     - u32 activation;
2783     -
2784     dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
2785     - activation = readl_relaxed(XOR_ACTIVATION(chan));
2786     - activation |= 0x1;
2787     - writel_relaxed(activation, XOR_ACTIVATION(chan));
2788     +
2789     + /* writel ensures all descriptors are flushed before activation */
2790     + writel(BIT(0), XOR_ACTIVATION(chan));
2791     }
2792    
2793     static char mv_chan_is_busy(struct mv_xor_chan *chan)
2794     diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
2795     index ec7bb0fc71bc..9debd6e74439 100644
2796     --- a/drivers/gpu/drm/i915/i915_drv.c
2797     +++ b/drivers/gpu/drm/i915/i915_drv.c
2798     @@ -614,15 +614,20 @@ static void intel_resume_hotplug(struct drm_device *dev)
2799     drm_helper_hpd_irq_event(dev);
2800     }
2801    
2802     +static int i915_drm_thaw_early(struct drm_device *dev)
2803     +{
2804     + intel_uncore_early_sanitize(dev);
2805     + intel_uncore_sanitize(dev);
2806     + intel_power_domains_init_hw(dev);
2807     +
2808     + return 0;
2809     +}
2810     +
2811     static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
2812     {
2813     struct drm_i915_private *dev_priv = dev->dev_private;
2814     int error = 0;
2815    
2816     - intel_uncore_early_sanitize(dev);
2817     -
2818     - intel_uncore_sanitize(dev);
2819     -
2820     if (drm_core_check_feature(dev, DRIVER_MODESET) &&
2821     restore_gtt_mappings) {
2822     mutex_lock(&dev->struct_mutex);
2823     @@ -630,8 +635,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
2824     mutex_unlock(&dev->struct_mutex);
2825     }
2826    
2827     - intel_power_domains_init_hw(dev);
2828     -
2829     i915_restore_state(dev);
2830     intel_opregion_setup(dev);
2831    
2832     @@ -700,19 +703,33 @@ static int i915_drm_thaw(struct drm_device *dev)
2833     return __i915_drm_thaw(dev, true);
2834     }
2835    
2836     -int i915_resume(struct drm_device *dev)
2837     +static int i915_resume_early(struct drm_device *dev)
2838     {
2839     - struct drm_i915_private *dev_priv = dev->dev_private;
2840     - int ret;
2841     -
2842     if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2843     return 0;
2844    
2845     + /*
2846     + * We have a resume ordering issue with the snd-hda driver also
2847     + * requiring our device to be powered up. Due to the lack of a
2848     + * parent/child relationship we currently solve this with an early
2849     + * resume hook.
2850     + *
2851     + * FIXME: This should be solved with a special hdmi sink device or
2852     + * similar so that power domains can be employed.
2853     + */
2854     if (pci_enable_device(dev->pdev))
2855     return -EIO;
2856    
2857     pci_set_master(dev->pdev);
2858    
2859     + return i915_drm_thaw_early(dev);
2860     +}
2861     +
2862     +int i915_resume(struct drm_device *dev)
2863     +{
2864     + struct drm_i915_private *dev_priv = dev->dev_private;
2865     + int ret;
2866     +
2867     /*
2868     * Platforms with opregion should have sane BIOS, older ones (gen3 and
2869     * earlier) need to restore the GTT mappings since the BIOS might clear
2870     @@ -726,6 +743,14 @@ int i915_resume(struct drm_device *dev)
2871     return 0;
2872     }
2873    
2874     +static int i915_resume_legacy(struct drm_device *dev)
2875     +{
2876     + i915_resume_early(dev);
2877     + i915_resume(dev);
2878     +
2879     + return 0;
2880     +}
2881     +
2882     /**
2883     * i915_reset - reset chip after a hang
2884     * @dev: drm device to reset
2885     @@ -846,7 +871,6 @@ static int i915_pm_suspend(struct device *dev)
2886     {
2887     struct pci_dev *pdev = to_pci_dev(dev);
2888     struct drm_device *drm_dev = pci_get_drvdata(pdev);
2889     - int error;
2890    
2891     if (!drm_dev || !drm_dev->dev_private) {
2892     dev_err(dev, "DRM not initialized, aborting suspend.\n");
2893     @@ -856,9 +880,25 @@ static int i915_pm_suspend(struct device *dev)
2894     if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2895     return 0;
2896    
2897     - error = i915_drm_freeze(drm_dev);
2898     - if (error)
2899     - return error;
2900     + return i915_drm_freeze(drm_dev);
2901     +}
2902     +
2903     +static int i915_pm_suspend_late(struct device *dev)
2904     +{
2905     + struct pci_dev *pdev = to_pci_dev(dev);
2906     + struct drm_device *drm_dev = pci_get_drvdata(pdev);
2907     +
2908     + /*
2909     + * We have a suspend ordering issue with the snd-hda driver also
2910     + * requiring our device to be powered up. Due to the lack of a
2911     + * parent/child relationship we currently solve this with a late
2912     + * suspend hook.
2913     + *
2914     + * FIXME: This should be solved with a special hdmi sink device or
2915     + * similar so that power domains can be employed.
2916     + */
2917     + if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2918     + return 0;
2919    
2920     pci_disable_device(pdev);
2921     pci_set_power_state(pdev, PCI_D3hot);
2922     @@ -866,6 +906,14 @@ static int i915_pm_suspend(struct device *dev)
2923     return 0;
2924     }
2925    
2926     +static int i915_pm_resume_early(struct device *dev)
2927     +{
2928     + struct pci_dev *pdev = to_pci_dev(dev);
2929     + struct drm_device *drm_dev = pci_get_drvdata(pdev);
2930     +
2931     + return i915_resume_early(drm_dev);
2932     +}
2933     +
2934     static int i915_pm_resume(struct device *dev)
2935     {
2936     struct pci_dev *pdev = to_pci_dev(dev);
2937     @@ -887,6 +935,14 @@ static int i915_pm_freeze(struct device *dev)
2938     return i915_drm_freeze(drm_dev);
2939     }
2940    
2941     +static int i915_pm_thaw_early(struct device *dev)
2942     +{
2943     + struct pci_dev *pdev = to_pci_dev(dev);
2944     + struct drm_device *drm_dev = pci_get_drvdata(pdev);
2945     +
2946     + return i915_drm_thaw_early(drm_dev);
2947     +}
2948     +
2949     static int i915_pm_thaw(struct device *dev)
2950     {
2951     struct pci_dev *pdev = to_pci_dev(dev);
2952     @@ -948,10 +1004,14 @@ static int i915_runtime_resume(struct device *device)
2953    
2954     static const struct dev_pm_ops i915_pm_ops = {
2955     .suspend = i915_pm_suspend,
2956     + .suspend_late = i915_pm_suspend_late,
2957     + .resume_early = i915_pm_resume_early,
2958     .resume = i915_pm_resume,
2959     .freeze = i915_pm_freeze,
2960     + .thaw_early = i915_pm_thaw_early,
2961     .thaw = i915_pm_thaw,
2962     .poweroff = i915_pm_poweroff,
2963     + .restore_early = i915_pm_resume_early,
2964     .restore = i915_pm_resume,
2965     .runtime_suspend = i915_runtime_suspend,
2966     .runtime_resume = i915_runtime_resume,
2967     @@ -994,7 +1054,7 @@ static struct drm_driver driver = {
2968    
2969     /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
2970     .suspend = i915_suspend,
2971     - .resume = i915_resume,
2972     + .resume = i915_resume_legacy,
2973    
2974     .device_is_agp = i915_driver_device_is_agp,
2975     .master_create = i915_master_create,
2976     diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
2977     index 00c836154725..3ecb332e7cfa 100644
2978     --- a/drivers/gpu/drm/i915/i915_gem.c
2979     +++ b/drivers/gpu/drm/i915/i915_gem.c
2980     @@ -3529,7 +3529,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2981     {
2982     struct drm_device *dev = obj->base.dev;
2983     drm_i915_private_t *dev_priv = dev->dev_private;
2984     - struct i915_vma *vma;
2985     + struct i915_vma *vma, *next;
2986     int ret;
2987    
2988     if (obj->cache_level == cache_level)
2989     @@ -3540,7 +3540,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2990     return -EBUSY;
2991     }
2992    
2993     - list_for_each_entry(vma, &obj->vma_list, vma_link) {
2994     + list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
2995     if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
2996     ret = i915_vma_unbind(vma);
2997     if (ret)
2998     diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
2999     index d554169ac592..40504504c9e2 100644
3000     --- a/drivers/gpu/drm/i915/i915_irq.c
3001     +++ b/drivers/gpu/drm/i915/i915_irq.c
3002     @@ -1252,10 +1252,20 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
3003     spin_lock(&dev_priv->irq_lock);
3004     for (i = 1; i < HPD_NUM_PINS; i++) {
3005    
3006     - WARN_ONCE(hpd[i] & hotplug_trigger &&
3007     - dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
3008     - "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
3009     - hotplug_trigger, i, hpd[i]);
3010     + if (hpd[i] & hotplug_trigger &&
3011     + dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
3012     + /*
3013     + * On GMCH platforms the interrupt mask bits only
3014     + * prevent irq generation, not the setting of the
3015     + * hotplug bits itself. So only WARN about unexpected
3016     + * interrupts on saner platforms.
3017     + */
3018     + WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
3019     + "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
3020     + hotplug_trigger, i, hpd[i]);
3021     +
3022     + continue;
3023     + }
3024    
3025     if (!(hpd[i] & hotplug_trigger) ||
3026     dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
3027     diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
3028     index 963639d9049b..9d4d837506d0 100644
3029     --- a/drivers/gpu/drm/i915/intel_display.c
3030     +++ b/drivers/gpu/drm/i915/intel_display.c
3031     @@ -9354,11 +9354,22 @@ intel_pipe_config_compare(struct drm_device *dev,
3032     PIPE_CONF_CHECK_I(pipe_src_w);
3033     PIPE_CONF_CHECK_I(pipe_src_h);
3034    
3035     - PIPE_CONF_CHECK_I(gmch_pfit.control);
3036     - /* pfit ratios are autocomputed by the hw on gen4+ */
3037     - if (INTEL_INFO(dev)->gen < 4)
3038     - PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
3039     - PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
3040     + /*
3041     + * FIXME: BIOS likes to set up a cloned config with lvds+external
3042     + * screen. Since we don't yet re-compute the pipe config when moving
3043     + * just the lvds port away to another pipe the sw tracking won't match.
3044     + *
3045     + * Proper atomic modesets with recomputed global state will fix this.
3046     + * Until then just don't check gmch state for inherited modes.
3047     + */
3048     + if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
3049     + PIPE_CONF_CHECK_I(gmch_pfit.control);
3050     + /* pfit ratios are autocomputed by the hw on gen4+ */
3051     + if (INTEL_INFO(dev)->gen < 4)
3052     + PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
3053     + PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
3054     + }
3055     +
3056     PIPE_CONF_CHECK_I(pch_pfit.enabled);
3057     if (current_config->pch_pfit.enabled) {
3058     PIPE_CONF_CHECK_I(pch_pfit.pos);
3059     @@ -10851,6 +10862,9 @@ static struct intel_quirk intel_quirks[] = {
3060     /* Acer Aspire 4736Z */
3061     { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
3062    
3063     + /* Acer Aspire 5336 */
3064     + { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
3065     +
3066     /* Dell XPS13 HD Sandy Bridge */
3067     { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
3068     /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
3069     @@ -10978,15 +10992,6 @@ void intel_modeset_init(struct drm_device *dev)
3070     intel_disable_fbc(dev);
3071     }
3072    
3073     -static void
3074     -intel_connector_break_all_links(struct intel_connector *connector)
3075     -{
3076     - connector->base.dpms = DRM_MODE_DPMS_OFF;
3077     - connector->base.encoder = NULL;
3078     - connector->encoder->connectors_active = false;
3079     - connector->encoder->base.crtc = NULL;
3080     -}
3081     -
3082     static void intel_enable_pipe_a(struct drm_device *dev)
3083     {
3084     struct intel_connector *connector;
3085     @@ -11068,8 +11073,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
3086     if (connector->encoder->base.crtc != &crtc->base)
3087     continue;
3088    
3089     - intel_connector_break_all_links(connector);
3090     + connector->base.dpms = DRM_MODE_DPMS_OFF;
3091     + connector->base.encoder = NULL;
3092     }
3093     + /* multiple connectors may have the same encoder:
3094     + * handle them and break crtc link separately */
3095     + list_for_each_entry(connector, &dev->mode_config.connector_list,
3096     + base.head)
3097     + if (connector->encoder->base.crtc == &crtc->base) {
3098     + connector->encoder->base.crtc = NULL;
3099     + connector->encoder->connectors_active = false;
3100     + }
3101    
3102     WARN_ON(crtc->active);
3103     crtc->base.enabled = false;
3104     @@ -11140,6 +11154,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
3105     drm_get_encoder_name(&encoder->base));
3106     encoder->disable(encoder);
3107     }
3108     + encoder->base.crtc = NULL;
3109     + encoder->connectors_active = false;
3110    
3111     /* Inconsistent output/port/pipe state happens presumably due to
3112     * a bug in one of the get_hw_state functions. Or someplace else
3113     @@ -11150,8 +11166,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
3114     base.head) {
3115     if (connector->encoder != encoder)
3116     continue;
3117     -
3118     - intel_connector_break_all_links(connector);
3119     + connector->base.dpms = DRM_MODE_DPMS_OFF;
3120     + connector->base.encoder = NULL;
3121     }
3122     }
3123     /* Enabled encoders without active connectors will be fixed in
3124     @@ -11193,6 +11209,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
3125     base.head) {
3126     memset(&crtc->config, 0, sizeof(crtc->config));
3127    
3128     + crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
3129     +
3130     crtc->active = dev_priv->display.get_pipe_config(crtc,
3131     &crtc->config);
3132    
3133     diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
3134     index fbfaaba5cc3b..00de120531ac 100644
3135     --- a/drivers/gpu/drm/i915/intel_drv.h
3136     +++ b/drivers/gpu/drm/i915/intel_drv.h
3137     @@ -219,7 +219,8 @@ struct intel_crtc_config {
3138     * tracked with quirk flags so that fastboot and state checker can act
3139     * accordingly.
3140     */
3141     -#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
3142     +#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
3143     +#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */
3144     unsigned long quirks;
3145    
3146     /* User requested mode, only valid as a starting point to
3147     diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
3148     index ee3181ebcc92..ca5d23d17d20 100644
3149     --- a/drivers/gpu/drm/i915/intel_hdmi.c
3150     +++ b/drivers/gpu/drm/i915/intel_hdmi.c
3151     @@ -841,11 +841,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
3152     }
3153     }
3154    
3155     -static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
3156     +static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
3157     {
3158     struct drm_device *dev = intel_hdmi_to_dev(hdmi);
3159    
3160     - if (!hdmi->has_hdmi_sink || IS_G4X(dev))
3161     + if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
3162     return 165000;
3163     else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
3164     return 300000;
3165     @@ -857,7 +857,8 @@ static enum drm_mode_status
3166     intel_hdmi_mode_valid(struct drm_connector *connector,
3167     struct drm_display_mode *mode)
3168     {
3169     - if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
3170     + if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
3171     + true))
3172     return MODE_CLOCK_HIGH;
3173     if (mode->clock < 20000)
3174     return MODE_CLOCK_LOW;
3175     @@ -875,7 +876,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
3176     struct drm_device *dev = encoder->base.dev;
3177     struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
3178     int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
3179     - int portclock_limit = hdmi_portclock_limit(intel_hdmi);
3180     + int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
3181     int desired_bpp;
3182    
3183     if (intel_hdmi->color_range_auto) {
3184     diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
3185     index 9f1d7a9300e8..19a4f0535b63 100644
3186     --- a/drivers/gpu/drm/i915/intel_panel.c
3187     +++ b/drivers/gpu/drm/i915/intel_panel.c
3188     @@ -501,6 +501,7 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
3189     enum pipe pipe = intel_get_pipe_from_connector(connector);
3190     u32 freq;
3191     unsigned long flags;
3192     + u64 n;
3193    
3194     if (!panel->backlight.present || pipe == INVALID_PIPE)
3195     return;
3196     @@ -511,10 +512,9 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
3197    
3198     /* scale to hardware max, but be careful to not overflow */
3199     freq = panel->backlight.max;
3200     - if (freq < max)
3201     - level = level * freq / max;
3202     - else
3203     - level = freq / max * level;
3204     + n = (u64)level * freq;
3205     + do_div(n, max);
3206     + level = n;
3207    
3208     panel->backlight.level = level;
3209     if (panel->backlight.device)
3210     diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
3211     index e1fc35a72656..bd1b00344dac 100644
3212     --- a/drivers/gpu/drm/i915/intel_pm.c
3213     +++ b/drivers/gpu/drm/i915/intel_pm.c
3214     @@ -1539,6 +1539,16 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
3215    
3216     DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
3217    
3218     + if (IS_I915GM(dev) && enabled) {
3219     + struct intel_framebuffer *fb;
3220     +
3221     + fb = to_intel_framebuffer(enabled->fb);
3222     +
3223     + /* self-refresh seems busted with untiled */
3224     + if (fb->obj->tiling_mode == I915_TILING_NONE)
3225     + enabled = NULL;
3226     + }
3227     +
3228     /*
3229     * Overlay gets an aggressive default since video jitter is bad.
3230     */
3231     diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
3232     index 87df68f5f504..c8796316d242 100644
3233     --- a/drivers/gpu/drm/i915/intel_uncore.c
3234     +++ b/drivers/gpu/drm/i915/intel_uncore.c
3235     @@ -177,6 +177,8 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
3236     {
3237     __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
3238     _MASKED_BIT_DISABLE(0xffff));
3239     + __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
3240     + _MASKED_BIT_DISABLE(0xffff));
3241     /* something from same cacheline, but !FORCEWAKE_VLV */
3242     __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
3243     }
3244     diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
3245     index 95f6129eeede..29d4c417a5b3 100644
3246     --- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
3247     +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
3248     @@ -54,8 +54,10 @@ nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
3249    
3250     /* check that we're not already at the target duty cycle */
3251     duty = fan->get(therm);
3252     - if (duty == target)
3253     - goto done;
3254     + if (duty == target) {
3255     + spin_unlock_irqrestore(&fan->lock, flags);
3256     + return 0;
3257     + }
3258    
3259     /* smooth out the fanspeed increase/decrease */
3260     if (!immediate && duty >= 0) {
3261     @@ -73,8 +75,15 @@ nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
3262    
3263     nv_debug(therm, "FAN update: %d\n", duty);
3264     ret = fan->set(therm, duty);
3265     - if (ret)
3266     - goto done;
3267     + if (ret) {
3268     + spin_unlock_irqrestore(&fan->lock, flags);
3269     + return ret;
3270     + }
3271     +
3272     + /* fan speed updated, drop the fan lock before grabbing the
3273     + * alarm-scheduling lock and risking a deadlock
3274     + */
3275     + spin_unlock_irqrestore(&fan->lock, flags);
3276    
3277     /* schedule next fan update, if not at target speed already */
3278     if (list_empty(&fan->alarm.head) && target != duty) {
3279     @@ -92,8 +101,6 @@ nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
3280     ptimer->alarm(ptimer, delay * 1000 * 1000, &fan->alarm);
3281     }
3282    
3283     -done:
3284     - spin_unlock_irqrestore(&fan->lock, flags);
3285     return ret;
3286     }
3287    
3288     diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
3289     index 83face3f608f..279206997e5c 100644
3290     --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
3291     +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
3292     @@ -389,9 +389,6 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
3293     acpi_status status;
3294     acpi_handle dhandle, rom_handle;
3295    
3296     - if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected)
3297     - return false;
3298     -
3299     dhandle = ACPI_HANDLE(&pdev->dev);
3300     if (!dhandle)
3301     return false;
3302     diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
3303     index 24011596af43..5524a3705224 100644
3304     --- a/drivers/gpu/drm/nouveau/nouveau_display.c
3305     +++ b/drivers/gpu/drm/nouveau/nouveau_display.c
3306     @@ -762,9 +762,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
3307     }
3308    
3309     ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
3310     - mutex_unlock(&chan->cli->mutex);
3311     if (ret)
3312     goto fail_unreserve;
3313     + mutex_unlock(&chan->cli->mutex);
3314    
3315     /* Update the crtc struct and cleanup */
3316     crtc->fb = fb;
3317     diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
3318     index 8d49104ca6c2..9241e96f8502 100644
3319     --- a/drivers/gpu/drm/radeon/ci_dpm.c
3320     +++ b/drivers/gpu/drm/radeon/ci_dpm.c
3321     @@ -21,8 +21,10 @@
3322     *
3323     */
3324    
3325     +#include <linux/firmware.h>
3326     #include "drmP.h"
3327     #include "radeon.h"
3328     +#include "radeon_ucode.h"
3329     #include "cikd.h"
3330     #include "r600_dpm.h"
3331     #include "ci_dpm.h"
3332     @@ -5106,6 +5108,12 @@ int ci_dpm_init(struct radeon_device *rdev)
3333     pi->mclk_dpm_key_disabled = 0;
3334     pi->pcie_dpm_key_disabled = 0;
3335    
3336     + /* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
3337     + if ((rdev->pdev->device == 0x6658) &&
3338     + (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) {
3339     + pi->mclk_dpm_key_disabled = 1;
3340     + }
3341     +
3342     pi->caps_sclk_ds = true;
3343    
3344     pi->mclk_strobe_mode_threshold = 40000;
3345     diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
3346     index 986f9e7364ca..270f68a6b724 100644
3347     --- a/drivers/gpu/drm/radeon/cik.c
3348     +++ b/drivers/gpu/drm/radeon/cik.c
3349     @@ -38,6 +38,7 @@ MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
3350     MODULE_FIRMWARE("radeon/BONAIRE_ce.bin");
3351     MODULE_FIRMWARE("radeon/BONAIRE_mec.bin");
3352     MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
3353     +MODULE_FIRMWARE("radeon/BONAIRE_mc2.bin");
3354     MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
3355     MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
3356     MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
3357     @@ -46,6 +47,7 @@ MODULE_FIRMWARE("radeon/HAWAII_me.bin");
3358     MODULE_FIRMWARE("radeon/HAWAII_ce.bin");
3359     MODULE_FIRMWARE("radeon/HAWAII_mec.bin");
3360     MODULE_FIRMWARE("radeon/HAWAII_mc.bin");
3361     +MODULE_FIRMWARE("radeon/HAWAII_mc2.bin");
3362     MODULE_FIRMWARE("radeon/HAWAII_rlc.bin");
3363     MODULE_FIRMWARE("radeon/HAWAII_sdma.bin");
3364     MODULE_FIRMWARE("radeon/HAWAII_smc.bin");
3365     @@ -1702,20 +1704,20 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
3366     const __be32 *fw_data;
3367     u32 running, blackout = 0;
3368     u32 *io_mc_regs;
3369     - int i, ucode_size, regs_size;
3370     + int i, regs_size, ucode_size;
3371    
3372     if (!rdev->mc_fw)
3373     return -EINVAL;
3374    
3375     + ucode_size = rdev->mc_fw->size / 4;
3376     +
3377     switch (rdev->family) {
3378     case CHIP_BONAIRE:
3379     io_mc_regs = (u32 *)&bonaire_io_mc_regs;
3380     - ucode_size = CIK_MC_UCODE_SIZE;
3381     regs_size = BONAIRE_IO_MC_REGS_SIZE;
3382     break;
3383     case CHIP_HAWAII:
3384     io_mc_regs = (u32 *)&hawaii_io_mc_regs;
3385     - ucode_size = HAWAII_MC_UCODE_SIZE;
3386     regs_size = HAWAII_IO_MC_REGS_SIZE;
3387     break;
3388     default:
3389     @@ -1782,7 +1784,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
3390     const char *chip_name;
3391     size_t pfp_req_size, me_req_size, ce_req_size,
3392     mec_req_size, rlc_req_size, mc_req_size = 0,
3393     - sdma_req_size, smc_req_size = 0;
3394     + sdma_req_size, smc_req_size = 0, mc2_req_size = 0;
3395     char fw_name[30];
3396     int err;
3397    
3398     @@ -1796,7 +1798,8 @@ static int cik_init_microcode(struct radeon_device *rdev)
3399     ce_req_size = CIK_CE_UCODE_SIZE * 4;
3400     mec_req_size = CIK_MEC_UCODE_SIZE * 4;
3401     rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
3402     - mc_req_size = CIK_MC_UCODE_SIZE * 4;
3403     + mc_req_size = BONAIRE_MC_UCODE_SIZE * 4;
3404     + mc2_req_size = BONAIRE_MC2_UCODE_SIZE * 4;
3405     sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
3406     smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
3407     break;
3408     @@ -1808,6 +1811,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
3409     mec_req_size = CIK_MEC_UCODE_SIZE * 4;
3410     rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
3411     mc_req_size = HAWAII_MC_UCODE_SIZE * 4;
3412     + mc2_req_size = HAWAII_MC2_UCODE_SIZE * 4;
3413     sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
3414     smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4);
3415     break;
3416     @@ -1903,16 +1907,22 @@ static int cik_init_microcode(struct radeon_device *rdev)
3417    
3418     /* No SMC, MC ucode on APUs */
3419     if (!(rdev->flags & RADEON_IS_IGP)) {
3420     - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
3421     + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
3422     err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
3423     - if (err)
3424     - goto out;
3425     - if (rdev->mc_fw->size != mc_req_size) {
3426     + if (err) {
3427     + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
3428     + err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
3429     + if (err)
3430     + goto out;
3431     + }
3432     + if ((rdev->mc_fw->size != mc_req_size) &&
3433     + (rdev->mc_fw->size != mc2_req_size)) {
3434     printk(KERN_ERR
3435     "cik_mc: Bogus length %zu in firmware \"%s\"\n",
3436     rdev->mc_fw->size, fw_name);
3437     err = -EINVAL;
3438     }
3439     + DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
3440    
3441     snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
3442     err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
3443     @@ -6662,6 +6672,19 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
3444     WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
3445     WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
3446     }
3447     + /* pflip */
3448     + if (rdev->num_crtc >= 2) {
3449     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
3450     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
3451     + }
3452     + if (rdev->num_crtc >= 4) {
3453     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
3454     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
3455     + }
3456     + if (rdev->num_crtc >= 6) {
3457     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
3458     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
3459     + }
3460    
3461     /* dac hotplug */
3462     WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
3463     @@ -7018,6 +7041,25 @@ int cik_irq_set(struct radeon_device *rdev)
3464     WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
3465     }
3466    
3467     + if (rdev->num_crtc >= 2) {
3468     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
3469     + GRPH_PFLIP_INT_MASK);
3470     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
3471     + GRPH_PFLIP_INT_MASK);
3472     + }
3473     + if (rdev->num_crtc >= 4) {
3474     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
3475     + GRPH_PFLIP_INT_MASK);
3476     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
3477     + GRPH_PFLIP_INT_MASK);
3478     + }
3479     + if (rdev->num_crtc >= 6) {
3480     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
3481     + GRPH_PFLIP_INT_MASK);
3482     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
3483     + GRPH_PFLIP_INT_MASK);
3484     + }
3485     +
3486     WREG32(DC_HPD1_INT_CONTROL, hpd1);
3487     WREG32(DC_HPD2_INT_CONTROL, hpd2);
3488     WREG32(DC_HPD3_INT_CONTROL, hpd3);
3489     @@ -7054,6 +7096,29 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
3490     rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
3491     rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6);
3492    
3493     + rdev->irq.stat_regs.cik.d1grph_int = RREG32(GRPH_INT_STATUS +
3494     + EVERGREEN_CRTC0_REGISTER_OFFSET);
3495     + rdev->irq.stat_regs.cik.d2grph_int = RREG32(GRPH_INT_STATUS +
3496     + EVERGREEN_CRTC1_REGISTER_OFFSET);
3497     + if (rdev->num_crtc >= 4) {
3498     + rdev->irq.stat_regs.cik.d3grph_int = RREG32(GRPH_INT_STATUS +
3499     + EVERGREEN_CRTC2_REGISTER_OFFSET);
3500     + rdev->irq.stat_regs.cik.d4grph_int = RREG32(GRPH_INT_STATUS +
3501     + EVERGREEN_CRTC3_REGISTER_OFFSET);
3502     + }
3503     + if (rdev->num_crtc >= 6) {
3504     + rdev->irq.stat_regs.cik.d5grph_int = RREG32(GRPH_INT_STATUS +
3505     + EVERGREEN_CRTC4_REGISTER_OFFSET);
3506     + rdev->irq.stat_regs.cik.d6grph_int = RREG32(GRPH_INT_STATUS +
3507     + EVERGREEN_CRTC5_REGISTER_OFFSET);
3508     + }
3509     +
3510     + if (rdev->irq.stat_regs.cik.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
3511     + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
3512     + GRPH_PFLIP_INT_CLEAR);
3513     + if (rdev->irq.stat_regs.cik.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
3514     + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
3515     + GRPH_PFLIP_INT_CLEAR);
3516     if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT)
3517     WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
3518     if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT)
3519     @@ -7064,6 +7129,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
3520     WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
3521    
3522     if (rdev->num_crtc >= 4) {
3523     + if (rdev->irq.stat_regs.cik.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
3524     + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
3525     + GRPH_PFLIP_INT_CLEAR);
3526     + if (rdev->irq.stat_regs.cik.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
3527     + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
3528     + GRPH_PFLIP_INT_CLEAR);
3529     if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
3530     WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
3531     if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
3532     @@ -7075,6 +7146,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
3533     }
3534    
3535     if (rdev->num_crtc >= 6) {
3536     + if (rdev->irq.stat_regs.cik.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
3537     + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
3538     + GRPH_PFLIP_INT_CLEAR);
3539     + if (rdev->irq.stat_regs.cik.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
3540     + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET,
3541     + GRPH_PFLIP_INT_CLEAR);
3542     if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
3543     WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
3544     if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
3545     @@ -7426,6 +7503,15 @@ restart_ih:
3546     break;
3547     }
3548     break;
3549     + case 8: /* D1 page flip */
3550     + case 10: /* D2 page flip */
3551     + case 12: /* D3 page flip */
3552     + case 14: /* D4 page flip */
3553     + case 16: /* D5 page flip */
3554     + case 18: /* D6 page flip */
3555     + DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
3556     + radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
3557     + break;
3558     case 42: /* HPD hotplug */
3559     switch (src_data) {
3560     case 0:
3561     diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
3562     index 94626ea90fa5..aac8f487e6df 100644
3563     --- a/drivers/gpu/drm/radeon/cik_sdma.c
3564     +++ b/drivers/gpu/drm/radeon/cik_sdma.c
3565     @@ -599,7 +599,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
3566     tmp = 0xCAFEDEAD;
3567     writel(tmp, ptr);
3568    
3569     - r = radeon_ring_lock(rdev, ring, 4);
3570     + r = radeon_ring_lock(rdev, ring, 5);
3571     if (r) {
3572     DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
3573     return r;
3574     diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
3575     index 98bae9d7b74d..d1b2c71b192a 100644
3576     --- a/drivers/gpu/drm/radeon/cikd.h
3577     +++ b/drivers/gpu/drm/radeon/cikd.h
3578     @@ -882,6 +882,15 @@
3579     # define DC_HPD6_RX_INTERRUPT (1 << 18)
3580     #define DISP_INTERRUPT_STATUS_CONTINUE6 0x6780
3581    
3582     +/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
3583     +#define GRPH_INT_STATUS 0x6858
3584     +# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
3585     +# define GRPH_PFLIP_INT_CLEAR (1 << 8)
3586     +/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
3587     +#define GRPH_INT_CONTROL 0x685c
3588     +# define GRPH_PFLIP_INT_MASK (1 << 0)
3589     +# define GRPH_PFLIP_INT_TYPE (1 << 8)
3590     +
3591     #define DAC_AUTODETECT_INT_CONTROL 0x67c8
3592    
3593     #define DC_HPD1_INT_STATUS 0x601c
3594     diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
3595     index 94e858751994..0a65dc7e93e7 100644
3596     --- a/drivers/gpu/drm/radeon/dce6_afmt.c
3597     +++ b/drivers/gpu/drm/radeon/dce6_afmt.c
3598     @@ -309,11 +309,17 @@ int dce6_audio_init(struct radeon_device *rdev)
3599    
3600     rdev->audio.enabled = true;
3601    
3602     - if (ASIC_IS_DCE8(rdev))
3603     + if (ASIC_IS_DCE81(rdev)) /* KV: 4 streams, 7 endpoints */
3604     + rdev->audio.num_pins = 7;
3605     + else if (ASIC_IS_DCE83(rdev)) /* KB: 2 streams, 3 endpoints */
3606     + rdev->audio.num_pins = 3;
3607     + else if (ASIC_IS_DCE8(rdev)) /* BN/HW: 6 streams, 7 endpoints */
3608     + rdev->audio.num_pins = 7;
3609     + else if (ASIC_IS_DCE61(rdev)) /* TN: 4 streams, 6 endpoints */
3610     rdev->audio.num_pins = 6;
3611     - else if (ASIC_IS_DCE61(rdev))
3612     - rdev->audio.num_pins = 4;
3613     - else
3614     + else if (ASIC_IS_DCE64(rdev)) /* OL: 2 streams, 2 endpoints */
3615     + rdev->audio.num_pins = 2;
3616     + else /* SI: 6 streams, 6 endpoints */
3617     rdev->audio.num_pins = 6;
3618    
3619     for (i = 0; i < rdev->audio.num_pins; i++) {
3620     diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
3621     index 27b0ff16082e..971d9339ce80 100644
3622     --- a/drivers/gpu/drm/radeon/evergreen.c
3623     +++ b/drivers/gpu/drm/radeon/evergreen.c
3624     @@ -4375,7 +4375,6 @@ int evergreen_irq_set(struct radeon_device *rdev)
3625     u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
3626     u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
3627     u32 grbm_int_cntl = 0;
3628     - u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
3629     u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
3630     u32 dma_cntl, dma_cntl1 = 0;
3631     u32 thermal_int = 0;
3632     @@ -4558,15 +4557,21 @@ int evergreen_irq_set(struct radeon_device *rdev)
3633     WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
3634     }
3635    
3636     - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
3637     - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
3638     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
3639     + GRPH_PFLIP_INT_MASK);
3640     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
3641     + GRPH_PFLIP_INT_MASK);
3642     if (rdev->num_crtc >= 4) {
3643     - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
3644     - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
3645     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
3646     + GRPH_PFLIP_INT_MASK);
3647     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
3648     + GRPH_PFLIP_INT_MASK);
3649     }
3650     if (rdev->num_crtc >= 6) {
3651     - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
3652     - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
3653     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
3654     + GRPH_PFLIP_INT_MASK);
3655     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
3656     + GRPH_PFLIP_INT_MASK);
3657     }
3658    
3659     WREG32(DC_HPD1_INT_CONTROL, hpd1);
3660     @@ -4955,6 +4960,15 @@ restart_ih:
3661     break;
3662     }
3663     break;
3664     + case 8: /* D1 page flip */
3665     + case 10: /* D2 page flip */
3666     + case 12: /* D3 page flip */
3667     + case 14: /* D4 page flip */
3668     + case 16: /* D5 page flip */
3669     + case 18: /* D6 page flip */
3670     + DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
3671     + radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
3672     + break;
3673     case 42: /* HPD hotplug */
3674     switch (src_data) {
3675     case 0:
3676     diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
3677     index 647ef4079217..f28ab840cc23 100644
3678     --- a/drivers/gpu/drm/radeon/r600.c
3679     +++ b/drivers/gpu/drm/radeon/r600.c
3680     @@ -3509,7 +3509,6 @@ int r600_irq_set(struct radeon_device *rdev)
3681     u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
3682     u32 grbm_int_cntl = 0;
3683     u32 hdmi0, hdmi1;
3684     - u32 d1grph = 0, d2grph = 0;
3685     u32 dma_cntl;
3686     u32 thermal_int = 0;
3687    
3688     @@ -3618,8 +3617,8 @@ int r600_irq_set(struct radeon_device *rdev)
3689     WREG32(CP_INT_CNTL, cp_int_cntl);
3690     WREG32(DMA_CNTL, dma_cntl);
3691     WREG32(DxMODE_INT_MASK, mode_int);
3692     - WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
3693     - WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
3694     + WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
3695     + WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
3696     WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3697     if (ASIC_IS_DCE3(rdev)) {
3698     WREG32(DC_HPD1_INT_CONTROL, hpd1);
3699     @@ -3922,6 +3921,14 @@ restart_ih:
3700     break;
3701     }
3702     break;
3703     + case 9: /* D1 pflip */
3704     + DRM_DEBUG("IH: D1 flip\n");
3705     + radeon_crtc_handle_flip(rdev, 0);
3706     + break;
3707     + case 11: /* D2 pflip */
3708     + DRM_DEBUG("IH: D2 flip\n");
3709     + radeon_crtc_handle_flip(rdev, 1);
3710     + break;
3711     case 19: /* HPD/DAC hotplug */
3712     switch (src_data) {
3713     case 0:
3714     diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
3715     index e4cc9b314ce9..813db8de52b7 100644
3716     --- a/drivers/gpu/drm/radeon/r600_dpm.c
3717     +++ b/drivers/gpu/drm/radeon/r600_dpm.c
3718     @@ -158,16 +158,18 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
3719     u32 line_time_us, vblank_lines;
3720     u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
3721    
3722     - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3723     - radeon_crtc = to_radeon_crtc(crtc);
3724     - if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
3725     - line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
3726     - radeon_crtc->hw_mode.clock;
3727     - vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
3728     - radeon_crtc->hw_mode.crtc_vdisplay +
3729     - (radeon_crtc->v_border * 2);
3730     - vblank_time_us = vblank_lines * line_time_us;
3731     - break;
3732     + if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
3733     + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3734     + radeon_crtc = to_radeon_crtc(crtc);
3735     + if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
3736     + line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
3737     + radeon_crtc->hw_mode.clock;
3738     + vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
3739     + radeon_crtc->hw_mode.crtc_vdisplay +
3740     + (radeon_crtc->v_border * 2);
3741     + vblank_time_us = vblank_lines * line_time_us;
3742     + break;
3743     + }
3744     }
3745     }
3746    
3747     @@ -181,14 +183,15 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
3748     struct radeon_crtc *radeon_crtc;
3749     u32 vrefresh = 0;
3750    
3751     - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3752     - radeon_crtc = to_radeon_crtc(crtc);
3753     - if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
3754     - vrefresh = radeon_crtc->hw_mode.vrefresh;
3755     - break;
3756     + if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
3757     + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3758     + radeon_crtc = to_radeon_crtc(crtc);
3759     + if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
3760     + vrefresh = radeon_crtc->hw_mode.vrefresh;
3761     + break;
3762     + }
3763     }
3764     }
3765     -
3766     return vrefresh;
3767     }
3768    
3769     diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
3770     index e887d027b6d0..08e86f90c9a4 100644
3771     --- a/drivers/gpu/drm/radeon/radeon.h
3772     +++ b/drivers/gpu/drm/radeon/radeon.h
3773     @@ -733,6 +733,12 @@ struct cik_irq_stat_regs {
3774     u32 disp_int_cont4;
3775     u32 disp_int_cont5;
3776     u32 disp_int_cont6;
3777     + u32 d1grph_int;
3778     + u32 d2grph_int;
3779     + u32 d3grph_int;
3780     + u32 d4grph_int;
3781     + u32 d5grph_int;
3782     + u32 d6grph_int;
3783     };
3784    
3785     union radeon_irq_stat_regs {
3786     @@ -742,7 +748,7 @@ union radeon_irq_stat_regs {
3787     struct cik_irq_stat_regs cik;
3788     };
3789    
3790     -#define RADEON_MAX_HPD_PINS 6
3791     +#define RADEON_MAX_HPD_PINS 7
3792     #define RADEON_MAX_CRTCS 6
3793     #define RADEON_MAX_AFMT_BLOCKS 7
3794    
3795     @@ -2242,6 +2248,7 @@ struct radeon_device {
3796     bool have_disp_power_ref;
3797     };
3798    
3799     +bool radeon_is_px(struct drm_device *dev);
3800     int radeon_device_init(struct radeon_device *rdev,
3801     struct drm_device *ddev,
3802     struct pci_dev *pdev,
3803     @@ -2552,6 +2559,9 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
3804     #define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
3805     #define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN))
3806     #define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
3807     +#define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI))
3808     +#define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE))
3809     +#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI))
3810    
3811     #define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
3812     (rdev->ddev->pdev->device == 0x6850) || \
3813     diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
3814     index fa9a9c02751e..a9fb0d016d38 100644
3815     --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
3816     +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
3817     @@ -59,7 +59,7 @@ struct atpx_mux {
3818     u16 mux;
3819     } __packed;
3820    
3821     -bool radeon_is_px(void) {
3822     +bool radeon_has_atpx(void) {
3823     return radeon_atpx_priv.atpx_detected;
3824     }
3825    
3826     @@ -528,6 +528,13 @@ static bool radeon_atpx_detect(void)
3827     has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
3828     }
3829    
3830     + /* some newer PX laptops mark the dGPU as a non-VGA display device */
3831     + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
3832     + vga_count++;
3833     +
3834     + has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
3835     + }
3836     +
3837     if (has_atpx && vga_count == 2) {
3838     acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
3839     printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
3840     diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
3841     index 044bc98fb459..7f370b309590 100644
3842     --- a/drivers/gpu/drm/radeon/radeon_device.c
3843     +++ b/drivers/gpu/drm/radeon/radeon_device.c
3844     @@ -102,11 +102,14 @@ static const char radeon_family_name[][16] = {
3845     "LAST",
3846     };
3847    
3848     -#if defined(CONFIG_VGA_SWITCHEROO)
3849     -bool radeon_is_px(void);
3850     -#else
3851     -static inline bool radeon_is_px(void) { return false; }
3852     -#endif
3853     +bool radeon_is_px(struct drm_device *dev)
3854     +{
3855     + struct radeon_device *rdev = dev->dev_private;
3856     +
3857     + if (rdev->flags & RADEON_IS_PX)
3858     + return true;
3859     + return false;
3860     +}
3861    
3862     /**
3863     * radeon_program_register_sequence - program an array of registers.
3864     @@ -1082,7 +1085,7 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
3865     {
3866     struct drm_device *dev = pci_get_drvdata(pdev);
3867    
3868     - if (radeon_is_px() && state == VGA_SWITCHEROO_OFF)
3869     + if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
3870     return;
3871    
3872     if (state == VGA_SWITCHEROO_ON) {
3873     @@ -1303,9 +1306,7 @@ int radeon_device_init(struct radeon_device *rdev,
3874     * ignore it */
3875     vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
3876    
3877     - if (radeon_runtime_pm == 1)
3878     - runtime = true;
3879     - if ((radeon_runtime_pm == -1) && radeon_is_px())
3880     + if (rdev->flags & RADEON_IS_PX)
3881     runtime = true;
3882     vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
3883     if (runtime)
3884     diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
3885     index 6e6f82c53fef..df6d0079d0af 100644
3886     --- a/drivers/gpu/drm/radeon/radeon_display.c
3887     +++ b/drivers/gpu/drm/radeon/radeon_display.c
3888     @@ -282,6 +282,10 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
3889     u32 update_pending;
3890     int vpos, hpos;
3891    
3892     + /* can happen during initialization */
3893     + if (radeon_crtc == NULL)
3894     + return;
3895     +
3896     spin_lock_irqsave(&rdev->ddev->event_lock, flags);
3897     work = radeon_crtc->unpin_work;
3898     if (work == NULL ||
3899     diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
3900     index f633c2782170..9c0056f0a68a 100644
3901     --- a/drivers/gpu/drm/radeon/radeon_drv.c
3902     +++ b/drivers/gpu/drm/radeon/radeon_drv.c
3903     @@ -113,6 +113,7 @@ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
3904     unsigned int flags,
3905     int *vpos, int *hpos, ktime_t *stime,
3906     ktime_t *etime);
3907     +extern bool radeon_is_px(struct drm_device *dev);
3908     extern const struct drm_ioctl_desc radeon_ioctls_kms[];
3909     extern int radeon_max_kms_ioctl;
3910     int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
3911     @@ -142,11 +143,9 @@ void radeon_debugfs_cleanup(struct drm_minor *minor);
3912     #if defined(CONFIG_VGA_SWITCHEROO)
3913     void radeon_register_atpx_handler(void);
3914     void radeon_unregister_atpx_handler(void);
3915     -bool radeon_is_px(void);
3916     #else
3917     static inline void radeon_register_atpx_handler(void) {}
3918     static inline void radeon_unregister_atpx_handler(void) {}
3919     -static inline bool radeon_is_px(void) { return false; }
3920     #endif
3921    
3922     int radeon_no_wb;
3923     @@ -403,12 +402,7 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
3924     struct drm_device *drm_dev = pci_get_drvdata(pdev);
3925     int ret;
3926    
3927     - if (radeon_runtime_pm == 0) {
3928     - pm_runtime_forbid(dev);
3929     - return -EBUSY;
3930     - }
3931     -
3932     - if (radeon_runtime_pm == -1 && !radeon_is_px()) {
3933     + if (!radeon_is_px(drm_dev)) {
3934     pm_runtime_forbid(dev);
3935     return -EBUSY;
3936     }
3937     @@ -432,10 +426,7 @@ static int radeon_pmops_runtime_resume(struct device *dev)
3938     struct drm_device *drm_dev = pci_get_drvdata(pdev);
3939     int ret;
3940    
3941     - if (radeon_runtime_pm == 0)
3942     - return -EINVAL;
3943     -
3944     - if (radeon_runtime_pm == -1 && !radeon_is_px())
3945     + if (!radeon_is_px(drm_dev))
3946     return -EINVAL;
3947    
3948     drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
3949     @@ -460,14 +451,7 @@ static int radeon_pmops_runtime_idle(struct device *dev)
3950     struct drm_device *drm_dev = pci_get_drvdata(pdev);
3951     struct drm_crtc *crtc;
3952    
3953     - if (radeon_runtime_pm == 0) {
3954     - pm_runtime_forbid(dev);
3955     - return -EBUSY;
3956     - }
3957     -
3958     - /* are we PX enabled? */
3959     - if (radeon_runtime_pm == -1 && !radeon_is_px()) {
3960     - DRM_DEBUG_DRIVER("failing to power off - not px\n");
3961     + if (!radeon_is_px(drm_dev)) {
3962     pm_runtime_forbid(dev);
3963     return -EBUSY;
3964     }
3965     diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
3966     index 614ad549297f..9da5da4ffd17 100644
3967     --- a/drivers/gpu/drm/radeon/radeon_family.h
3968     +++ b/drivers/gpu/drm/radeon/radeon_family.h
3969     @@ -115,6 +115,7 @@ enum radeon_chip_flags {
3970     RADEON_NEW_MEMMAP = 0x00400000UL,
3971     RADEON_IS_PCI = 0x00800000UL,
3972     RADEON_IS_IGPGART = 0x01000000UL,
3973     + RADEON_IS_PX = 0x02000000UL,
3974     };
3975    
3976     #endif
3977     diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
3978     index 66ed3ea71440..ea34a31d3bc8 100644
3979     --- a/drivers/gpu/drm/radeon/radeon_kms.c
3980     +++ b/drivers/gpu/drm/radeon/radeon_kms.c
3981     @@ -35,9 +35,9 @@
3982     #include <linux/pm_runtime.h>
3983    
3984     #if defined(CONFIG_VGA_SWITCHEROO)
3985     -bool radeon_is_px(void);
3986     +bool radeon_has_atpx(void);
3987     #else
3988     -static inline bool radeon_is_px(void) { return false; }
3989     +static inline bool radeon_has_atpx(void) { return false; }
3990     #endif
3991    
3992     /**
3993     @@ -107,6 +107,11 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
3994     flags |= RADEON_IS_PCI;
3995     }
3996    
3997     + if ((radeon_runtime_pm != 0) &&
3998     + radeon_has_atpx() &&
3999     + ((flags & RADEON_IS_IGP) == 0))
4000     + flags |= RADEON_IS_PX;
4001     +
4002     /* radeon_device_init should report only fatal error
4003     * like memory allocation failure or iomapping failure,
4004     * or memory manager initialization failure, it must
4005     @@ -137,8 +142,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
4006     "Error during ACPI methods call\n");
4007     }
4008    
4009     - if ((radeon_runtime_pm == 1) ||
4010     - ((radeon_runtime_pm == -1) && radeon_is_px())) {
4011     + if (radeon_is_px(dev)) {
4012     pm_runtime_use_autosuspend(dev->dev);
4013     pm_runtime_set_autosuspend_delay(dev->dev, 5000);
4014     pm_runtime_set_active(dev->dev);
4015     diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
4016     index 8e8153e471c2..a957024eae24 100644
4017     --- a/drivers/gpu/drm/radeon/radeon_pm.c
4018     +++ b/drivers/gpu/drm/radeon/radeon_pm.c
4019     @@ -603,7 +603,6 @@ static const struct attribute_group *hwmon_groups[] = {
4020     static int radeon_hwmon_init(struct radeon_device *rdev)
4021     {
4022     int err = 0;
4023     - struct device *hwmon_dev;
4024    
4025     switch (rdev->pm.int_thermal_type) {
4026     case THERMAL_TYPE_RV6XX:
4027     @@ -616,11 +615,11 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
4028     case THERMAL_TYPE_KV:
4029     if (rdev->asic->pm.get_temperature == NULL)
4030     return err;
4031     - hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
4032     - "radeon", rdev,
4033     - hwmon_groups);
4034     - if (IS_ERR(hwmon_dev)) {
4035     - err = PTR_ERR(hwmon_dev);
4036     + rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
4037     + "radeon", rdev,
4038     + hwmon_groups);
4039     + if (IS_ERR(rdev->pm.int_hwmon_dev)) {
4040     + err = PTR_ERR(rdev->pm.int_hwmon_dev);
4041     dev_err(rdev->dev,
4042     "Unable to register hwmon device: %d\n", err);
4043     }
4044     @@ -632,6 +631,12 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
4045     return err;
4046     }
4047    
4048     +static void radeon_hwmon_fini(struct radeon_device *rdev)
4049     +{
4050     + if (rdev->pm.int_hwmon_dev)
4051     + hwmon_device_unregister(rdev->pm.int_hwmon_dev);
4052     +}
4053     +
4054     static void radeon_dpm_thermal_work_handler(struct work_struct *work)
4055     {
4056     struct radeon_device *rdev =
4057     @@ -1235,6 +1240,7 @@ int radeon_pm_init(struct radeon_device *rdev)
4058     case CHIP_RV670:
4059     case CHIP_RS780:
4060     case CHIP_RS880:
4061     + case CHIP_RV770:
4062     case CHIP_BARTS:
4063     case CHIP_TURKS:
4064     case CHIP_CAICOS:
4065     @@ -1251,7 +1257,6 @@ int radeon_pm_init(struct radeon_device *rdev)
4066     else
4067     rdev->pm.pm_method = PM_METHOD_PROFILE;
4068     break;
4069     - case CHIP_RV770:
4070     case CHIP_RV730:
4071     case CHIP_RV710:
4072     case CHIP_RV740:
4073     @@ -1331,6 +1336,8 @@ static void radeon_pm_fini_old(struct radeon_device *rdev)
4074     device_remove_file(rdev->dev, &dev_attr_power_method);
4075     }
4076    
4077     + radeon_hwmon_fini(rdev);
4078     +
4079     if (rdev->pm.power_state)
4080     kfree(rdev->pm.power_state);
4081     }
4082     @@ -1350,6 +1357,8 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev)
4083     }
4084     radeon_dpm_fini(rdev);
4085    
4086     + radeon_hwmon_fini(rdev);
4087     +
4088     if (rdev->pm.power_state)
4089     kfree(rdev->pm.power_state);
4090     }
4091     @@ -1375,12 +1384,14 @@ static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
4092    
4093     rdev->pm.active_crtcs = 0;
4094     rdev->pm.active_crtc_count = 0;
4095     - list_for_each_entry(crtc,
4096     - &ddev->mode_config.crtc_list, head) {
4097     - radeon_crtc = to_radeon_crtc(crtc);
4098     - if (radeon_crtc->enabled) {
4099     - rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
4100     - rdev->pm.active_crtc_count++;
4101     + if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
4102     + list_for_each_entry(crtc,
4103     + &ddev->mode_config.crtc_list, head) {
4104     + radeon_crtc = to_radeon_crtc(crtc);
4105     + if (radeon_crtc->enabled) {
4106     + rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
4107     + rdev->pm.active_crtc_count++;
4108     + }
4109     }
4110     }
4111    
4112     @@ -1447,12 +1458,14 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
4113     /* update active crtc counts */
4114     rdev->pm.dpm.new_active_crtcs = 0;
4115     rdev->pm.dpm.new_active_crtc_count = 0;
4116     - list_for_each_entry(crtc,
4117     - &ddev->mode_config.crtc_list, head) {
4118     - radeon_crtc = to_radeon_crtc(crtc);
4119     - if (crtc->enabled) {
4120     - rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
4121     - rdev->pm.dpm.new_active_crtc_count++;
4122     + if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
4123     + list_for_each_entry(crtc,
4124     + &ddev->mode_config.crtc_list, head) {
4125     + radeon_crtc = to_radeon_crtc(crtc);
4126     + if (crtc->enabled) {
4127     + rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
4128     + rdev->pm.dpm.new_active_crtc_count++;
4129     + }
4130     }
4131     }
4132    
4133     diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
4134     index a77cd274dfc3..58d12938c0b8 100644
4135     --- a/drivers/gpu/drm/radeon/radeon_ucode.h
4136     +++ b/drivers/gpu/drm/radeon/radeon_ucode.h
4137     @@ -57,9 +57,14 @@
4138     #define BTC_MC_UCODE_SIZE 6024
4139     #define CAYMAN_MC_UCODE_SIZE 6037
4140     #define SI_MC_UCODE_SIZE 7769
4141     +#define TAHITI_MC_UCODE_SIZE 7808
4142     +#define PITCAIRN_MC_UCODE_SIZE 7775
4143     +#define VERDE_MC_UCODE_SIZE 7875
4144     #define OLAND_MC_UCODE_SIZE 7863
4145     -#define CIK_MC_UCODE_SIZE 7866
4146     +#define BONAIRE_MC_UCODE_SIZE 7866
4147     +#define BONAIRE_MC2_UCODE_SIZE 7948
4148     #define HAWAII_MC_UCODE_SIZE 7933
4149     +#define HAWAII_MC2_UCODE_SIZE 8091
4150    
4151     /* SDMA */
4152     #define CIK_SDMA_UCODE_SIZE 1050
4153     diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
4154     index 3e6804b2b2ef..414e07928693 100644
4155     --- a/drivers/gpu/drm/radeon/radeon_uvd.c
4156     +++ b/drivers/gpu/drm/radeon/radeon_uvd.c
4157     @@ -465,6 +465,10 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
4158     cmd = radeon_get_ib_value(p, p->idx) >> 1;
4159    
4160     if (cmd < 0x4) {
4161     + if (end <= start) {
4162     + DRM_ERROR("invalid reloc offset %X!\n", offset);
4163     + return -EINVAL;
4164     + }
4165     if ((end - start) < buf_sizes[cmd]) {
4166     DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
4167     (unsigned)(end - start), buf_sizes[cmd]);
4168     diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
4169     index 9a124d0608b3..cb7508dc94f3 100644
4170     --- a/drivers/gpu/drm/radeon/si.c
4171     +++ b/drivers/gpu/drm/radeon/si.c
4172     @@ -39,30 +39,35 @@ MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
4173     MODULE_FIRMWARE("radeon/TAHITI_me.bin");
4174     MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
4175     MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
4176     +MODULE_FIRMWARE("radeon/TAHITI_mc2.bin");
4177     MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
4178     MODULE_FIRMWARE("radeon/TAHITI_smc.bin");
4179     MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
4180     MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
4181     MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
4182     MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
4183     +MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin");
4184     MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
4185     MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");
4186     MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
4187     MODULE_FIRMWARE("radeon/VERDE_me.bin");
4188     MODULE_FIRMWARE("radeon/VERDE_ce.bin");
4189     MODULE_FIRMWARE("radeon/VERDE_mc.bin");
4190     +MODULE_FIRMWARE("radeon/VERDE_mc2.bin");
4191     MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
4192     MODULE_FIRMWARE("radeon/VERDE_smc.bin");
4193     MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
4194     MODULE_FIRMWARE("radeon/OLAND_me.bin");
4195     MODULE_FIRMWARE("radeon/OLAND_ce.bin");
4196     MODULE_FIRMWARE("radeon/OLAND_mc.bin");
4197     +MODULE_FIRMWARE("radeon/OLAND_mc2.bin");
4198     MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
4199     MODULE_FIRMWARE("radeon/OLAND_smc.bin");
4200     MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
4201     MODULE_FIRMWARE("radeon/HAINAN_me.bin");
4202     MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
4203     MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
4204     +MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
4205     MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
4206     MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
4207    
4208     @@ -1467,36 +1472,33 @@ int si_mc_load_microcode(struct radeon_device *rdev)
4209     const __be32 *fw_data;
4210     u32 running, blackout = 0;
4211     u32 *io_mc_regs;
4212     - int i, ucode_size, regs_size;
4213     + int i, regs_size, ucode_size;
4214    
4215     if (!rdev->mc_fw)
4216     return -EINVAL;
4217    
4218     + ucode_size = rdev->mc_fw->size / 4;
4219     +
4220     switch (rdev->family) {
4221     case CHIP_TAHITI:
4222     io_mc_regs = (u32 *)&tahiti_io_mc_regs;
4223     - ucode_size = SI_MC_UCODE_SIZE;
4224     regs_size = TAHITI_IO_MC_REGS_SIZE;
4225     break;
4226     case CHIP_PITCAIRN:
4227     io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
4228     - ucode_size = SI_MC_UCODE_SIZE;
4229     regs_size = TAHITI_IO_MC_REGS_SIZE;
4230     break;
4231     case CHIP_VERDE:
4232     default:
4233     io_mc_regs = (u32 *)&verde_io_mc_regs;
4234     - ucode_size = SI_MC_UCODE_SIZE;
4235     regs_size = TAHITI_IO_MC_REGS_SIZE;
4236     break;
4237     case CHIP_OLAND:
4238     io_mc_regs = (u32 *)&oland_io_mc_regs;
4239     - ucode_size = OLAND_MC_UCODE_SIZE;
4240     regs_size = TAHITI_IO_MC_REGS_SIZE;
4241     break;
4242     case CHIP_HAINAN:
4243     io_mc_regs = (u32 *)&hainan_io_mc_regs;
4244     - ucode_size = OLAND_MC_UCODE_SIZE;
4245     regs_size = TAHITI_IO_MC_REGS_SIZE;
4246     break;
4247     }
4248     @@ -1552,7 +1554,7 @@ static int si_init_microcode(struct radeon_device *rdev)
4249     const char *chip_name;
4250     const char *rlc_chip_name;
4251     size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
4252     - size_t smc_req_size;
4253     + size_t smc_req_size, mc2_req_size;
4254     char fw_name[30];
4255     int err;
4256    
4257     @@ -1567,6 +1569,7 @@ static int si_init_microcode(struct radeon_device *rdev)
4258     ce_req_size = SI_CE_UCODE_SIZE * 4;
4259     rlc_req_size = SI_RLC_UCODE_SIZE * 4;
4260     mc_req_size = SI_MC_UCODE_SIZE * 4;
4261     + mc2_req_size = TAHITI_MC_UCODE_SIZE * 4;
4262     smc_req_size = ALIGN(TAHITI_SMC_UCODE_SIZE, 4);
4263     break;
4264     case CHIP_PITCAIRN:
4265     @@ -1577,6 +1580,7 @@ static int si_init_microcode(struct radeon_device *rdev)
4266     ce_req_size = SI_CE_UCODE_SIZE * 4;
4267     rlc_req_size = SI_RLC_UCODE_SIZE * 4;
4268     mc_req_size = SI_MC_UCODE_SIZE * 4;
4269     + mc2_req_size = PITCAIRN_MC_UCODE_SIZE * 4;
4270     smc_req_size = ALIGN(PITCAIRN_SMC_UCODE_SIZE, 4);
4271     break;
4272     case CHIP_VERDE:
4273     @@ -1587,6 +1591,7 @@ static int si_init_microcode(struct radeon_device *rdev)
4274     ce_req_size = SI_CE_UCODE_SIZE * 4;
4275     rlc_req_size = SI_RLC_UCODE_SIZE * 4;
4276     mc_req_size = SI_MC_UCODE_SIZE * 4;
4277     + mc2_req_size = VERDE_MC_UCODE_SIZE * 4;
4278     smc_req_size = ALIGN(VERDE_SMC_UCODE_SIZE, 4);
4279     break;
4280     case CHIP_OLAND:
4281     @@ -1596,7 +1601,7 @@ static int si_init_microcode(struct radeon_device *rdev)
4282     me_req_size = SI_PM4_UCODE_SIZE * 4;
4283     ce_req_size = SI_CE_UCODE_SIZE * 4;
4284     rlc_req_size = SI_RLC_UCODE_SIZE * 4;
4285     - mc_req_size = OLAND_MC_UCODE_SIZE * 4;
4286     + mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
4287     smc_req_size = ALIGN(OLAND_SMC_UCODE_SIZE, 4);
4288     break;
4289     case CHIP_HAINAN:
4290     @@ -1606,7 +1611,7 @@ static int si_init_microcode(struct radeon_device *rdev)
4291     me_req_size = SI_PM4_UCODE_SIZE * 4;
4292     ce_req_size = SI_CE_UCODE_SIZE * 4;
4293     rlc_req_size = SI_RLC_UCODE_SIZE * 4;
4294     - mc_req_size = OLAND_MC_UCODE_SIZE * 4;
4295     + mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
4296     smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4);
4297     break;
4298     default: BUG();
4299     @@ -1659,16 +1664,22 @@ static int si_init_microcode(struct radeon_device *rdev)
4300     err = -EINVAL;
4301     }
4302    
4303     - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
4304     + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
4305     err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
4306     - if (err)
4307     - goto out;
4308     - if (rdev->mc_fw->size != mc_req_size) {
4309     + if (err) {
4310     + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
4311     + err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
4312     + if (err)
4313     + goto out;
4314     + }
4315     + if ((rdev->mc_fw->size != mc_req_size) &&
4316     + (rdev->mc_fw->size != mc2_req_size)) {
4317     printk(KERN_ERR
4318     "si_mc: Bogus length %zu in firmware \"%s\"\n",
4319     rdev->mc_fw->size, fw_name);
4320     err = -EINVAL;
4321     }
4322     + DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
4323    
4324     snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
4325     err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
4326     @@ -5777,7 +5788,6 @@ int si_irq_set(struct radeon_device *rdev)
4327     u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
4328     u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
4329     u32 grbm_int_cntl = 0;
4330     - u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
4331     u32 dma_cntl, dma_cntl1;
4332     u32 thermal_int = 0;
4333    
4334     @@ -5916,16 +5926,22 @@ int si_irq_set(struct radeon_device *rdev)
4335     }
4336    
4337     if (rdev->num_crtc >= 2) {
4338     - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
4339     - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
4340     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
4341     + GRPH_PFLIP_INT_MASK);
4342     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
4343     + GRPH_PFLIP_INT_MASK);
4344     }
4345     if (rdev->num_crtc >= 4) {
4346     - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
4347     - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
4348     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
4349     + GRPH_PFLIP_INT_MASK);
4350     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
4351     + GRPH_PFLIP_INT_MASK);
4352     }
4353     if (rdev->num_crtc >= 6) {
4354     - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
4355     - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
4356     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
4357     + GRPH_PFLIP_INT_MASK);
4358     + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
4359     + GRPH_PFLIP_INT_MASK);
4360     }
4361    
4362     if (!ASIC_IS_NODCE(rdev)) {
4363     @@ -6289,6 +6305,15 @@ restart_ih:
4364     break;
4365     }
4366     break;
4367     + case 8: /* D1 page flip */
4368     + case 10: /* D2 page flip */
4369     + case 12: /* D3 page flip */
4370     + case 14: /* D4 page flip */
4371     + case 16: /* D5 page flip */
4372     + case 18: /* D6 page flip */
4373     + DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
4374     + radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
4375     + break;
4376     case 42: /* HPD hotplug */
4377     switch (src_data) {
4378     case 0:
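
The MC microcode hunks above switch to a "try the new blob, fall back to the old name" load order and then accept either of the two valid lengths. A rough userspace analogue of that control flow, assuming made-up file names and sizes (the kernel code uses request_firmware() rather than stdio):

    #include <stdio.h>

    /* Try the newer image first, then fall back to the legacy name. */
    static FILE *open_mc_firmware(const char *chip, char *path, size_t len)
    {
            FILE *f;

            snprintf(path, len, "radeon/%s_mc2.bin", chip);
            f = fopen(path, "rb");
            if (!f) {
                    snprintf(path, len, "radeon/%s_mc.bin", chip);
                    f = fopen(path, "rb");
            }
            return f;
    }

    int main(void)
    {
            char path[64];
            long size, mc_req = 7773 * 4, mc2_req = 7808 * 4; /* placeholder sizes */
            FILE *f = open_mc_firmware("TAHITI", path, sizeof(path));

            if (!f)
                    return 1;
            fseek(f, 0, SEEK_END);
            size = ftell(f);
            fclose(f);

            /* Either generation of blob is acceptable; anything else is bogus. */
            if (size != mc_req && size != mc2_req)
                    printf("bogus length %ld in \"%s\"\n", size, path);
            else
                    printf("%s: %ld bytes\n", path, size);
            return 0;
    }
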
4379     diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
4380     index d4a68af1a279..c310a0aeebb9 100644
4381     --- a/drivers/gpu/drm/radeon/uvd_v1_0.c
4382     +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
4383     @@ -83,7 +83,10 @@ int uvd_v1_0_init(struct radeon_device *rdev)
4384     int r;
4385    
4386     /* raise clocks while booting up the VCPU */
4387     - radeon_set_uvd_clocks(rdev, 53300, 40000);
4388     + if (rdev->family < CHIP_RV740)
4389     + radeon_set_uvd_clocks(rdev, 10000, 10000);
4390     + else
4391     + radeon_set_uvd_clocks(rdev, 53300, 40000);
4392    
4393     r = uvd_v1_0_start(rdev);
4394     if (r)
4395     @@ -407,7 +410,10 @@ int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
4396     struct radeon_fence *fence = NULL;
4397     int r;
4398    
4399     - r = radeon_set_uvd_clocks(rdev, 53300, 40000);
4400     + if (rdev->family < CHIP_RV740)
4401     + r = radeon_set_uvd_clocks(rdev, 10000, 10000);
4402     + else
4403     + r = radeon_set_uvd_clocks(rdev, 53300, 40000);
4404     if (r) {
4405     DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
4406     return r;
4407     diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
4408     index db9017adfe2b..498b37e39058 100644
4409     --- a/drivers/gpu/host1x/hw/intr_hw.c
4410     +++ b/drivers/gpu/host1x/hw/intr_hw.c
4411     @@ -47,7 +47,7 @@ static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
4412     unsigned long reg;
4413     int i, id;
4414    
4415     - for (i = 0; i <= BIT_WORD(host->info->nb_pts); i++) {
4416     + for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); i++) {
4417     reg = host1x_sync_readl(host,
4418     HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
4419     for_each_set_bit(id, &reg, BITS_PER_LONG) {
4420     @@ -64,7 +64,7 @@ static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
4421     {
4422     u32 i;
4423    
4424     - for (i = 0; i <= BIT_WORD(host->info->nb_pts); ++i) {
4425     + for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); ++i) {
4426     host1x_sync_writel(host, 0xffffffffu,
4427     HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i));
4428     host1x_sync_writel(host, 0xffffffffu,
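
The new bound above treats each THRESH register as covering 32 syncpoints, so the register count is DIV_ROUND_UP(nb_pts, 32). The old bound, i <= BIT_WORD(nb_pts), depends on BITS_PER_LONG and walks one register too many whenever nb_pts is an exact multiple of the word size. A small standalone comparison, assuming the 32-bit configuration this Tegra driver runs on (macros copied in the kernel's form):

    #include <stdio.h>

    #define BITS_PER_LONG 32                        /* 32-bit ARM assumption */
    #define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int nb_pts[] = { 32, 48, 64 };

            for (unsigned int k = 0; k < 3; k++) {
                    unsigned int n = nb_pts[k];
                    unsigned int old_iters = BIT_WORD(n) + 1;     /* i <= BIT_WORD(n)      */
                    unsigned int new_iters = DIV_ROUND_UP(n, 32); /* i < DIV_ROUND_UP(n,32) */

                    printf("nb_pts=%u: old bound walks %u registers, correct count is %u\n",
                           n, old_iters, new_iters);
            }
            return 0;
    }
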
4429     diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
4430     index bbb0b0d463f7..15993103c1a2 100644
4431     --- a/drivers/hwmon/coretemp.c
4432     +++ b/drivers/hwmon/coretemp.c
4433     @@ -369,12 +369,12 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
4434     if (cpu_has_tjmax(c))
4435     dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
4436     } else {
4437     - val = (eax >> 16) & 0x7f;
4438     + val = (eax >> 16) & 0xff;
4439     /*
4440     * If the TjMax is not plausible, an assumption
4441     * will be used
4442     */
4443     - if (val >= 85) {
4444     + if (val) {
4445     dev_dbg(dev, "TjMax is %d degrees C\n", val);
4446     return val * 1000;
4447     }
4448     diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
4449     index 90ec1173b8a1..01723f04fe45 100644
4450     --- a/drivers/hwmon/emc1403.c
4451     +++ b/drivers/hwmon/emc1403.c
4452     @@ -163,7 +163,7 @@ static ssize_t store_hyst(struct device *dev,
4453     if (retval < 0)
4454     goto fail;
4455    
4456     - hyst = val - retval * 1000;
4457     + hyst = retval * 1000 - val;
4458     hyst = DIV_ROUND_CLOSEST(hyst, 1000);
4459     if (hyst < 0 || hyst > 255) {
4460     retval = -ERANGE;
4461     @@ -330,7 +330,7 @@ static int emc1403_detect(struct i2c_client *client,
4462     }
4463    
4464     id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
4465     - if (id != 0x01)
4466     + if (id < 0x01 || id > 0x04)
4467     return -ENODEV;
4468    
4469     return 0;
4470     @@ -355,9 +355,9 @@ static int emc1403_probe(struct i2c_client *client,
4471     if (id->driver_data)
4472     data->groups[1] = &emc1404_group;
4473    
4474     - hwmon_dev = hwmon_device_register_with_groups(&client->dev,
4475     - client->name, data,
4476     - data->groups);
4477     + hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
4478     + client->name, data,
4479     + data->groups);
4480     if (IS_ERR(hwmon_dev))
4481     return PTR_ERR(hwmon_dev);
4482    
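
The sign flip in store_hyst above matters because, judging by how the surrounding driver reads it back, the hysteresis register holds how far below the limit the hysteresis point sits; the value to program is therefore limit minus the requested point, not the reverse. A tiny worked example with made-up numbers (85 degC limit, 75 degC requested hysteresis point):

    #include <stdio.h>

    int main(void)
    {
            long limit = 85;        /* degrees C, as read back from the chip    */
            long val = 75000;       /* requested hysteresis point, millidegrees */

            long old_hyst = (val - limit * 1000) / 1000;  /* buggy sign: -10 */
            long new_hyst = (limit * 1000 - val) / 1000;  /* fixed:       10 */

            /* Only the fixed form lands in the register's 0..255 range. */
            printf("old formula: %ld (out of range), fixed formula: %ld degrees below limit\n",
                   old_hyst, new_hyst);
            return 0;
    }
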
4483     diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
4484     index de17c5593d97..c5eec0290b37 100644
4485     --- a/drivers/i2c/busses/Kconfig
4486     +++ b/drivers/i2c/busses/Kconfig
4487     @@ -110,6 +110,7 @@ config I2C_I801
4488     Wellsburg (PCH)
4489     Coleto Creek (PCH)
4490     Wildcat Point-LP (PCH)
4491     + BayTrail (SOC)
4492    
4493     This driver can also be built as a module. If so, the module
4494     will be called i2c-i801.
4495     diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
4496     index 14c4b30d4ccc..d95b93016b0d 100644
4497     --- a/drivers/i2c/busses/i2c-designware-core.c
4498     +++ b/drivers/i2c/busses/i2c-designware-core.c
4499     @@ -417,6 +417,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
4500     */
4501     dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
4502    
4503     + /* enforce disabled interrupts (due to HW issues) */
4504     + i2c_dw_disable_int(dev);
4505     +
4506     /* Enable the adapter */
4507     __i2c_dw_enable(dev, true);
4508    
4509     diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
4510     index 349c2d35e792..899f55937ca6 100644
4511     --- a/drivers/i2c/busses/i2c-i801.c
4512     +++ b/drivers/i2c/busses/i2c-i801.c
4513     @@ -60,6 +60,7 @@
4514     Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes
4515     Coleto Creek (PCH) 0x23b0 32 hard yes yes yes
4516     Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes
4517     + BayTrail (SOC) 0x0f12 32 hard yes yes yes
4518    
4519     Features supported by this driver:
4520     Software PEC no
4521     @@ -161,6 +162,7 @@
4522     STATUS_ERROR_FLAGS)
4523    
4524     /* Older devices have their ID defined in <linux/pci_ids.h> */
4525     +#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12
4526     #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
4527     #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22
4528     /* Patsburg also has three 'Integrated Device Function' SMBus controllers */
4529     @@ -822,6 +824,7 @@ static DEFINE_PCI_DEVICE_TABLE(i801_ids) = {
4530     { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) },
4531     { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) },
4532     { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) },
4533     + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS) },
4534     { 0, }
4535     };
4536    
4537     diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
4538     index 0282d4d42805..d3c5d6216575 100644
4539     --- a/drivers/i2c/busses/i2c-rcar.c
4540     +++ b/drivers/i2c/busses/i2c-rcar.c
4541     @@ -561,6 +561,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
4542    
4543     ret = -EINVAL;
4544     for (i = 0; i < num; i++) {
4545     + /* This HW can't send STOP after address phase */
4546     + if (msgs[i].len == 0) {
4547     + ret = -EOPNOTSUPP;
4548     + break;
4549     + }
4550     +
4551     /*-------------- spin lock -----------------*/
4552     spin_lock_irqsave(&priv->lock, flags);
4553    
4554     @@ -625,7 +631,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
4555    
4556     static u32 rcar_i2c_func(struct i2c_adapter *adap)
4557     {
4558     - return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
4559     + /* This HW can't do SMBUS_QUICK and NOSTART */
4560     + return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
4561     }
4562    
4563     static const struct i2c_algorithm rcar_i2c_algo = {
4564     diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
4565     index 684d21e71e4a..93ec37649346 100644
4566     --- a/drivers/i2c/busses/i2c-s3c2410.c
4567     +++ b/drivers/i2c/busses/i2c-s3c2410.c
4568     @@ -1247,10 +1247,10 @@ static int s3c24xx_i2c_resume(struct device *dev)
4569     struct platform_device *pdev = to_platform_device(dev);
4570     struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
4571    
4572     - i2c->suspended = 0;
4573     clk_prepare_enable(i2c->clk);
4574     s3c24xx_i2c_init(i2c);
4575     clk_disable_unprepare(i2c->clk);
4576     + i2c->suspended = 0;
4577    
4578     return 0;
4579     }
4580     diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
4581     index df7f1e1157ae..27a91768cc72 100644
4582     --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
4583     +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
4584     @@ -661,6 +661,7 @@ static int inv_mpu_probe(struct i2c_client *client,
4585     {
4586     struct inv_mpu6050_state *st;
4587     struct iio_dev *indio_dev;
4588     + struct inv_mpu6050_platform_data *pdata;
4589     int result;
4590    
4591     if (!i2c_check_functionality(client->adapter,
4592     @@ -673,8 +674,10 @@ static int inv_mpu_probe(struct i2c_client *client,
4593    
4594     st = iio_priv(indio_dev);
4595     st->client = client;
4596     - st->plat_data = *(struct inv_mpu6050_platform_data
4597     - *)dev_get_platdata(&client->dev);
4598     + pdata = (struct inv_mpu6050_platform_data
4599     + *)dev_get_platdata(&client->dev);
4600     + if (pdata)
4601     + st->plat_data = *pdata;
4602     /* power is turned on inside check chip type*/
4603     result = inv_check_and_setup_chip(st, id);
4604     if (result)
4605     diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
4606     index f596b413a35b..51766b3bc678 100644
4607     --- a/drivers/infiniband/ulp/isert/ib_isert.c
4608     +++ b/drivers/infiniband/ulp/isert/ib_isert.c
4609     @@ -28,6 +28,7 @@
4610     #include <target/target_core_base.h>
4611     #include <target/target_core_fabric.h>
4612     #include <target/iscsi/iscsi_transport.h>
4613     +#include <linux/semaphore.h>
4614    
4615     #include "isert_proto.h"
4616     #include "ib_isert.h"
4617     @@ -582,11 +583,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
4618     goto out_conn_dev;
4619    
4620     mutex_lock(&isert_np->np_accept_mutex);
4621     - list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node);
4622     + list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
4623     mutex_unlock(&isert_np->np_accept_mutex);
4624    
4625     - pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
4626     - wake_up(&isert_np->np_accept_wq);
4627     + pr_debug("isert_connect_request() up np_sem np: %p\n", np);
4628     + up(&isert_np->np_sem);
4629     return 0;
4630    
4631     out_conn_dev:
4632     @@ -2569,7 +2570,7 @@ isert_setup_np(struct iscsi_np *np,
4633     pr_err("Unable to allocate struct isert_np\n");
4634     return -ENOMEM;
4635     }
4636     - init_waitqueue_head(&isert_np->np_accept_wq);
4637     + sema_init(&isert_np->np_sem, 0);
4638     mutex_init(&isert_np->np_accept_mutex);
4639     INIT_LIST_HEAD(&isert_np->np_accept_list);
4640     init_completion(&isert_np->np_login_comp);
4641     @@ -2618,18 +2619,6 @@ out:
4642     }
4643    
4644     static int
4645     -isert_check_accept_queue(struct isert_np *isert_np)
4646     -{
4647     - int empty;
4648     -
4649     - mutex_lock(&isert_np->np_accept_mutex);
4650     - empty = list_empty(&isert_np->np_accept_list);
4651     - mutex_unlock(&isert_np->np_accept_mutex);
4652     -
4653     - return empty;
4654     -}
4655     -
4656     -static int
4657     isert_rdma_accept(struct isert_conn *isert_conn)
4658     {
4659     struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
4660     @@ -2721,16 +2710,14 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
4661     int max_accept = 0, ret;
4662    
4663     accept_wait:
4664     - ret = wait_event_interruptible(isert_np->np_accept_wq,
4665     - !isert_check_accept_queue(isert_np) ||
4666     - np->np_thread_state == ISCSI_NP_THREAD_RESET);
4667     + ret = down_interruptible(&isert_np->np_sem);
4668     if (max_accept > 5)
4669     return -ENODEV;
4670    
4671     spin_lock_bh(&np->np_thread_lock);
4672     if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
4673     spin_unlock_bh(&np->np_thread_lock);
4674     - pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
4675     + pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
4676     return -ENODEV;
4677     }
4678     spin_unlock_bh(&np->np_thread_lock);
4679     diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
4680     index f6ae7f5dd408..c3578f6091d2 100644
4681     --- a/drivers/infiniband/ulp/isert/ib_isert.h
4682     +++ b/drivers/infiniband/ulp/isert/ib_isert.h
4683     @@ -158,7 +158,7 @@ struct isert_device {
4684     };
4685    
4686     struct isert_np {
4687     - wait_queue_head_t np_accept_wq;
4688     + struct semaphore np_sem;
4689     struct rdma_cm_id *np_cm_id;
4690     struct mutex np_accept_mutex;
4691     struct list_head np_accept_list;
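
The isert changes above replace a wait queue plus a locked list_empty() helper with a counting semaphore: each queued connection does one up(), and the accept path blocks in down_interruptible() until at least that many posts have happened, so the semaphore count itself tracks the pending connections. A rough userspace analogue using POSIX semaphores (the kernel code uses struct semaphore with sema_init()/up()/down_interruptible(); names below are illustrative):

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>

    static sem_t np_sem;            /* counts queued connections   */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int queued;              /* stand-in for np_accept_list */

    static void *producer(void *arg)
    {
            (void)arg;
            for (int i = 0; i < 3; i++) {
                    pthread_mutex_lock(&lock);
                    queued++;                       /* list_add_tail()        */
                    pthread_mutex_unlock(&lock);
                    sem_post(&np_sem);              /* up(&isert_np->np_sem)  */
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            sem_init(&np_sem, 0, 0);                /* sema_init(&np_sem, 0)  */
            pthread_create(&t, NULL, producer, NULL);

            for (int i = 0; i < 3; i++) {
                    sem_wait(&np_sem);              /* down_interruptible()   */
                    pthread_mutex_lock(&lock);
                    queued--;                       /* pop one queued entry   */
                    pthread_mutex_unlock(&lock);
                    printf("accepted connection %d\n", i);
            }

            pthread_join(t, NULL);
            return 0;
    }
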
4692     diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
4693     index 2626773ff29b..2dd1d0dd4f7d 100644
4694     --- a/drivers/input/keyboard/atkbd.c
4695     +++ b/drivers/input/keyboard/atkbd.c
4696     @@ -243,6 +243,12 @@ static void (*atkbd_platform_fixup)(struct atkbd *, const void *data);
4697     static void *atkbd_platform_fixup_data;
4698     static unsigned int (*atkbd_platform_scancode_fixup)(struct atkbd *, unsigned int);
4699    
4700     +/*
4701     + * Certain keyboards do not like ATKBD_CMD_RESET_DIS and stop responding
4702     + * to many commands until full reset (ATKBD_CMD_RESET_BAT) is performed.
4703     + */
4704     +static bool atkbd_skip_deactivate;
4705     +
4706     static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf,
4707     ssize_t (*handler)(struct atkbd *, char *));
4708     static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count,
4709     @@ -768,7 +774,8 @@ static int atkbd_probe(struct atkbd *atkbd)
4710     * Make sure nothing is coming from the keyboard and disturbs our
4711     * internal state.
4712     */
4713     - atkbd_deactivate(atkbd);
4714     + if (!atkbd_skip_deactivate)
4715     + atkbd_deactivate(atkbd);
4716    
4717     return 0;
4718     }
4719     @@ -1638,6 +1645,12 @@ static int __init atkbd_setup_scancode_fixup(const struct dmi_system_id *id)
4720     return 1;
4721     }
4722    
4723     +static int __init atkbd_deactivate_fixup(const struct dmi_system_id *id)
4724     +{
4725     + atkbd_skip_deactivate = true;
4726     + return 1;
4727     +}
4728     +
4729     static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
4730     {
4731     .matches = {
4732     @@ -1775,6 +1788,20 @@ static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
4733     .callback = atkbd_setup_scancode_fixup,
4734     .driver_data = atkbd_oqo_01plus_scancode_fixup,
4735     },
4736     + {
4737     + .matches = {
4738     + DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
4739     + DMI_MATCH(DMI_PRODUCT_NAME, "LW25-B7HV"),
4740     + },
4741     + .callback = atkbd_deactivate_fixup,
4742     + },
4743     + {
4744     + .matches = {
4745     + DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
4746     + DMI_MATCH(DMI_PRODUCT_NAME, "P1-J273B"),
4747     + },
4748     + .callback = atkbd_deactivate_fixup,
4749     + },
4750     { }
4751     };
4752    
4753     diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
4754     index ef1cf52f8bb9..230cdcf8e6fe 100644
4755     --- a/drivers/input/mouse/elantech.c
4756     +++ b/drivers/input/mouse/elantech.c
4757     @@ -11,6 +11,7 @@
4758     */
4759    
4760     #include <linux/delay.h>
4761     +#include <linux/dmi.h>
4762     #include <linux/slab.h>
4763     #include <linux/module.h>
4764     #include <linux/input.h>
4765     @@ -831,7 +832,11 @@ static int elantech_set_absolute_mode(struct psmouse *psmouse)
4766     break;
4767    
4768     case 3:
4769     - etd->reg_10 = 0x0b;
4770     + if (etd->set_hw_resolution)
4771     + etd->reg_10 = 0x0b;
4772     + else
4773     + etd->reg_10 = 0x03;
4774     +
4775     if (elantech_write_reg(psmouse, 0x10, etd->reg_10))
4776     rc = -1;
4777    
4778     @@ -1331,6 +1336,22 @@ static int elantech_reconnect(struct psmouse *psmouse)
4779     }
4780    
4781     /*
4782     + * Some hw_version 3 models go into error state when we try to set bit 3 of r10
4783     + */
4784     +static const struct dmi_system_id no_hw_res_dmi_table[] = {
4785     +#if defined(CONFIG_DMI) && defined(CONFIG_X86)
4786     + {
4787     + /* Gigabyte U2442 */
4788     + .matches = {
4789     + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
4790     + DMI_MATCH(DMI_PRODUCT_NAME, "U2442"),
4791     + },
4792     + },
4793     +#endif
4794     + { }
4795     +};
4796     +
4797     +/*
4798     * determine hardware version and set some properties according to it.
4799     */
4800     static int elantech_set_properties(struct elantech_data *etd)
4801     @@ -1389,6 +1410,9 @@ static int elantech_set_properties(struct elantech_data *etd)
4802     */
4803     etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000);
4804    
4805     + /* Enable real hardware resolution on hw_version 3 ? */
4806     + etd->set_hw_resolution = !dmi_check_system(no_hw_res_dmi_table);
4807     +
4808     return 0;
4809     }
4810    
4811     diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
4812     index 036a04abaef7..9e0e2a1f340d 100644
4813     --- a/drivers/input/mouse/elantech.h
4814     +++ b/drivers/input/mouse/elantech.h
4815     @@ -130,6 +130,7 @@ struct elantech_data {
4816     bool jumpy_cursor;
4817     bool reports_pressure;
4818     bool crc_enabled;
4819     + bool set_hw_resolution;
4820     unsigned char hw_version;
4821     unsigned int fw_version;
4822     unsigned int single_finger_reports;
4823     diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
4824     index 3c511c4adaca..c5ec703c727e 100644
4825     --- a/drivers/input/mouse/synaptics.c
4826     +++ b/drivers/input/mouse/synaptics.c
4827     @@ -117,6 +117,81 @@ void synaptics_reset(struct psmouse *psmouse)
4828     }
4829    
4830     #ifdef CONFIG_MOUSE_PS2_SYNAPTICS
4831     +struct min_max_quirk {
4832     + const char * const *pnp_ids;
4833     + int x_min, x_max, y_min, y_max;
4834     +};
4835     +
4836     +static const struct min_max_quirk min_max_pnpid_table[] = {
4837     + {
4838     + (const char * const []){"LEN0033", NULL},
4839     + 1024, 5052, 2258, 4832
4840     + },
4841     + {
4842     + (const char * const []){"LEN0035", "LEN0042", NULL},
4843     + 1232, 5710, 1156, 4696
4844     + },
4845     + {
4846     + (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
4847     + 1024, 5112, 2024, 4832
4848     + },
4849     + {
4850     + (const char * const []){"LEN2001", NULL},
4851     + 1024, 5022, 2508, 4832
4852     + },
4853     + { }
4854     +};
4855     +
4856     +/* This list has been kindly provided by Synaptics. */
4857     +static const char * const topbuttonpad_pnp_ids[] = {
4858     + "LEN0017",
4859     + "LEN0018",
4860     + "LEN0019",
4861     + "LEN0023",
4862     + "LEN002A",
4863     + "LEN002B",
4864     + "LEN002C",
4865     + "LEN002D",
4866     + "LEN002E",
4867     + "LEN0033", /* Helix */
4868     + "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
4869     + "LEN0035", /* X240 */
4870     + "LEN0036", /* T440 */
4871     + "LEN0037",
4872     + "LEN0038",
4873     + "LEN0041",
4874     + "LEN0042", /* Yoga */
4875     + "LEN0045",
4876     + "LEN0046",
4877     + "LEN0047",
4878     + "LEN0048",
4879     + "LEN0049",
4880     + "LEN2000",
4881     + "LEN2001", /* Edge E431 */
4882     + "LEN2002",
4883     + "LEN2003",
4884     + "LEN2004", /* L440 */
4885     + "LEN2005",
4886     + "LEN2006",
4887     + "LEN2007",
4888     + "LEN2008",
4889     + "LEN2009",
4890     + "LEN200A",
4891     + "LEN200B",
4892     + NULL
4893     +};
4894     +
4895     +static bool matches_pnp_id(struct psmouse *psmouse, const char * const ids[])
4896     +{
4897     + int i;
4898     +
4899     + if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4))
4900     + for (i = 0; ids[i]; i++)
4901     + if (strstr(psmouse->ps2dev.serio->firmware_id, ids[i]))
4902     + return true;
4903     +
4904     + return false;
4905     +}
4906    
4907     /*****************************************************************************
4908     * Synaptics communications functions
4909     @@ -266,20 +341,20 @@ static int synaptics_identify(struct psmouse *psmouse)
4910     * Resolution is left zero if touchpad does not support the query
4911     */
4912    
4913     -static const int *quirk_min_max;
4914     -
4915     static int synaptics_resolution(struct psmouse *psmouse)
4916     {
4917     struct synaptics_data *priv = psmouse->private;
4918     unsigned char resp[3];
4919     + int i;
4920    
4921     - if (quirk_min_max) {
4922     - priv->x_min = quirk_min_max[0];
4923     - priv->x_max = quirk_min_max[1];
4924     - priv->y_min = quirk_min_max[2];
4925     - priv->y_max = quirk_min_max[3];
4926     - return 0;
4927     - }
4928     + for (i = 0; min_max_pnpid_table[i].pnp_ids; i++)
4929     + if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) {
4930     + priv->x_min = min_max_pnpid_table[i].x_min;
4931     + priv->x_max = min_max_pnpid_table[i].x_max;
4932     + priv->y_min = min_max_pnpid_table[i].y_min;
4933     + priv->y_max = min_max_pnpid_table[i].y_max;
4934     + return 0;
4935     + }
4936    
4937     if (SYN_ID_MAJOR(priv->identity) < 4)
4938     return 0;
4939     @@ -1255,8 +1330,10 @@ static void set_abs_position_params(struct input_dev *dev,
4940     input_abs_set_res(dev, y_code, priv->y_res);
4941     }
4942    
4943     -static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
4944     +static void set_input_params(struct psmouse *psmouse,
4945     + struct synaptics_data *priv)
4946     {
4947     + struct input_dev *dev = psmouse->dev;
4948     int i;
4949    
4950     /* Things that apply to both modes */
4951     @@ -1325,6 +1402,8 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
4952    
4953     if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
4954     __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
4955     + if (matches_pnp_id(psmouse, topbuttonpad_pnp_ids))
4956     + __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit);
4957     /* Clickpads report only left button */
4958     __clear_bit(BTN_RIGHT, dev->keybit);
4959     __clear_bit(BTN_MIDDLE, dev->keybit);
4960     @@ -1496,104 +1575,10 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
4961     { }
4962     };
4963    
4964     -static const struct dmi_system_id min_max_dmi_table[] __initconst = {
4965     -#if defined(CONFIG_DMI)
4966     - {
4967     - /* Lenovo ThinkPad Helix */
4968     - .matches = {
4969     - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
4970     - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
4971     - },
4972     - .driver_data = (int []){1024, 5052, 2258, 4832},
4973     - },
4974     - {
4975     - /* Lenovo ThinkPad X240 */
4976     - .matches = {
4977     - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
4978     - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
4979     - },
4980     - .driver_data = (int []){1232, 5710, 1156, 4696},
4981     - },
4982     - {
4983     - /* Lenovo ThinkPad Edge E431 */
4984     - .matches = {
4985     - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
4986     - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E431"),
4987     - },
4988     - .driver_data = (int []){1024, 5022, 2508, 4832},
4989     - },
4990     - {
4991     - /* Lenovo ThinkPad T431s */
4992     - .matches = {
4993     - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
4994     - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"),
4995     - },
4996     - .driver_data = (int []){1024, 5112, 2024, 4832},
4997     - },
4998     - {
4999     - /* Lenovo ThinkPad T440s */
5000     - .matches = {
5001     - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
5002     - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
5003     - },
5004     - .driver_data = (int []){1024, 5112, 2024, 4832},
5005     - },
5006     - {
5007     - /* Lenovo ThinkPad L440 */
5008     - .matches = {
5009     - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
5010     - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"),
5011     - },
5012     - .driver_data = (int []){1024, 5112, 2024, 4832},
5013     - },
5014     - {
5015     - /* Lenovo ThinkPad T540p */
5016     - .matches = {
5017     - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
5018     - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
5019     - },
5020     - .driver_data = (int []){1024, 5056, 2058, 4832},
5021     - },
5022     - {
5023     - /* Lenovo ThinkPad L540 */
5024     - .matches = {
5025     - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
5026     - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"),
5027     - },
5028     - .driver_data = (int []){1024, 5112, 2024, 4832},
5029     - },
5030     - {
5031     - /* Lenovo Yoga S1 */
5032     - .matches = {
5033     - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
5034     - DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
5035     - "ThinkPad S1 Yoga"),
5036     - },
5037     - .driver_data = (int []){1232, 5710, 1156, 4696},
5038     - },
5039     - {
5040     - /* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */
5041     - .matches = {
5042     - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
5043     - DMI_MATCH(DMI_PRODUCT_VERSION,
5044     - "ThinkPad X1 Carbon 2nd"),
5045     - },
5046     - .driver_data = (int []){1024, 5112, 2024, 4832},
5047     - },
5048     -#endif
5049     - { }
5050     -};
5051     -
5052     void __init synaptics_module_init(void)
5053     {
5054     - const struct dmi_system_id *min_max_dmi;
5055     -
5056     impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
5057     broken_olpc_ec = dmi_check_system(olpc_dmi_table);
5058     -
5059     - min_max_dmi = dmi_first_match(min_max_dmi_table);
5060     - if (min_max_dmi)
5061     - quirk_min_max = min_max_dmi->driver_data;
5062     }
5063    
5064     static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
5065     @@ -1643,7 +1628,7 @@ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
5066     priv->capabilities, priv->ext_cap, priv->ext_cap_0c,
5067     priv->board_id, priv->firmware_id);
5068    
5069     - set_input_params(psmouse->dev, priv);
5070     + set_input_params(psmouse, priv);
5071    
5072     /*
5073     * Encode touchpad model so that it can be used to set
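
The new matches_pnp_id() helper above keys off the firmware_id string that the i8042/serio hunks later in this patch start exporting, e.g. "PNP: LEN0036 PNP0f13". A standalone sketch of the same matching logic (the sample firmware_id value is illustrative):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool matches_pnp_id(const char *firmware_id, const char *const ids[])
    {
            /* Only trust the string if it really is a PNP id list. */
            if (strncmp(firmware_id, "PNP:", 4) != 0)
                    return false;

            for (int i = 0; ids[i]; i++)
                    if (strstr(firmware_id, ids[i]))
                            return true;

            return false;
    }

    int main(void)
    {
            const char *const topbuttonpad_ids[] = { "LEN0034", "LEN0036", "LEN2004", NULL };
            const char *firmware_id = "PNP: LEN0036 PNP0f13";  /* example value */

            printf("top-button pad quirk applies: %s\n",
                   matches_pnp_id(firmware_id, topbuttonpad_ids) ? "yes" : "no");
            return 0;
    }
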
5074     diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
5075     index 0ec9abbe31fe..381b20d4c561 100644
5076     --- a/drivers/input/serio/i8042-x86ia64io.h
5077     +++ b/drivers/input/serio/i8042-x86ia64io.h
5078     @@ -702,6 +702,17 @@ static int i8042_pnp_aux_irq;
5079     static char i8042_pnp_kbd_name[32];
5080     static char i8042_pnp_aux_name[32];
5081    
5082     +static void i8042_pnp_id_to_string(struct pnp_id *id, char *dst, int dst_size)
5083     +{
5084     + strlcpy(dst, "PNP:", dst_size);
5085     +
5086     + while (id) {
5087     + strlcat(dst, " ", dst_size);
5088     + strlcat(dst, id->id, dst_size);
5089     + id = id->next;
5090     + }
5091     +}
5092     +
5093     static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *did)
5094     {
5095     if (pnp_port_valid(dev, 0) && pnp_port_len(dev, 0) == 1)
5096     @@ -718,6 +729,8 @@ static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *
5097     strlcat(i8042_pnp_kbd_name, ":", sizeof(i8042_pnp_kbd_name));
5098     strlcat(i8042_pnp_kbd_name, pnp_dev_name(dev), sizeof(i8042_pnp_kbd_name));
5099     }
5100     + i8042_pnp_id_to_string(dev->id, i8042_kbd_firmware_id,
5101     + sizeof(i8042_kbd_firmware_id));
5102    
5103     /* Keyboard ports are always supposed to be wakeup-enabled */
5104     device_set_wakeup_enable(&dev->dev, true);
5105     @@ -742,6 +755,8 @@ static int i8042_pnp_aux_probe(struct pnp_dev *dev, const struct pnp_device_id *
5106     strlcat(i8042_pnp_aux_name, ":", sizeof(i8042_pnp_aux_name));
5107     strlcat(i8042_pnp_aux_name, pnp_dev_name(dev), sizeof(i8042_pnp_aux_name));
5108     }
5109     + i8042_pnp_id_to_string(dev->id, i8042_aux_firmware_id,
5110     + sizeof(i8042_aux_firmware_id));
5111    
5112     i8042_pnp_aux_devices++;
5113     return 0;
5114     diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
5115     index 020053fa5aaa..3807c3e971cc 100644
5116     --- a/drivers/input/serio/i8042.c
5117     +++ b/drivers/input/serio/i8042.c
5118     @@ -87,6 +87,8 @@ MODULE_PARM_DESC(debug, "Turn i8042 debugging mode on and off");
5119     #endif
5120    
5121     static bool i8042_bypass_aux_irq_test;
5122     +static char i8042_kbd_firmware_id[128];
5123     +static char i8042_aux_firmware_id[128];
5124    
5125     #include "i8042.h"
5126    
5127     @@ -1218,6 +1220,8 @@ static int __init i8042_create_kbd_port(void)
5128     serio->dev.parent = &i8042_platform_device->dev;
5129     strlcpy(serio->name, "i8042 KBD port", sizeof(serio->name));
5130     strlcpy(serio->phys, I8042_KBD_PHYS_DESC, sizeof(serio->phys));
5131     + strlcpy(serio->firmware_id, i8042_kbd_firmware_id,
5132     + sizeof(serio->firmware_id));
5133    
5134     port->serio = serio;
5135     port->irq = I8042_KBD_IRQ;
5136     @@ -1244,6 +1248,8 @@ static int __init i8042_create_aux_port(int idx)
5137     if (idx < 0) {
5138     strlcpy(serio->name, "i8042 AUX port", sizeof(serio->name));
5139     strlcpy(serio->phys, I8042_AUX_PHYS_DESC, sizeof(serio->phys));
5140     + strlcpy(serio->firmware_id, i8042_aux_firmware_id,
5141     + sizeof(serio->firmware_id));
5142     serio->close = i8042_port_close;
5143     } else {
5144     snprintf(serio->name, sizeof(serio->name), "i8042 AUX%d port", idx);
5145     diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
5146     index 8f4c4ab04bc2..b29134de983b 100644
5147     --- a/drivers/input/serio/serio.c
5148     +++ b/drivers/input/serio/serio.c
5149     @@ -451,6 +451,13 @@ static ssize_t serio_set_bind_mode(struct device *dev, struct device_attribute *
5150     return retval;
5151     }
5152    
5153     +static ssize_t firmware_id_show(struct device *dev, struct device_attribute *attr, char *buf)
5154     +{
5155     + struct serio *serio = to_serio_port(dev);
5156     +
5157     + return sprintf(buf, "%s\n", serio->firmware_id);
5158     +}
5159     +
5160     static DEVICE_ATTR_RO(type);
5161     static DEVICE_ATTR_RO(proto);
5162     static DEVICE_ATTR_RO(id);
5163     @@ -473,12 +480,14 @@ static DEVICE_ATTR_RO(modalias);
5164     static DEVICE_ATTR_WO(drvctl);
5165     static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
5166     static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
5167     +static DEVICE_ATTR_RO(firmware_id);
5168    
5169     static struct attribute *serio_device_attrs[] = {
5170     &dev_attr_modalias.attr,
5171     &dev_attr_description.attr,
5172     &dev_attr_drvctl.attr,
5173     &dev_attr_bind_mode.attr,
5174     + &dev_attr_firmware_id.attr,
5175     NULL
5176     };
5177    
5178     @@ -921,9 +930,14 @@ static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
5179     SERIO_ADD_UEVENT_VAR("SERIO_PROTO=%02x", serio->id.proto);
5180     SERIO_ADD_UEVENT_VAR("SERIO_ID=%02x", serio->id.id);
5181     SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra);
5182     +
5183     SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X",
5184     serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
5185    
5186     + if (serio->firmware_id[0])
5187     + SERIO_ADD_UEVENT_VAR("SERIO_FIRMWARE_ID=%s",
5188     + serio->firmware_id);
5189     +
5190     return 0;
5191     }
5192     #undef SERIO_ADD_UEVENT_VAR
5193     diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
5194     index faf0da4bb3a2..71776ff5aedc 100644
5195     --- a/drivers/iommu/amd_iommu.c
5196     +++ b/drivers/iommu/amd_iommu.c
5197     @@ -3999,7 +3999,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
5198     iommu_flush_dte(iommu, devid);
5199     if (devid != alias) {
5200     irq_lookup_table[alias] = table;
5201     - set_dte_irq_entry(devid, table);
5202     + set_dte_irq_entry(alias, table);
5203     iommu_flush_dte(iommu, alias);
5204     }
5205    
5206     diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
5207     index 540956465ed2..939eb0d8fbf1 100644
5208     --- a/drivers/irqchip/irq-armada-370-xp.c
5209     +++ b/drivers/irqchip/irq-armada-370-xp.c
5210     @@ -130,8 +130,7 @@ static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
5211     struct msi_desc *desc)
5212     {
5213     struct msi_msg msg;
5214     - irq_hw_number_t hwirq;
5215     - int virq;
5216     + int virq, hwirq;
5217    
5218     hwirq = armada_370_xp_alloc_msi();
5219     if (hwirq < 0)
5220     @@ -157,8 +156,19 @@ static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
5221     unsigned int irq)
5222     {
5223     struct irq_data *d = irq_get_irq_data(irq);
5224     + unsigned long hwirq = d->hwirq;
5225     +
5226     irq_dispose_mapping(irq);
5227     - armada_370_xp_free_msi(d->hwirq);
5228     + armada_370_xp_free_msi(hwirq);
5229     +}
5230     +
5231     +static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev,
5232     + int nvec, int type)
5233     +{
5234     + /* We support MSI, but not MSI-X */
5235     + if (type == PCI_CAP_ID_MSI)
5236     + return 0;
5237     + return -EINVAL;
5238     }
5239    
5240     static struct irq_chip armada_370_xp_msi_irq_chip = {
5241     @@ -199,6 +209,7 @@ static int armada_370_xp_msi_init(struct device_node *node,
5242    
5243     msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
5244     msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
5245     + msi_chip->check_device = armada_370_xp_check_msi_device;
5246     msi_chip->of_node = node;
5247    
5248     armada_370_xp_msi_domain =
5249     diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
5250     index 341c6016812d..ac2d41bd71a0 100644
5251     --- a/drivers/irqchip/irq-gic.c
5252     +++ b/drivers/irqchip/irq-gic.c
5253     @@ -246,10 +246,14 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
5254     bool force)
5255     {
5256     void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
5257     - unsigned int shift = (gic_irq(d) % 4) * 8;
5258     - unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
5259     + unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
5260     u32 val, mask, bit;
5261    
5262     + if (!force)
5263     + cpu = cpumask_any_and(mask_val, cpu_online_mask);
5264     + else
5265     + cpu = cpumask_first(mask_val);
5266     +
5267     if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
5268     return -EINVAL;
5269    
5270     diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
5271     index 605047428b5a..a7b369fc3554 100644
5272     --- a/drivers/leds/leds-pwm.c
5273     +++ b/drivers/leds/leds-pwm.c
5274     @@ -84,6 +84,15 @@ static inline size_t sizeof_pwm_leds_priv(int num_leds)
5275     (sizeof(struct led_pwm_data) * num_leds);
5276     }
5277    
5278     +static void led_pwm_cleanup(struct led_pwm_priv *priv)
5279     +{
5280     + while (priv->num_leds--) {
5281     + led_classdev_unregister(&priv->leds[priv->num_leds].cdev);
5282     + if (priv->leds[priv->num_leds].can_sleep)
5283     + cancel_work_sync(&priv->leds[priv->num_leds].work);
5284     + }
5285     +}
5286     +
5287     static int led_pwm_create_of(struct platform_device *pdev,
5288     struct led_pwm_priv *priv)
5289     {
5290     @@ -131,8 +140,7 @@ static int led_pwm_create_of(struct platform_device *pdev,
5291    
5292     return 0;
5293     err:
5294     - while (priv->num_leds--)
5295     - led_classdev_unregister(&priv->leds[priv->num_leds].cdev);
5296     + led_pwm_cleanup(priv);
5297    
5298     return ret;
5299     }
5300     @@ -200,8 +208,8 @@ static int led_pwm_probe(struct platform_device *pdev)
5301     return 0;
5302    
5303     err:
5304     - while (i--)
5305     - led_classdev_unregister(&priv->leds[i].cdev);
5306     + priv->num_leds = i;
5307     + led_pwm_cleanup(priv);
5308    
5309     return ret;
5310     }
5311     @@ -209,13 +217,8 @@ err:
5312     static int led_pwm_remove(struct platform_device *pdev)
5313     {
5314     struct led_pwm_priv *priv = platform_get_drvdata(pdev);
5315     - int i;
5316    
5317     - for (i = 0; i < priv->num_leds; i++) {
5318     - led_classdev_unregister(&priv->leds[i].cdev);
5319     - if (priv->leds[i].can_sleep)
5320     - cancel_work_sync(&priv->leds[i].work);
5321     - }
5322     + led_pwm_cleanup(priv);
5323    
5324     return 0;
5325     }
5326     diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
5327     index b82b58f0bb85..9306e0bcb564 100644
5328     --- a/drivers/md/dm-cache-target.c
5329     +++ b/drivers/md/dm-cache-target.c
5330     @@ -2506,6 +2506,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
5331    
5332     } else {
5333     inc_hit_counter(cache, bio);
5334     + pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
5335    
5336     if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
5337     !is_dirty(cache, lookup_result.cblock))
5338     diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
5339     index 784695d22fde..53b213226c01 100644
5340     --- a/drivers/md/dm-crypt.c
5341     +++ b/drivers/md/dm-crypt.c
5342     @@ -19,7 +19,6 @@
5343     #include <linux/crypto.h>
5344     #include <linux/workqueue.h>
5345     #include <linux/backing-dev.h>
5346     -#include <linux/percpu.h>
5347     #include <linux/atomic.h>
5348     #include <linux/scatterlist.h>
5349     #include <asm/page.h>
5350     @@ -43,6 +42,7 @@ struct convert_context {
5351     struct bvec_iter iter_out;
5352     sector_t cc_sector;
5353     atomic_t cc_pending;
5354     + struct ablkcipher_request *req;
5355     };
5356    
5357     /*
5358     @@ -111,15 +111,7 @@ struct iv_tcw_private {
5359     enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
5360    
5361     /*
5362     - * Duplicated per-CPU state for cipher.
5363     - */
5364     -struct crypt_cpu {
5365     - struct ablkcipher_request *req;
5366     -};
5367     -
5368     -/*
5369     - * The fields in here must be read only after initialization,
5370     - * changing state should be in crypt_cpu.
5371     + * The fields in here must be read only after initialization.
5372     */
5373     struct crypt_config {
5374     struct dm_dev *dev;
5375     @@ -150,12 +142,6 @@ struct crypt_config {
5376     sector_t iv_offset;
5377     unsigned int iv_size;
5378    
5379     - /*
5380     - * Duplicated per cpu state. Access through
5381     - * per_cpu_ptr() only.
5382     - */
5383     - struct crypt_cpu __percpu *cpu;
5384     -
5385     /* ESSIV: struct crypto_cipher *essiv_tfm */
5386     void *iv_private;
5387     struct crypto_ablkcipher **tfms;
5388     @@ -192,11 +178,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *);
5389     static void kcryptd_queue_crypt(struct dm_crypt_io *io);
5390     static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
5391    
5392     -static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
5393     -{
5394     - return this_cpu_ptr(cc->cpu);
5395     -}
5396     -
5397     /*
5398     * Use this to access cipher attributes that are the same for each CPU.
5399     */
5400     @@ -903,16 +884,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
5401     static void crypt_alloc_req(struct crypt_config *cc,
5402     struct convert_context *ctx)
5403     {
5404     - struct crypt_cpu *this_cc = this_crypt_config(cc);
5405     unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
5406    
5407     - if (!this_cc->req)
5408     - this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
5409     + if (!ctx->req)
5410     + ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
5411    
5412     - ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
5413     - ablkcipher_request_set_callback(this_cc->req,
5414     + ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
5415     + ablkcipher_request_set_callback(ctx->req,
5416     CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
5417     - kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
5418     + kcryptd_async_done, dmreq_of_req(cc, ctx->req));
5419     }
5420    
5421     /*
5422     @@ -921,7 +901,6 @@ static void crypt_alloc_req(struct crypt_config *cc,
5423     static int crypt_convert(struct crypt_config *cc,
5424     struct convert_context *ctx)
5425     {
5426     - struct crypt_cpu *this_cc = this_crypt_config(cc);
5427     int r;
5428    
5429     atomic_set(&ctx->cc_pending, 1);
5430     @@ -932,7 +911,7 @@ static int crypt_convert(struct crypt_config *cc,
5431    
5432     atomic_inc(&ctx->cc_pending);
5433    
5434     - r = crypt_convert_block(cc, ctx, this_cc->req);
5435     + r = crypt_convert_block(cc, ctx, ctx->req);
5436    
5437     switch (r) {
5438     /* async */
5439     @@ -941,7 +920,7 @@ static int crypt_convert(struct crypt_config *cc,
5440     reinit_completion(&ctx->restart);
5441     /* fall through*/
5442     case -EINPROGRESS:
5443     - this_cc->req = NULL;
5444     + ctx->req = NULL;
5445     ctx->cc_sector++;
5446     continue;
5447    
5448     @@ -1040,6 +1019,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
5449     io->sector = sector;
5450     io->error = 0;
5451     io->base_io = NULL;
5452     + io->ctx.req = NULL;
5453     atomic_set(&io->io_pending, 0);
5454    
5455     return io;
5456     @@ -1065,6 +1045,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
5457     if (!atomic_dec_and_test(&io->io_pending))
5458     return;
5459    
5460     + if (io->ctx.req)
5461     + mempool_free(io->ctx.req, cc->req_pool);
5462     mempool_free(io, cc->io_pool);
5463    
5464     if (likely(!base_io))
5465     @@ -1492,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_config *cc)
5466     static void crypt_dtr(struct dm_target *ti)
5467     {
5468     struct crypt_config *cc = ti->private;
5469     - struct crypt_cpu *cpu_cc;
5470     - int cpu;
5471    
5472     ti->private = NULL;
5473    
5474     @@ -1505,13 +1485,6 @@ static void crypt_dtr(struct dm_target *ti)
5475     if (cc->crypt_queue)
5476     destroy_workqueue(cc->crypt_queue);
5477    
5478     - if (cc->cpu)
5479     - for_each_possible_cpu(cpu) {
5480     - cpu_cc = per_cpu_ptr(cc->cpu, cpu);
5481     - if (cpu_cc->req)
5482     - mempool_free(cpu_cc->req, cc->req_pool);
5483     - }
5484     -
5485     crypt_free_tfms(cc);
5486    
5487     if (cc->bs)
5488     @@ -1530,9 +1503,6 @@ static void crypt_dtr(struct dm_target *ti)
5489     if (cc->dev)
5490     dm_put_device(ti, cc->dev);
5491    
5492     - if (cc->cpu)
5493     - free_percpu(cc->cpu);
5494     -
5495     kzfree(cc->cipher);
5496     kzfree(cc->cipher_string);
5497    
5498     @@ -1588,13 +1558,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
5499     if (tmp)
5500     DMWARN("Ignoring unexpected additional cipher options");
5501    
5502     - cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
5503     - __alignof__(struct crypt_cpu));
5504     - if (!cc->cpu) {
5505     - ti->error = "Cannot allocate per cpu state";
5506     - goto bad_mem;
5507     - }
5508     -
5509     /*
5510     * For compatibility with the original dm-crypt mapping format, if
5511     * only the cipher name is supplied, use cbc-plain.
5512     diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
5513     index 60cc50685c14..fa602b56b648 100644
5514     --- a/drivers/md/dm-thin.c
5515     +++ b/drivers/md/dm-thin.c
5516     @@ -25,6 +25,7 @@
5517     #define MAPPING_POOL_SIZE 1024
5518     #define PRISON_CELLS 1024
5519     #define COMMIT_PERIOD HZ
5520     +#define NO_SPACE_TIMEOUT (HZ * 60)
5521    
5522     DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
5523     "A percentage of time allocated for copy on write");
5524     @@ -173,6 +174,7 @@ struct pool {
5525     struct workqueue_struct *wq;
5526     struct work_struct worker;
5527     struct delayed_work waker;
5528     + struct delayed_work no_space_timeout;
5529    
5530     unsigned long last_commit_jiffies;
5531     unsigned ref_count;
5532     @@ -920,7 +922,7 @@ static int commit(struct pool *pool)
5533     {
5534     int r;
5535    
5536     - if (get_pool_mode(pool) != PM_WRITE)
5537     + if (get_pool_mode(pool) >= PM_READ_ONLY)
5538     return -EINVAL;
5539    
5540     r = dm_pool_commit_metadata(pool->pmd);
5541     @@ -1449,6 +1451,20 @@ static void do_waker(struct work_struct *ws)
5542     queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
5543     }
5544    
5545     +/*
5546     + * We're holding onto IO to allow userland time to react. After the
5547     + * timeout either the pool will have been resized (and thus back in
5548     + * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
5549     + */
5550     +static void do_no_space_timeout(struct work_struct *ws)
5551     +{
5552     + struct pool *pool = container_of(to_delayed_work(ws), struct pool,
5553     + no_space_timeout);
5554     +
5555     + if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
5556     + set_pool_mode(pool, PM_READ_ONLY);
5557     +}
5558     +
5559     /*----------------------------------------------------------------*/
5560    
5561     struct noflush_work {
5562     @@ -1574,6 +1590,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
5563     pool->process_discard = process_discard;
5564     pool->process_prepared_mapping = process_prepared_mapping;
5565     pool->process_prepared_discard = process_prepared_discard_passdown;
5566     +
5567     + if (!pool->pf.error_if_no_space)
5568     + queue_delayed_work(pool->wq, &pool->no_space_timeout, NO_SPACE_TIMEOUT);
5569     break;
5570    
5571     case PM_WRITE:
5572     @@ -1956,6 +1975,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
5573    
5574     INIT_WORK(&pool->worker, do_worker);
5575     INIT_DELAYED_WORK(&pool->waker, do_waker);
5576     + INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
5577     spin_lock_init(&pool->lock);
5578     bio_list_init(&pool->deferred_bios);
5579     bio_list_init(&pool->deferred_flush_bios);
5580     @@ -2519,6 +2539,7 @@ static void pool_postsuspend(struct dm_target *ti)
5581     struct pool *pool = pt->pool;
5582    
5583     cancel_delayed_work(&pool->waker);
5584     + cancel_delayed_work(&pool->no_space_timeout);
5585     flush_workqueue(pool->wq);
5586     (void) commit(pool);
5587     }
5588     diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
5589     index 796007a5e0e1..7a7bab8947ae 100644
5590     --- a/drivers/md/dm-verity.c
5591     +++ b/drivers/md/dm-verity.c
5592     @@ -330,15 +330,17 @@ test_block_hash:
5593     return r;
5594     }
5595     }
5596     -
5597     todo = 1 << v->data_dev_block_bits;
5598     - while (io->iter.bi_size) {
5599     + do {
5600     u8 *page;
5601     + unsigned len;
5602     struct bio_vec bv = bio_iter_iovec(bio, io->iter);
5603    
5604     page = kmap_atomic(bv.bv_page);
5605     - r = crypto_shash_update(desc, page + bv.bv_offset,
5606     - bv.bv_len);
5607     + len = bv.bv_len;
5608     + if (likely(len >= todo))
5609     + len = todo;
5610     + r = crypto_shash_update(desc, page + bv.bv_offset, len);
5611     kunmap_atomic(page);
5612    
5613     if (r < 0) {
5614     @@ -346,8 +348,9 @@ test_block_hash:
5615     return r;
5616     }
5617    
5618     - bio_advance_iter(bio, &io->iter, bv.bv_len);
5619     - }
5620     + bio_advance_iter(bio, &io->iter, len);
5621     + todo -= len;
5622     + } while (todo);
5623    
5624     if (!v->version) {
5625     r = crypto_shash_update(desc, v->salt, v->salt_size);
5626     diff --git a/drivers/md/md.c b/drivers/md/md.c
5627     index 4ad5cc4e63e8..51c431c3a411 100644
5628     --- a/drivers/md/md.c
5629     +++ b/drivers/md/md.c
5630     @@ -8530,7 +8530,8 @@ static int md_notify_reboot(struct notifier_block *this,
5631     if (mddev_trylock(mddev)) {
5632     if (mddev->pers)
5633     __md_stop_writes(mddev);
5634     - mddev->safemode = 2;
5635     + if (mddev->persistent)
5636     + mddev->safemode = 2;
5637     mddev_unlock(mddev);
5638     }
5639     need_delay = 1;
5640     diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
5641     index 33fc408e5eac..cb882aae9e20 100644
5642     --- a/drivers/md/raid10.c
5643     +++ b/drivers/md/raid10.c
5644     @@ -1172,6 +1172,13 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
5645     int max_sectors;
5646     int sectors;
5647    
5648     + /*
5649     + * Register the new request and wait if the reconstruction
5650     + * thread has put up a bar for new requests.
5651     + * Continue immediately if no resync is active currently.
5652     + */
5653     + wait_barrier(conf);
5654     +
5655     sectors = bio_sectors(bio);
5656     while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
5657     bio->bi_iter.bi_sector < conf->reshape_progress &&
5658     @@ -1552,12 +1559,6 @@ static void make_request(struct mddev *mddev, struct bio *bio)
5659    
5660     md_write_start(mddev, bio);
5661    
5662     - /*
5663     - * Register the new request and wait if the reconstruction
5664     - * thread has put up a bar for new requests.
5665     - * Continue immediately if no resync is active currently.
5666     - */
5667     - wait_barrier(conf);
5668    
5669     do {
5670    
5671     diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
5672     index e8a1ce204036..cdd7c1b7259b 100644
5673     --- a/drivers/media/i2c/ov7670.c
5674     +++ b/drivers/media/i2c/ov7670.c
5675     @@ -1109,7 +1109,7 @@ static int ov7670_enum_framesizes(struct v4l2_subdev *sd,
5676     * windows that fall outside that.
5677     */
5678     for (i = 0; i < n_win_sizes; i++) {
5679     - struct ov7670_win_size *win = &info->devtype->win_sizes[index];
5680     + struct ov7670_win_size *win = &info->devtype->win_sizes[i];
5681     if (info->min_width && win->width < info->min_width)
5682     continue;
5683     if (info->min_height && win->height < info->min_height)
5684     diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
5685     index d5a7a135f75d..703560fa5e73 100644
5686     --- a/drivers/media/media-device.c
5687     +++ b/drivers/media/media-device.c
5688     @@ -93,6 +93,7 @@ static long media_device_enum_entities(struct media_device *mdev,
5689     struct media_entity *ent;
5690     struct media_entity_desc u_ent;
5691    
5692     + memset(&u_ent, 0, sizeof(u_ent));
5693     if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
5694     return -EFAULT;
5695    
5696     diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
5697     index 3aecaf465094..f0c9c42867de 100644
5698     --- a/drivers/media/tuners/fc2580.c
5699     +++ b/drivers/media/tuners/fc2580.c
5700     @@ -195,7 +195,7 @@ static int fc2580_set_params(struct dvb_frontend *fe)
5701    
5702     f_ref = 2UL * priv->cfg->clock / r_val;
5703     n_val = div_u64_rem(f_vco, f_ref, &k_val);
5704     - k_val_reg = 1UL * k_val * (1 << 20) / f_ref;
5705     + k_val_reg = div_u64(1ULL * k_val * (1 << 20), f_ref);
5706    
5707     ret = fc2580_wr_reg(priv, 0x18, r18_val | ((k_val_reg >> 16) & 0xff));
5708     if (ret < 0)
5709     @@ -348,8 +348,8 @@ static int fc2580_set_params(struct dvb_frontend *fe)
5710     if (ret < 0)
5711     goto err;
5712    
5713     - ret = fc2580_wr_reg(priv, 0x37, 1UL * priv->cfg->clock * \
5714     - fc2580_if_filter_lut[i].mul / 1000000000);
5715     + ret = fc2580_wr_reg(priv, 0x37, div_u64(1ULL * priv->cfg->clock *
5716     + fc2580_if_filter_lut[i].mul, 1000000000));
5717     if (ret < 0)
5718     goto err;
5719    
5720     diff --git a/drivers/media/tuners/fc2580_priv.h b/drivers/media/tuners/fc2580_priv.h
5721     index be38a9e637e0..646c99452136 100644
5722     --- a/drivers/media/tuners/fc2580_priv.h
5723     +++ b/drivers/media/tuners/fc2580_priv.h
5724     @@ -22,6 +22,7 @@
5725     #define FC2580_PRIV_H
5726    
5727     #include "fc2580.h"
5728     +#include <linux/math64.h>
5729    
5730     struct fc2580_reg_val {
5731     u8 reg;
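
[editor's illustration, not part of the patch] The fc2580 hunks replace plain C division with div_u64() because k_val * (1 << 20) easily exceeds 32 bits, and a 64-by-32 division written as plain C would otherwise truncate or require compiler helpers on 32-bit kernels. A stand-alone C illustration of the overflow and of doing the arithmetic with a 64-bit intermediate; the clock and remainder values are arbitrary:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t k_val = 50000;         /* arbitrary division remainder */
        uint32_t f_ref = 26000000;      /* arbitrary reference clock, Hz */

        /* 32-bit intermediate: 50000 * 2^20 wraps around before the division */
        uint32_t wrong = (uint32_t)(k_val * (1u << 20)) / f_ref;

        /* 64-bit intermediate: what div_u64(1ULL * k_val * (1 << 20), f_ref) computes */
        uint32_t right = (uint32_t)((1ULL * k_val * (1 << 20)) / f_ref);

        printf("32-bit math: %u, 64-bit math: %u\n", wrong, right);
        return 0;
}
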
5732     diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
5733     index b63a5e584aa0..fca336b65351 100644
5734     --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
5735     +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
5736     @@ -178,6 +178,9 @@ struct v4l2_create_buffers32 {
5737    
5738     static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
5739     {
5740     + if (get_user(kp->type, &up->type))
5741     + return -EFAULT;
5742     +
5743     switch (kp->type) {
5744     case V4L2_BUF_TYPE_VIDEO_CAPTURE:
5745     case V4L2_BUF_TYPE_VIDEO_OUTPUT:
5746     @@ -204,17 +207,16 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us
5747    
5748     static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
5749     {
5750     - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) ||
5751     - get_user(kp->type, &up->type))
5752     - return -EFAULT;
5753     + if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
5754     + return -EFAULT;
5755     return __get_v4l2_format32(kp, up);
5756     }
5757    
5758     static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
5759     {
5760     if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
5761     - copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt)))
5762     - return -EFAULT;
5763     + copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
5764     + return -EFAULT;
5765     return __get_v4l2_format32(&kp->format, &up->format);
5766     }
5767    
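
[editor's illustration, not part of the patch] Both v4l2-compat hunks above are about reading only what is safe: __get_v4l2_format32() now fetches the type discriminant itself before switching on it, and get_v4l2_create32() copies only the members that precede the format union (offsetof(..., format) rather than format.fmt), leaving the union to the type-aware helper. A stand-alone C sketch of copying a struct prefix up to a union with offsetof; the struct layout is invented for illustration:

#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdint.h>

/* invented layout: a fixed header followed by a type-tagged union,
 * loosely mirroring a create_buffers structure that ends in a format union */
struct msg {
        uint32_t count;
        uint32_t memory;
        uint32_t type;
        union {
                uint32_t pix;
                uint8_t  raw[16];
        } fmt;
};

int main(void)
{
        struct msg user = { .count = 4, .memory = 1, .type = 2, .fmt.pix = 0x1234 };
        struct msg kernel;

        memset(&kernel, 0, sizeof(kernel));

        /* copy only the members that precede the union ... */
        memcpy(&kernel, &user, offsetof(struct msg, fmt));

        /* ... then let a type-aware step convert the union, as
         * __get_v4l2_format32() does after reading 'type' itself */
        if (kernel.type == 2)
                kernel.fmt.pix = user.fmt.pix;

        printf("count=%u memory=%u type=%u pix=%#x\n",
               kernel.count, kernel.memory, kernel.type, kernel.fmt.pix);
        return 0;
}
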
5768     diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c
5769     index 110c03627051..b59a17fb7c3e 100644
5770     --- a/drivers/memory/mvebu-devbus.c
5771     +++ b/drivers/memory/mvebu-devbus.c
5772     @@ -108,8 +108,19 @@ static int devbus_set_timing_params(struct devbus *devbus,
5773     node->full_name);
5774     return err;
5775     }
5776     - /* Convert bit width to byte width */
5777     - r.bus_width /= 8;
5778     +
5779     + /*
5780     + * The bus width is encoded into the register as 0 for 8 bits,
5781     + * and 1 for 16 bits, so we do the necessary conversion here.
5782     + */
5783     + if (r.bus_width == 8)
5784     + r.bus_width = 0;
5785     + else if (r.bus_width == 16)
5786     + r.bus_width = 1;
5787     + else {
5788     + dev_err(devbus->dev, "invalid bus width %d\n", r.bus_width);
5789     + return -EINVAL;
5790     + }
5791    
5792     err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps",
5793     &r.badr_skew);
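
[editor's illustration, not part of the patch] The mvebu-devbus hunk replaces a blind divide-by-8 with an explicit mapping, because the register encodes the width (0 for 8 bits, 1 for 16 bits) rather than storing a byte count, and anything else is rejected. A trivial stand-alone C version of that validation:

#include <stdio.h>

/* returns the register encoding for a bus width in bits, or -1 if unsupported */
static int encode_bus_width(int bus_width)
{
        switch (bus_width) {
        case 8:  return 0;
        case 16: return 1;
        default: return -1;     /* the driver returns -EINVAL here */
        }
}

int main(void)
{
        int widths[] = { 8, 16, 32 };
        unsigned i;

        for (i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
                printf("width %d -> encoding %d\n", widths[i], encode_bus_width(widths[i]));
        return 0;
}
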
5794     diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
5795     index 6d91933c4cdd..0af6e060e238 100644
5796     --- a/drivers/net/ethernet/intel/e1000e/netdev.c
5797     +++ b/drivers/net/ethernet/intel/e1000e/netdev.c
5798     @@ -2976,11 +2976,21 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
5799     u32 rctl, rfctl;
5800     u32 pages = 0;
5801    
5802     - /* Workaround Si errata on PCHx - configure jumbo frame flow */
5803     - if ((hw->mac.type >= e1000_pch2lan) &&
5804     - (adapter->netdev->mtu > ETH_DATA_LEN) &&
5805     - e1000_lv_jumbo_workaround_ich8lan(hw, true))
5806     - e_dbg("failed to enable jumbo frame workaround mode\n");
5807     + /* Workaround Si errata on PCHx - configure jumbo frame flow.
5808     + * If jumbo frames not set, program related MAC/PHY registers
5809     + * to h/w defaults
5810     + */
5811     + if (hw->mac.type >= e1000_pch2lan) {
5812     + s32 ret_val;
5813     +
5814     + if (adapter->netdev->mtu > ETH_DATA_LEN)
5815     + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
5816     + else
5817     + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
5818     +
5819     + if (ret_val)
5820     + e_dbg("failed to enable|disable jumbo frame workaround mode\n");
5821     + }
5822    
5823     /* Program MC offset vector base */
5824     rctl = er32(RCTL);
5825     diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
5826     index b9d1c1c8ca5a..178506a201d5 100644
5827     --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
5828     +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
5829     @@ -1776,7 +1776,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
5830     u32 v_retval, u8 *msg, u16 msglen)
5831     {
5832     struct i40e_hw *hw = &pf->hw;
5833     - int local_vf_id = vf_id - hw->func_caps.vf_base_id;
5834     + unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
5835     struct i40e_vf *vf;
5836     int ret;
5837    
5838     diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
5839     index 46d31a49f5ea..d9c7eb279141 100644
5840     --- a/drivers/net/ethernet/intel/igb/igb_main.c
5841     +++ b/drivers/net/ethernet/intel/igb/igb_main.c
5842     @@ -1014,6 +1014,12 @@ static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
5843     {
5844     struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
5845    
5846     + /* Coming from igb_set_interrupt_capability, the vectors are not yet
5847     + * allocated. So, q_vector is NULL so we should stop here.
5848     + */
5849     + if (!q_vector)
5850     + return;
5851     +
5852     if (q_vector->tx.ring)
5853     adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
5854    
5855     @@ -1121,6 +1127,7 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
5856    
5857     /* If we can't do MSI-X, try MSI */
5858     msi_only:
5859     + adapter->flags &= ~IGB_FLAG_HAS_MSIX;
5860     #ifdef CONFIG_PCI_IOV
5861     /* disable SR-IOV for non MSI-X configurations */
5862     if (adapter->vf_data) {
5863     diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
5864     index 925034b80e9c..93598cd7ee6a 100644
5865     --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
5866     +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
5867     @@ -426,6 +426,12 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
5868     bool blocked;
5869     int err;
5870    
5871     + if (!wl->ucode.bcm43xx_bomminor) {
5872     + err = brcms_request_fw(wl, wl->wlc->hw->d11core);
5873     + if (err)
5874     + return -ENOENT;
5875     + }
5876     +
5877     ieee80211_wake_queues(hw);
5878     spin_lock_bh(&wl->lock);
5879     blocked = brcms_rfkill_set_hw_state(wl);
5880     @@ -433,14 +439,6 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
5881     if (!blocked)
5882     wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
5883    
5884     - if (!wl->ucode.bcm43xx_bomminor) {
5885     - err = brcms_request_fw(wl, wl->wlc->hw->d11core);
5886     - if (err) {
5887     - brcms_remove(wl->wlc->hw->d11core);
5888     - return -ENOENT;
5889     - }
5890     - }
5891     -
5892     spin_lock_bh(&wl->lock);
5893     /* avoid acknowledging frames before a non-monitor device is added */
5894     wl->mute_tx = true;
5895     diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
5896     index 2a59da2ff87a..e1d546665ae8 100644
5897     --- a/drivers/net/wireless/iwlwifi/iwl-7000.c
5898     +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
5899     @@ -67,8 +67,8 @@
5900     #include "iwl-agn-hw.h"
5901    
5902     /* Highest firmware API version supported */
5903     -#define IWL7260_UCODE_API_MAX 8
5904     -#define IWL3160_UCODE_API_MAX 8
5905     +#define IWL7260_UCODE_API_MAX 9
5906     +#define IWL3160_UCODE_API_MAX 9
5907    
5908     /* Oldest version we won't warn about */
5909     #define IWL7260_UCODE_API_OK 7
5910     @@ -223,3 +223,4 @@ const struct iwl_cfg iwl7265_n_cfg = {
5911    
5912     MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
5913     MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
5914     +MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
5915     diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
5916     index 18a895a949d4..6b22681068a7 100644
5917     --- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
5918     +++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
5919     @@ -188,7 +188,7 @@ static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
5920     cpu_to_le32(0xcc00aaaa),
5921     cpu_to_le32(0x0000aaaa),
5922     cpu_to_le32(0xc0004000),
5923     - cpu_to_le32(0x00000000),
5924     + cpu_to_le32(0x00004000),
5925     cpu_to_le32(0xf0005000),
5926     cpu_to_le32(0xf0005000),
5927     },
5928     @@ -211,16 +211,16 @@ static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
5929     /* Tx Tx disabled */
5930     cpu_to_le32(0xaaaaaaaa),
5931     cpu_to_le32(0xaaaaaaaa),
5932     - cpu_to_le32(0xaaaaaaaa),
5933     + cpu_to_le32(0xeeaaaaaa),
5934     cpu_to_le32(0xaaaaaaaa),
5935     cpu_to_le32(0xcc00ff28),
5936     cpu_to_le32(0x0000aaaa),
5937     cpu_to_le32(0xcc00aaaa),
5938     cpu_to_le32(0x0000aaaa),
5939     - cpu_to_le32(0xC0004000),
5940     - cpu_to_le32(0xC0004000),
5941     - cpu_to_le32(0xF0005000),
5942     - cpu_to_le32(0xF0005000),
5943     + cpu_to_le32(0xc0004000),
5944     + cpu_to_le32(0xc0004000),
5945     + cpu_to_le32(0xf0005000),
5946     + cpu_to_le32(0xf0005000),
5947     },
5948     };
5949    
5950     diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
5951     index 9a856e5031f1..d06414ef15c5 100644
5952     --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
5953     +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
5954     @@ -606,7 +606,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
5955     if (ret)
5956     goto out_remove_mac;
5957    
5958     - if (!mvm->bf_allowed_vif &&
5959     + if (!mvm->bf_allowed_vif && false &&
5960     vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
5961     mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
5962     mvm->bf_allowed_vif = mvmvif;
5963     @@ -796,7 +796,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
5964     memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
5965     len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
5966    
5967     - ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
5968     + ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
5969     if (ret)
5970     IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
5971     }
5972     @@ -812,7 +812,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
5973     if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
5974     return;
5975    
5976     - ieee80211_iterate_active_interfaces(
5977     + ieee80211_iterate_active_interfaces_atomic(
5978     mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
5979     iwl_mvm_mc_iface_iterator, &iter_data);
5980     }
5981     @@ -971,6 +971,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
5982     */
5983     iwl_mvm_remove_time_event(mvm, mvmvif,
5984     &mvmvif->time_event_data);
5985     + iwl_mvm_sf_update(mvm, vif, false);
5986     } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
5987     BSS_CHANGED_QOS)) {
5988     ret = iwl_mvm_power_update_mode(mvm, vif);
5989     diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
5990     index 5bc871513505..03179d0b08c2 100644
5991     --- a/drivers/net/wireless/iwlwifi/mvm/rs.c
5992     +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
5993     @@ -59,7 +59,7 @@
5994     /* max allowed rate miss before sync LQ cmd */
5995     #define IWL_MISSED_RATE_MAX 15
5996     #define RS_STAY_IN_COLUMN_TIMEOUT (5*HZ)
5997     -
5998     +#define RS_IDLE_TIMEOUT (5*HZ)
5999    
6000     static u8 rs_ht_to_legacy[] = {
6001     [IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX,
6002     @@ -142,7 +142,7 @@ enum rs_column_mode {
6003     RS_MIMO2,
6004     };
6005    
6006     -#define MAX_NEXT_COLUMNS 5
6007     +#define MAX_NEXT_COLUMNS 7
6008     #define MAX_COLUMN_CHECKS 3
6009    
6010     typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
6011     @@ -212,8 +212,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
6012     RS_COLUMN_LEGACY_ANT_B,
6013     RS_COLUMN_SISO_ANT_A,
6014     RS_COLUMN_SISO_ANT_B,
6015     - RS_COLUMN_MIMO2,
6016     - RS_COLUMN_MIMO2_SGI,
6017     + RS_COLUMN_INVALID,
6018     + RS_COLUMN_INVALID,
6019     + RS_COLUMN_INVALID,
6020     + RS_COLUMN_INVALID,
6021     },
6022     },
6023     [RS_COLUMN_LEGACY_ANT_B] = {
6024     @@ -223,8 +225,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
6025     RS_COLUMN_LEGACY_ANT_A,
6026     RS_COLUMN_SISO_ANT_A,
6027     RS_COLUMN_SISO_ANT_B,
6028     - RS_COLUMN_MIMO2,
6029     - RS_COLUMN_MIMO2_SGI,
6030     + RS_COLUMN_INVALID,
6031     + RS_COLUMN_INVALID,
6032     + RS_COLUMN_INVALID,
6033     + RS_COLUMN_INVALID,
6034     },
6035     },
6036     [RS_COLUMN_SISO_ANT_A] = {
6037     @@ -235,7 +239,9 @@ static const struct rs_tx_column rs_tx_columns[] = {
6038     RS_COLUMN_MIMO2,
6039     RS_COLUMN_SISO_ANT_A_SGI,
6040     RS_COLUMN_SISO_ANT_B_SGI,
6041     - RS_COLUMN_MIMO2_SGI,
6042     + RS_COLUMN_LEGACY_ANT_A,
6043     + RS_COLUMN_LEGACY_ANT_B,
6044     + RS_COLUMN_INVALID,
6045     },
6046     .checks = {
6047     rs_siso_allow,
6048     @@ -249,7 +255,9 @@ static const struct rs_tx_column rs_tx_columns[] = {
6049     RS_COLUMN_MIMO2,
6050     RS_COLUMN_SISO_ANT_B_SGI,
6051     RS_COLUMN_SISO_ANT_A_SGI,
6052     - RS_COLUMN_MIMO2_SGI,
6053     + RS_COLUMN_LEGACY_ANT_A,
6054     + RS_COLUMN_LEGACY_ANT_B,
6055     + RS_COLUMN_INVALID,
6056     },
6057     .checks = {
6058     rs_siso_allow,
6059     @@ -265,6 +273,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
6060     RS_COLUMN_SISO_ANT_A,
6061     RS_COLUMN_SISO_ANT_B,
6062     RS_COLUMN_MIMO2,
6063     + RS_COLUMN_LEGACY_ANT_A,
6064     + RS_COLUMN_LEGACY_ANT_B,
6065     },
6066     .checks = {
6067     rs_siso_allow,
6068     @@ -281,6 +291,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
6069     RS_COLUMN_SISO_ANT_B,
6070     RS_COLUMN_SISO_ANT_A,
6071     RS_COLUMN_MIMO2,
6072     + RS_COLUMN_LEGACY_ANT_A,
6073     + RS_COLUMN_LEGACY_ANT_B,
6074     },
6075     .checks = {
6076     rs_siso_allow,
6077     @@ -296,6 +308,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
6078     RS_COLUMN_SISO_ANT_A_SGI,
6079     RS_COLUMN_SISO_ANT_B_SGI,
6080     RS_COLUMN_MIMO2_SGI,
6081     + RS_COLUMN_LEGACY_ANT_A,
6082     + RS_COLUMN_LEGACY_ANT_B,
6083     },
6084     .checks = {
6085     rs_mimo_allow,
6086     @@ -311,6 +325,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
6087     RS_COLUMN_SISO_ANT_A,
6088     RS_COLUMN_SISO_ANT_B,
6089     RS_COLUMN_MIMO2,
6090     + RS_COLUMN_LEGACY_ANT_A,
6091     + RS_COLUMN_LEGACY_ANT_B,
6092     },
6093     .checks = {
6094     rs_mimo_allow,
6095     @@ -503,6 +519,16 @@ static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
6096     window->average_tpt = IWL_INVALID_VALUE;
6097     }
6098    
6099     +static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm,
6100     + struct iwl_scale_tbl_info *tbl)
6101     +{
6102     + int i;
6103     +
6104     + IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
6105     + for (i = 0; i < IWL_RATE_COUNT; i++)
6106     + rs_rate_scale_clear_window(&tbl->win[i]);
6107     +}
6108     +
6109     static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
6110     {
6111     return (ant_type & valid_antenna) == ant_type;
6112     @@ -975,6 +1001,13 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
6113     return;
6114     }
6115    
6116     +#ifdef CPTCFG_MAC80211_DEBUGFS
6117     + /* Disable last tx check if we are debugging with fixed rate */
6118     + if (lq_sta->dbg_fixed_rate) {
6119     + IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
6120     + return;
6121     + }
6122     +#endif
6123     if (!ieee80211_is_data(hdr->frame_control) ||
6124     info->flags & IEEE80211_TX_CTL_NO_ACK)
6125     return;
6126     @@ -1017,6 +1050,18 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
6127     mac_index++;
6128     }
6129    
6130     + if (time_after(jiffies,
6131     + (unsigned long)(lq_sta->last_tx + RS_IDLE_TIMEOUT))) {
6132     + int tid;
6133     + IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
6134     + for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
6135     + ieee80211_stop_tx_ba_session(sta, tid);
6136     +
6137     + iwl_mvm_rs_rate_init(mvm, sta, sband->band, false);
6138     + return;
6139     + }
6140     + lq_sta->last_tx = jiffies;
6141     +
6142     /* Here we actually compare this rate to the latest LQ command */
6143     if ((mac_index < 0) ||
6144     (rate.sgi != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
6145     @@ -1362,7 +1407,6 @@ static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
6146     static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
6147     {
6148     struct iwl_scale_tbl_info *tbl;
6149     - int i;
6150     int active_tbl;
6151     int flush_interval_passed = 0;
6152     struct iwl_mvm *mvm;
6153     @@ -1423,9 +1467,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
6154    
6155     IWL_DEBUG_RATE(mvm,
6156     "LQ: stay in table clear win\n");
6157     - for (i = 0; i < IWL_RATE_COUNT; i++)
6158     - rs_rate_scale_clear_window(
6159     - &(tbl->win[i]));
6160     + rs_rate_scale_clear_tbl_windows(mvm, tbl);
6161     }
6162     }
6163    
6164     @@ -1433,9 +1475,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
6165     * bitmaps and stats in active table (this will become the new
6166     * "search" table). */
6167     if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) {
6168     - IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
6169     - for (i = 0; i < IWL_RATE_COUNT; i++)
6170     - rs_rate_scale_clear_window(&(tbl->win[i]));
6171     + rs_rate_scale_clear_tbl_windows(mvm, tbl);
6172     }
6173     }
6174     }
6175     @@ -1628,85 +1668,76 @@ static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
6176     {
6177     enum rs_action action = RS_ACTION_STAY;
6178    
6179     - /* Too many failures, decrease rate */
6180     if ((sr <= RS_SR_FORCE_DECREASE) || (current_tpt == 0)) {
6181     IWL_DEBUG_RATE(mvm,
6182     - "decrease rate because of low SR\n");
6183     - action = RS_ACTION_DOWNSCALE;
6184     - /* No throughput measured yet for adjacent rates; try increase. */
6185     - } else if ((low_tpt == IWL_INVALID_VALUE) &&
6186     - (high_tpt == IWL_INVALID_VALUE)) {
6187     - if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH) {
6188     - IWL_DEBUG_RATE(mvm,
6189     - "Good SR and no high rate measurement. "
6190     - "Increase rate\n");
6191     - action = RS_ACTION_UPSCALE;
6192     - } else if (low != IWL_RATE_INVALID) {
6193     - IWL_DEBUG_RATE(mvm,
6194     - "Remain in current rate\n");
6195     - action = RS_ACTION_STAY;
6196     - }
6197     + "Decrease rate because of low SR\n");
6198     + return RS_ACTION_DOWNSCALE;
6199     }
6200    
6201     - /* Both adjacent throughputs are measured, but neither one has better
6202     - * throughput; we're using the best rate, don't change it!
6203     - */
6204     - else if ((low_tpt != IWL_INVALID_VALUE) &&
6205     - (high_tpt != IWL_INVALID_VALUE) &&
6206     - (low_tpt < current_tpt) &&
6207     - (high_tpt < current_tpt)) {
6208     + if ((low_tpt == IWL_INVALID_VALUE) &&
6209     + (high_tpt == IWL_INVALID_VALUE) &&
6210     + (high != IWL_RATE_INVALID)) {
6211     IWL_DEBUG_RATE(mvm,
6212     - "Both high and low are worse. "
6213     - "Maintain rate\n");
6214     - action = RS_ACTION_STAY;
6215     + "No data about high/low rates. Increase rate\n");
6216     + return RS_ACTION_UPSCALE;
6217     }
6218    
6219     - /* At least one adjacent rate's throughput is measured,
6220     - * and may have better performance.
6221     - */
6222     - else {
6223     - /* Higher adjacent rate's throughput is measured */
6224     - if (high_tpt != IWL_INVALID_VALUE) {
6225     - /* Higher rate has better throughput */
6226     - if (high_tpt > current_tpt &&
6227     - sr >= IWL_RATE_INCREASE_TH) {
6228     - IWL_DEBUG_RATE(mvm,
6229     - "Higher rate is better and good "
6230     - "SR. Increate rate\n");
6231     - action = RS_ACTION_UPSCALE;
6232     - } else {
6233     - IWL_DEBUG_RATE(mvm,
6234     - "Higher rate isn't better OR "
6235     - "no good SR. Maintain rate\n");
6236     - action = RS_ACTION_STAY;
6237     - }
6238     + if ((high_tpt == IWL_INVALID_VALUE) &&
6239     + (high != IWL_RATE_INVALID) &&
6240     + (low_tpt != IWL_INVALID_VALUE) &&
6241     + (low_tpt < current_tpt)) {
6242     + IWL_DEBUG_RATE(mvm,
6243     + "No data about high rate and low rate is worse. Increase rate\n");
6244     + return RS_ACTION_UPSCALE;
6245     + }
6246    
6247     - /* Lower adjacent rate's throughput is measured */
6248     - } else if (low_tpt != IWL_INVALID_VALUE) {
6249     - /* Lower rate has better throughput */
6250     - if (low_tpt > current_tpt) {
6251     - IWL_DEBUG_RATE(mvm,
6252     - "Lower rate is better. "
6253     - "Decrease rate\n");
6254     - action = RS_ACTION_DOWNSCALE;
6255     - } else if (sr >= IWL_RATE_INCREASE_TH) {
6256     - IWL_DEBUG_RATE(mvm,
6257     - "Lower rate isn't better and "
6258     - "good SR. Increase rate\n");
6259     - action = RS_ACTION_UPSCALE;
6260     - }
6261     - }
6262     + if ((high_tpt != IWL_INVALID_VALUE) &&
6263     + (high_tpt > current_tpt)) {
6264     + IWL_DEBUG_RATE(mvm,
6265     + "Higher rate is better. Increate rate\n");
6266     + return RS_ACTION_UPSCALE;
6267     }
6268    
6269     - /* Sanity check; asked for decrease, but success rate or throughput
6270     - * has been good at old rate. Don't change it.
6271     - */
6272     - if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID) &&
6273     - ((sr > IWL_RATE_HIGH_TH) ||
6274     - (current_tpt > (100 * tbl->expected_tpt[low])))) {
6275     + if ((low_tpt != IWL_INVALID_VALUE) &&
6276     + (high_tpt != IWL_INVALID_VALUE) &&
6277     + (low_tpt < current_tpt) &&
6278     + (high_tpt < current_tpt)) {
6279     + IWL_DEBUG_RATE(mvm,
6280     + "Both high and low are worse. Maintain rate\n");
6281     + return RS_ACTION_STAY;
6282     + }
6283     +
6284     + if ((low_tpt != IWL_INVALID_VALUE) &&
6285     + (low_tpt > current_tpt)) {
6286     IWL_DEBUG_RATE(mvm,
6287     - "Sanity check failed. Maintain rate\n");
6288     - action = RS_ACTION_STAY;
6289     + "Lower rate is better\n");
6290     + action = RS_ACTION_DOWNSCALE;
6291     + goto out;
6292     + }
6293     +
6294     + if ((low_tpt == IWL_INVALID_VALUE) &&
6295     + (low != IWL_RATE_INVALID)) {
6296     + IWL_DEBUG_RATE(mvm,
6297     + "No data about lower rate\n");
6298     + action = RS_ACTION_DOWNSCALE;
6299     + goto out;
6300     + }
6301     +
6302     + IWL_DEBUG_RATE(mvm, "Maintain rate\n");
6303     +
6304     +out:
6305     + if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID)) {
6306     + if (sr >= RS_SR_NO_DECREASE) {
6307     + IWL_DEBUG_RATE(mvm,
6308     + "SR is above NO DECREASE. Avoid downscale\n");
6309     + action = RS_ACTION_STAY;
6310     + } else if (current_tpt > (100 * tbl->expected_tpt[low])) {
6311     + IWL_DEBUG_RATE(mvm,
6312     + "Current TPT is higher than max expected in low rate. Avoid downscale\n");
6313     + action = RS_ACTION_STAY;
6314     + } else {
6315     + IWL_DEBUG_RATE(mvm, "Decrease rate\n");
6316     + }
6317     }
6318    
6319     return action;
6320     @@ -1725,7 +1756,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
6321     int low = IWL_RATE_INVALID;
6322     int high = IWL_RATE_INVALID;
6323     int index;
6324     - int i;
6325     struct iwl_rate_scale_data *window = NULL;
6326     int current_tpt = IWL_INVALID_VALUE;
6327     int low_tpt = IWL_INVALID_VALUE;
6328     @@ -1781,6 +1811,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
6329     "Aggregation changed: prev %d current %d. Update expected TPT table\n",
6330     prev_agg, lq_sta->is_agg);
6331     rs_set_expected_tpt_table(lq_sta, tbl);
6332     + rs_rate_scale_clear_tbl_windows(mvm, tbl);
6333     }
6334    
6335     /* current tx rate */
6336     @@ -2010,8 +2041,7 @@ lq_update:
6337     if (lq_sta->search_better_tbl) {
6338     /* Access the "search" table, clear its history. */
6339     tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
6340     - for (i = 0; i < IWL_RATE_COUNT; i++)
6341     - rs_rate_scale_clear_window(&(tbl->win[i]));
6342     + rs_rate_scale_clear_tbl_windows(mvm, tbl);
6343    
6344     /* Use new "search" start rate */
6345     index = tbl->rate.index;
6346     @@ -2032,8 +2062,18 @@ lq_update:
6347     * stay with best antenna legacy modulation for a while
6348     * before next round of mode comparisons. */
6349     tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
6350     - if (is_legacy(&tbl1->rate) && !sta->ht_cap.ht_supported) {
6351     + if (is_legacy(&tbl1->rate)) {
6352     IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n");
6353     +
6354     + if (tid != IWL_MAX_TID_COUNT) {
6355     + tid_data = &sta_priv->tid_data[tid];
6356     + if (tid_data->state != IWL_AGG_OFF) {
6357     + IWL_DEBUG_RATE(mvm,
6358     + "Stop aggregation on tid %d\n",
6359     + tid);
6360     + ieee80211_stop_tx_ba_session(sta, tid);
6361     + }
6362     + }
6363     rs_set_stay_in_table(mvm, 1, lq_sta);
6364     } else {
6365     /* If we're in an HT mode, and all 3 mode switch actions
6366     @@ -2265,10 +2305,10 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
6367     lq_sta->lq.sta_id = sta_priv->sta_id;
6368    
6369     for (j = 0; j < LQ_SIZE; j++)
6370     - for (i = 0; i < IWL_RATE_COUNT; i++)
6371     - rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
6372     + rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]);
6373    
6374     lq_sta->flush_timer = 0;
6375     + lq_sta->last_tx = jiffies;
6376    
6377     IWL_DEBUG_RATE(mvm,
6378     "LQ: *** rate scale station global init for station %d ***\n",
6379     @@ -2469,6 +2509,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
6380     if (is_siso(&rate)) {
6381     num_rates = RS_SECONDARY_SISO_NUM_RATES;
6382     num_retries = RS_SECONDARY_SISO_RETRIES;
6383     + lq_cmd->mimo_delim = index;
6384     } else if (is_legacy(&rate)) {
6385     num_rates = RS_SECONDARY_LEGACY_NUM_RATES;
6386     num_retries = RS_LEGACY_RETRIES_PER_RATE;
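
[editor's illustration, not part of the patch] The large rs.c hunk flattens the nested rate-scaling conditionals into a ladder of early returns: force a downscale on very low success ratio, upscale when there is no data about the adjacent rates or the higher rate measures better, stay when both neighbours measure worse, and otherwise lean toward a downscale that is vetoed while the success ratio or current throughput is still good. A stand-alone C model of that decision order; the thresholds and the INVALID sentinel are simplified placeholders mirroring the driver constants, not the real scaled values:

#include <stdio.h>

#define INVALID            -1   /* stands in for IWL_INVALID_VALUE / IWL_RATE_INVALID */
#define SR_FORCE_DECREASE  15   /* percent, plays the role of RS_SR_FORCE_DECREASE */
#define SR_NO_DECREASE     85   /* percent, plays the role of RS_SR_NO_DECREASE */

enum action { STAY, UPSCALE, DOWNSCALE };

static enum action rate_action(int sr, int current_tpt, int low_tpt, int high_tpt,
                               int low, int high, int low_expected_tpt)
{
        enum action act;

        if (sr <= SR_FORCE_DECREASE || current_tpt == 0)
                return DOWNSCALE;               /* too many failures */

        if (low_tpt == INVALID && high_tpt == INVALID && high != INVALID)
                return UPSCALE;                 /* no data at all: probe upward */

        if (high_tpt == INVALID && high != INVALID &&
            low_tpt != INVALID && low_tpt < current_tpt)
                return UPSCALE;                 /* low is worse, high unknown */

        if (high_tpt != INVALID && high_tpt > current_tpt)
                return UPSCALE;                 /* higher rate measured better */

        if (low_tpt != INVALID && high_tpt != INVALID &&
            low_tpt < current_tpt && high_tpt < current_tpt)
                return STAY;                    /* both neighbours are worse */

        if (low_tpt != INVALID && low_tpt > current_tpt)
                act = DOWNSCALE;                /* lower rate measured better */
        else if (low_tpt == INVALID && low != INVALID)
                act = DOWNSCALE;                /* no data about the lower rate */
        else
                return STAY;

        /* sanity vetoes before actually going down */
        if (low != INVALID &&
            (sr >= SR_NO_DECREASE || current_tpt > 100 * low_expected_tpt))
                act = STAY;

        return act;
}

int main(void)
{
        printf("%d\n", rate_action(60, 500, INVALID, INVALID, 0, 2, 3)); /* 1: upscale */
        printf("%d\n", rate_action(90, 500, 200, 300, 0, 2, 3));         /* 0: stay */
        return 0;
}
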
6387     diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
6388     index 7bc6404f6986..f23b68b8eeff 100644
6389     --- a/drivers/net/wireless/iwlwifi/mvm/rs.h
6390     +++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
6391     @@ -156,6 +156,7 @@ enum {
6392     #define IWL_RATE_HIGH_TH 10880 /* 85% */
6393     #define IWL_RATE_INCREASE_TH 6400 /* 50% */
6394     #define RS_SR_FORCE_DECREASE 1920 /* 15% */
6395     +#define RS_SR_NO_DECREASE 10880 /* 85% */
6396    
6397     #define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
6398     #define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
6399     @@ -310,6 +311,7 @@ struct iwl_lq_sta {
6400     u32 visited_columns; /* Bitmask marking which Tx columns were
6401     * explored during a search cycle
6402     */
6403     + u64 last_tx;
6404     bool is_vht;
6405     enum ieee80211_band band;
6406    
6407     diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c
6408     index 8401627c0030..88809b2d1654 100644
6409     --- a/drivers/net/wireless/iwlwifi/mvm/sf.c
6410     +++ b/drivers/net/wireless/iwlwifi/mvm/sf.c
6411     @@ -274,7 +274,8 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
6412     return -EINVAL;
6413     if (changed_vif->type != NL80211_IFTYPE_STATION) {
6414     new_state = SF_UNINIT;
6415     - } else if (changed_vif->bss_conf.assoc) {
6416     + } else if (changed_vif->bss_conf.assoc &&
6417     + changed_vif->bss_conf.dtim_period) {
6418     mvmvif = iwl_mvm_vif_from_mac80211(changed_vif);
6419     sta_id = mvmvif->ap_sta_id;
6420     new_state = SF_FULL_ON;
6421     diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
6422     index 3872ead75488..43e27a174430 100644
6423     --- a/drivers/net/wireless/iwlwifi/pcie/drv.c
6424     +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
6425     @@ -372,12 +372,14 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
6426     {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
6427     {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
6428     {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
6429     + {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)},
6430     {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
6431     {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
6432     {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
6433     {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
6434     {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
6435     {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
6436     + {IWL_PCI_DEVICE(0x095A, 0x9200, iwl7265_2ac_cfg)},
6437     {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
6438     {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
6439     {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
6440     diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
6441     index f9507807b486..8d42fd9b0811 100644
6442     --- a/drivers/net/wireless/iwlwifi/pcie/trans.c
6443     +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
6444     @@ -1563,6 +1563,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
6445     * PCI Tx retries from interfering with C3 CPU state */
6446     pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
6447    
6448     + trans->dev = &pdev->dev;
6449     + trans_pcie->pci_dev = pdev;
6450     + iwl_disable_interrupts(trans);
6451     +
6452     err = pci_enable_msi(pdev);
6453     if (err) {
6454     dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
6455     @@ -1574,8 +1578,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
6456     }
6457     }
6458    
6459     - trans->dev = &pdev->dev;
6460     - trans_pcie->pci_dev = pdev;
6461     trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
6462     trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
6463     snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
6464     @@ -1601,8 +1603,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
6465     goto out_pci_disable_msi;
6466     }
6467    
6468     - trans_pcie->inta_mask = CSR_INI_SET_MASK;
6469     -
6470     if (iwl_pcie_alloc_ict(trans))
6471     goto out_free_cmd_pool;
6472    
6473     @@ -1614,6 +1614,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
6474     goto out_free_ict;
6475     }
6476    
6477     + trans_pcie->inta_mask = CSR_INI_SET_MASK;
6478     +
6479     return trans;
6480    
6481     out_free_ict:
6482     diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
6483     index ddeb5a709aa3..a87ee9b6585a 100644
6484     --- a/drivers/net/wireless/rt2x00/rt2x00mac.c
6485     +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
6486     @@ -621,20 +621,18 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
6487     bss_conf->bssid);
6488    
6489     /*
6490     - * Update the beacon. This is only required on USB devices. PCI
6491     - * devices fetch beacons periodically.
6492     - */
6493     - if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev))
6494     - rt2x00queue_update_beacon(rt2x00dev, vif);
6495     -
6496     - /*
6497     * Start/stop beaconing.
6498     */
6499     if (changes & BSS_CHANGED_BEACON_ENABLED) {
6500     if (!bss_conf->enable_beacon && intf->enable_beacon) {
6501     - rt2x00queue_clear_beacon(rt2x00dev, vif);
6502     rt2x00dev->intf_beaconing--;
6503     intf->enable_beacon = false;
6504     + /*
6505     + * Clear beacon in the H/W for this vif. This is needed
6506     + * to disable beaconing on this particular interface
6507     + * and keep it running on other interfaces.
6508     + */
6509     + rt2x00queue_clear_beacon(rt2x00dev, vif);
6510    
6511     if (rt2x00dev->intf_beaconing == 0) {
6512     /*
6513     @@ -645,11 +643,15 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
6514     rt2x00queue_stop_queue(rt2x00dev->bcn);
6515     mutex_unlock(&intf->beacon_skb_mutex);
6516     }
6517     -
6518     -
6519     } else if (bss_conf->enable_beacon && !intf->enable_beacon) {
6520     rt2x00dev->intf_beaconing++;
6521     intf->enable_beacon = true;
6522     + /*
6523     + * Upload beacon to the H/W. This is only required on
6524     + * USB devices. PCI devices fetch beacons periodically.
6525     + */
6526     + if (rt2x00_is_usb(rt2x00dev))
6527     + rt2x00queue_update_beacon(rt2x00dev, vif);
6528    
6529     if (rt2x00dev->intf_beaconing == 1) {
6530     /*
6531     diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
6532     index cd17c642e746..472e592b32ec 100644
6533     --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
6534     +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
6535     @@ -1001,7 +1001,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
6536     err = _rtl92cu_init_mac(hw);
6537     if (err) {
6538     RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "init mac failed!\n");
6539     - return err;
6540     + goto exit;
6541     }
6542     err = rtl92c_download_fw(hw);
6543     if (err) {
6544     diff --git a/drivers/of/irq.c b/drivers/of/irq.c
6545     index 9bcf2cf19357..ca0189308d72 100644
6546     --- a/drivers/of/irq.c
6547     +++ b/drivers/of/irq.c
6548     @@ -380,6 +380,32 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
6549     EXPORT_SYMBOL_GPL(of_irq_to_resource);
6550    
6551     /**
6552     + * of_irq_get - Decode a node's IRQ and return it as a Linux irq number
6553     + * @dev: pointer to device tree node
6554     + * @index: zero-based index of the irq
6555     + *
6556     + * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
6557     + * is not yet created.
6558     + *
6559     + */
6560     +int of_irq_get(struct device_node *dev, int index)
6561     +{
6562     + int rc;
6563     + struct of_phandle_args oirq;
6564     + struct irq_domain *domain;
6565     +
6566     + rc = of_irq_parse_one(dev, index, &oirq);
6567     + if (rc)
6568     + return rc;
6569     +
6570     + domain = irq_find_host(oirq.np);
6571     + if (!domain)
6572     + return -EPROBE_DEFER;
6573     +
6574     + return irq_create_of_mapping(&oirq);
6575     +}
6576     +
6577     +/**
6578     * of_irq_count - Count the number of IRQs a node uses
6579     * @dev: pointer to device tree node
6580     */
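
[editor's illustration, not part of the patch] of_irq_get(), added above, differs from the older resource helpers in that it reports -EPROBE_DEFER when the interrupt controller's irq_domain has not been registered yet, so a consumer can bubble that up and be re-probed later. A hypothetical platform-driver sketch of that usage; the driver name, compatible string, and handler are invented, and whether the symbol is usable from a module depends on the kernel it is built against:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>

static irqreturn_t demo_isr(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
        int irq = of_irq_get(pdev->dev.of_node, 0);

        if (irq == -EPROBE_DEFER)
                return irq;             /* irqchip not ready yet: try again later */
        if (irq <= 0)
                return irq ? irq : -ENXIO;

        return devm_request_irq(&pdev->dev, irq, demo_isr, 0, "demo", pdev);
}

static const struct of_device_id demo_of_match[] = {
        { .compatible = "acme,demo-device" },   /* invented compatible string */
        { }
};
MODULE_DEVICE_TABLE(of, demo_of_match);

static struct platform_driver demo_driver = {
        .probe = demo_probe,
        .driver = {
                .name = "demo",
                .of_match_table = demo_of_match,
        },
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");
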
6581     diff --git a/drivers/of/platform.c b/drivers/of/platform.c
6582     index 404d1daebefa..bd47fbc53dc9 100644
6583     --- a/drivers/of/platform.c
6584     +++ b/drivers/of/platform.c
6585     @@ -168,7 +168,9 @@ struct platform_device *of_device_alloc(struct device_node *np,
6586     rc = of_address_to_resource(np, i, res);
6587     WARN_ON(rc);
6588     }
6589     - WARN_ON(of_irq_to_resource_table(np, res, num_irq) != num_irq);
6590     + if (of_irq_to_resource_table(np, res, num_irq) != num_irq)
6591     + pr_debug("not all legacy IRQ resources mapped for %s\n",
6592     + np->name);
6593     }
6594    
6595     dev->dev.of_node = of_node_get(np);
6596     diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
6597     index 05e352889868..483d9ad89705 100644
6598     --- a/drivers/pci/host/pci-mvebu.c
6599     +++ b/drivers/pci/host/pci-mvebu.c
6600     @@ -291,6 +291,58 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
6601     return PCIBIOS_SUCCESSFUL;
6602     }
6603    
6604     +/*
6605     + * Remove windows, starting from the largest ones to the smallest
6606     + * ones.
6607     + */
6608     +static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
6609     + phys_addr_t base, size_t size)
6610     +{
6611     + while (size) {
6612     + size_t sz = 1 << (fls(size) - 1);
6613     +
6614     + mvebu_mbus_del_window(base, sz);
6615     + base += sz;
6616     + size -= sz;
6617     + }
6618     +}
6619     +
6620     +/*
6621     + * MBus windows can only have a power of two size, but PCI BARs do not
6622     + * have this constraint. Therefore, we have to split the PCI BAR into
6623     + * areas each having a power of two size. We start from the largest
6624     + * one (i.e highest order bit set in the size).
6625     + */
6626     +static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
6627     + unsigned int target, unsigned int attribute,
6628     + phys_addr_t base, size_t size,
6629     + phys_addr_t remap)
6630     +{
6631     + size_t size_mapped = 0;
6632     +
6633     + while (size) {
6634     + size_t sz = 1 << (fls(size) - 1);
6635     + int ret;
6636     +
6637     + ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
6638     + sz, remap);
6639     + if (ret) {
6640     + dev_err(&port->pcie->pdev->dev,
6641     + "Could not create MBus window at 0x%x, size 0x%x: %d\n",
6642     + base, sz, ret);
6643     + mvebu_pcie_del_windows(port, base - size_mapped,
6644     + size_mapped);
6645     + return;
6646     + }
6647     +
6648     + size -= sz;
6649     + size_mapped += sz;
6650     + base += sz;
6651     + if (remap != MVEBU_MBUS_NO_REMAP)
6652     + remap += sz;
6653     + }
6654     +}
6655     +
6656     static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
6657     {
6658     phys_addr_t iobase;
6659     @@ -302,8 +354,8 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
6660    
6661     /* If a window was configured, remove it */
6662     if (port->iowin_base) {
6663     - mvebu_mbus_del_window(port->iowin_base,
6664     - port->iowin_size);
6665     + mvebu_pcie_del_windows(port, port->iowin_base,
6666     + port->iowin_size);
6667     port->iowin_base = 0;
6668     port->iowin_size = 0;
6669     }
6670     @@ -329,11 +381,11 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
6671     port->iowin_base = port->pcie->io.start + iobase;
6672     port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
6673     (port->bridge.iolimitupper << 16)) -
6674     - iobase);
6675     + iobase) + 1;
6676    
6677     - mvebu_mbus_add_window_remap_by_id(port->io_target, port->io_attr,
6678     - port->iowin_base, port->iowin_size,
6679     - iobase);
6680     + mvebu_pcie_add_windows(port, port->io_target, port->io_attr,
6681     + port->iowin_base, port->iowin_size,
6682     + iobase);
6683     }
6684    
6685     static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
6686     @@ -344,8 +396,8 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
6687    
6688     /* If a window was configured, remove it */
6689     if (port->memwin_base) {
6690     - mvebu_mbus_del_window(port->memwin_base,
6691     - port->memwin_size);
6692     + mvebu_pcie_del_windows(port, port->memwin_base,
6693     + port->memwin_size);
6694     port->memwin_base = 0;
6695     port->memwin_size = 0;
6696     }
6697     @@ -362,10 +414,11 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
6698     port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16);
6699     port->memwin_size =
6700     (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
6701     - port->memwin_base;
6702     + port->memwin_base + 1;
6703    
6704     - mvebu_mbus_add_window_by_id(port->mem_target, port->mem_attr,
6705     - port->memwin_base, port->memwin_size);
6706     + mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr,
6707     + port->memwin_base, port->memwin_size,
6708     + MVEBU_MBUS_NO_REMAP);
6709     }
6710    
6711     /*
6712     @@ -721,14 +774,21 @@ static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
6713    
6714     /*
6715     * On the PCI-to-PCI bridge side, the I/O windows must have at
6716     - * least a 64 KB size and be aligned on their size, and the
6717     - * memory windows must have at least a 1 MB size and be
6718     - * aligned on their size
6719     + * least a 64 KB size and the memory windows must have at
6720     + * least a 1 MB size. Moreover, MBus windows need to have a
6721     + * base address aligned on their size, and their size must be
6722     + * a power of two. This means that if the BAR doesn't have a
6723     + * power of two size, several MBus windows will actually be
6724     + * created. We need to ensure that the biggest MBus window
6725     + * (which will be the first one) is aligned on its size, which
6726     + * explains the rounddown_pow_of_two() being done here.
6727     */
6728     if (res->flags & IORESOURCE_IO)
6729     - return round_up(start, max_t(resource_size_t, SZ_64K, size));
6730     + return round_up(start, max_t(resource_size_t, SZ_64K,
6731     + rounddown_pow_of_two(size)));
6732     else if (res->flags & IORESOURCE_MEM)
6733     - return round_up(start, max_t(resource_size_t, SZ_1M, size));
6734     + return round_up(start, max_t(resource_size_t, SZ_1M,
6735     + rounddown_pow_of_two(size)));
6736     else
6737     return start;
6738     }
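
[editor's illustration, not part of the patch] mvebu_pcie_add_windows() above implements the constraint spelled out in its comment: MBus windows must be power-of-two sized and size-aligned, so an arbitrary bridge window is split greedily from the highest set bit of the size downward (which is also why align_resource now rounds the start up to rounddown_pow_of_two(size)). A stand-alone C model of that split; the base address and size are arbitrary:

#include <stdio.h>
#include <stddef.h>

/* highest power of two <= size, i.e. 1 << (fls(size) - 1) */
static size_t top_pow2(size_t size)
{
        size_t sz = 1;

        while (sz <= size / 2)
                sz <<= 1;
        return sz;
}

int main(void)
{
        unsigned long base = 0xe0000000UL;      /* arbitrary window base */
        size_t size = 0x00280000;               /* 2.5 MiB: not a power of two */

        while (size) {
                size_t sz = top_pow2(size);

                printf("window: base=0x%lx size=0x%zx\n", base, sz);
                base += sz;
                size -= sz;
        }
        return 0;
}
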
6739     diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
6740     index 58499277903a..6efc2ec5e4db 100644
6741     --- a/drivers/pci/hotplug/shpchp_ctrl.c
6742     +++ b/drivers/pci/hotplug/shpchp_ctrl.c
6743     @@ -282,8 +282,8 @@ static int board_added(struct slot *p_slot)
6744     return WRONG_BUS_FREQUENCY;
6745     }
6746    
6747     - bsp = ctrl->pci_dev->bus->cur_bus_speed;
6748     - msp = ctrl->pci_dev->bus->max_bus_speed;
6749     + bsp = ctrl->pci_dev->subordinate->cur_bus_speed;
6750     + msp = ctrl->pci_dev->subordinate->max_bus_speed;
6751    
6752     /* Check if there are other slots or devices on the same bus */
6753     if (!list_empty(&ctrl->pci_dev->subordinate->devices))
6754     diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
6755     index fdbc294821e6..33bb970980d2 100644
6756     --- a/drivers/pci/pci.c
6757     +++ b/drivers/pci/pci.c
6758     @@ -3043,7 +3043,8 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
6759     if (!pci_is_pcie(dev))
6760     return 1;
6761    
6762     - return pci_wait_for_pending(dev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND);
6763     + return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
6764     + PCI_EXP_DEVSTA_TRPND);
6765     }
6766     EXPORT_SYMBOL(pci_wait_for_pending_transaction);
6767    
6768     @@ -3085,7 +3086,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
6769     return 0;
6770    
6771     /* Wait for Transaction Pending bit clean */
6772     - if (pci_wait_for_pending(dev, PCI_AF_STATUS, PCI_AF_STATUS_TP))
6773     + if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP))
6774     goto clear;
6775    
6776     dev_err(&dev->dev, "transaction is not cleared; "
6777     diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
6778     index 9f611cbbc294..c31aa07b3ba5 100644
6779     --- a/drivers/pnp/pnpacpi/core.c
6780     +++ b/drivers/pnp/pnpacpi/core.c
6781     @@ -83,8 +83,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
6782     {
6783     struct acpi_device *acpi_dev;
6784     acpi_handle handle;
6785     - struct acpi_buffer buffer;
6786     - int ret;
6787     + int ret = 0;
6788    
6789     pnp_dbg(&dev->dev, "set resources\n");
6790    
6791     @@ -97,19 +96,26 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
6792     if (WARN_ON_ONCE(acpi_dev != dev->data))
6793     dev->data = acpi_dev;
6794    
6795     - ret = pnpacpi_build_resource_template(dev, &buffer);
6796     - if (ret)
6797     - return ret;
6798     - ret = pnpacpi_encode_resources(dev, &buffer);
6799     - if (ret) {
6800     + if (acpi_has_method(handle, METHOD_NAME__SRS)) {
6801     + struct acpi_buffer buffer;
6802     +
6803     + ret = pnpacpi_build_resource_template(dev, &buffer);
6804     + if (ret)
6805     + return ret;
6806     +
6807     + ret = pnpacpi_encode_resources(dev, &buffer);
6808     + if (!ret) {
6809     + acpi_status status;
6810     +
6811     + status = acpi_set_current_resources(handle, &buffer);
6812     + if (ACPI_FAILURE(status))
6813     + ret = -EIO;
6814     + }
6815     kfree(buffer.pointer);
6816     - return ret;
6817     }
6818     - if (ACPI_FAILURE(acpi_set_current_resources(handle, &buffer)))
6819     - ret = -EINVAL;
6820     - else if (acpi_bus_power_manageable(handle))
6821     + if (!ret && acpi_bus_power_manageable(handle))
6822     ret = acpi_bus_set_power(handle, ACPI_STATE_D0);
6823     - kfree(buffer.pointer);
6824     +
6825     return ret;
6826     }
6827    
6828     @@ -117,7 +123,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
6829     {
6830     struct acpi_device *acpi_dev;
6831     acpi_handle handle;
6832     - int ret;
6833     + acpi_status status;
6834    
6835     dev_dbg(&dev->dev, "disable resources\n");
6836    
6837     @@ -128,13 +134,15 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
6838     }
6839    
6840     /* acpi_unregister_gsi(pnp_irq(dev, 0)); */
6841     - ret = 0;
6842     if (acpi_bus_power_manageable(handle))
6843     acpi_bus_set_power(handle, ACPI_STATE_D3_COLD);
6844     - /* continue even if acpi_bus_set_power() fails */
6845     - if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL)))
6846     - ret = -ENODEV;
6847     - return ret;
6848     +
6849     + /* continue even if acpi_bus_set_power() fails */
6850     + status = acpi_evaluate_object(handle, "_DIS", NULL, NULL);
6851     + if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
6852     + return -ENODEV;
6853     +
6854     + return 0;
6855     }
6856    
6857     #ifdef CONFIG_ACPI_SLEEP
6858     diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
6859     index bd628a6f981d..e5f13c4310fe 100644
6860     --- a/drivers/rtc/rtc-hym8563.c
6861     +++ b/drivers/rtc/rtc-hym8563.c
6862     @@ -569,6 +569,9 @@ static int hym8563_probe(struct i2c_client *client,
6863     if (IS_ERR(hym8563->rtc))
6864     return PTR_ERR(hym8563->rtc);
6865    
6866     + /* the hym8563 alarm only supports a minute accuracy */
6867     + hym8563->rtc->uie_unsupported = 1;
6868     +
6869     #ifdef CONFIG_COMMON_CLK
6870     hym8563_clkout_register_clk(hym8563);
6871     #endif
6872     diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
6873     index d0b28bba38be..fbf3b22efe5a 100644
6874     --- a/drivers/spi/spi.c
6875     +++ b/drivers/spi/spi.c
6876     @@ -1568,7 +1568,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
6877     */
6878     int spi_setup(struct spi_device *spi)
6879     {
6880     - unsigned bad_bits;
6881     + unsigned bad_bits, ugly_bits;
6882     int status = 0;
6883    
6884     /* check mode to prevent that DUAL and QUAD set at the same time
6885     @@ -1588,6 +1588,15 @@ int spi_setup(struct spi_device *spi)
6886     * that aren't supported with their current master
6887     */
6888     bad_bits = spi->mode & ~spi->master->mode_bits;
6889     + ugly_bits = bad_bits &
6890     + (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
6891     + if (ugly_bits) {
6892     + dev_warn(&spi->dev,
6893     + "setup: ignoring unsupported mode bits %x\n",
6894     + ugly_bits);
6895     + spi->mode &= ~ugly_bits;
6896     + bad_bits &= ~ugly_bits;
6897     + }
6898     if (bad_bits) {
6899     dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
6900     bad_bits);
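
[editor's illustration, not part of the patch] The spi_setup() hunk distinguishes two classes of unsupported mode bits: the dual/quad transfer flags are merely dropped with a warning, since a device that can do quad I/O still works in single-wire mode, while any other unsupported bit remains a hard error. A stand-alone C sketch of that masking; the bit values are illustrative, not the real SPI_* constants:

#include <stdio.h>

/* illustrative mode bits; the real SPI_* values live in linux/spi/spi.h */
#define MODE_CPHA      0x01
#define MODE_TX_DUAL   0x10
#define MODE_TX_QUAD   0x20
#define MODE_RX_DUAL   0x40
#define MODE_RX_QUAD   0x80
#define OPTIONAL_BITS  (MODE_TX_DUAL | MODE_TX_QUAD | MODE_RX_DUAL | MODE_RX_QUAD)

int main(void)
{
        unsigned master_bits = MODE_CPHA;               /* what the controller supports */
        unsigned mode = MODE_CPHA | MODE_TX_QUAD;       /* what the device asked for */

        unsigned bad_bits = mode & ~master_bits;
        unsigned ugly_bits = bad_bits & OPTIONAL_BITS;

        if (ugly_bits) {
                printf("ignoring unsupported mode bits %#x\n", ugly_bits);
                mode &= ~ugly_bits;
                bad_bits &= ~ugly_bits;
        }
        if (bad_bits) {
                printf("unsupported mode bits %#x, failing setup\n", bad_bits);
                return 1;
        }
        printf("effective mode %#x\n", mode);
        return 0;
}
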
6901     diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
6902     index 7e5469a80fe3..86b92d95ac10 100644
6903     --- a/drivers/target/iscsi/iscsi_target.c
6904     +++ b/drivers/target/iscsi/iscsi_target.c
6905     @@ -1564,7 +1564,9 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
6906     * Initiator is expecting a NopIN ping reply..
6907     */
6908     if (hdr->itt != RESERVED_ITT) {
6909     - BUG_ON(!cmd);
6910     + if (!cmd)
6911     + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
6912     + (unsigned char *)hdr);
6913    
6914     spin_lock_bh(&conn->cmd_lock);
6915     list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
6916     diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
6917     index 65001e133670..26416c15d65c 100644
6918     --- a/drivers/target/target_core_device.c
6919     +++ b/drivers/target/target_core_device.c
6920     @@ -798,10 +798,10 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
6921     pr_err("emulate_write_cache not supported for pSCSI\n");
6922     return -EINVAL;
6923     }
6924     - if (dev->transport->get_write_cache) {
6925     - pr_warn("emulate_write_cache cannot be changed when underlying"
6926     - " HW reports WriteCacheEnabled, ignoring request\n");
6927     - return 0;
6928     + if (flag &&
6929     + dev->transport->get_write_cache) {
6930     + pr_err("emulate_write_cache not supported for this device\n");
6931     + return -EINVAL;
6932     }
6933    
6934     dev->dev_attrib.emulate_write_cache = flag;
6935     @@ -936,6 +936,10 @@ int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
6936     return 0;
6937     }
6938     if (!dev->transport->init_prot || !dev->transport->free_prot) {
6939     + /* 0 is only allowed value for non-supporting backends */
6940     + if (flag == 0)
6941     + return 0;
6942     +
6943     pr_err("DIF protection not supported by backend: %s\n",
6944     dev->transport->name);
6945     return -ENOSYS;
6946     diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
6947     index 2956250b7225..98b48d400a3a 100644
6948     --- a/drivers/target/target_core_transport.c
6949     +++ b/drivers/target/target_core_transport.c
6950     @@ -1102,6 +1102,7 @@ void transport_init_se_cmd(
6951     init_completion(&cmd->cmd_wait_comp);
6952     init_completion(&cmd->task_stop_comp);
6953     spin_lock_init(&cmd->t_state_lock);
6954     + kref_init(&cmd->cmd_kref);
6955     cmd->transport_state = CMD_T_DEV_ACTIVE;
6956    
6957     cmd->se_tfo = tfo;
6958     @@ -2292,7 +2293,6 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
6959     unsigned long flags;
6960     int ret = 0;
6961    
6962     - kref_init(&se_cmd->cmd_kref);
6963     /*
6964     * Add a second kref if the fabric caller is expecting to handle
6965     * fabric acknowledgement that requires two target_put_sess_cmd()
6966     diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
6967     index 8b2c1aaf81de..d22cdc77e9d4 100644
6968     --- a/drivers/target/tcm_fc/tfc_cmd.c
6969     +++ b/drivers/target/tcm_fc/tfc_cmd.c
6970     @@ -90,18 +90,18 @@ static void ft_free_cmd(struct ft_cmd *cmd)
6971     {
6972     struct fc_frame *fp;
6973     struct fc_lport *lport;
6974     - struct se_session *se_sess;
6975     + struct ft_sess *sess;
6976    
6977     if (!cmd)
6978     return;
6979     - se_sess = cmd->sess->se_sess;
6980     + sess = cmd->sess;
6981     fp = cmd->req_frame;
6982     lport = fr_dev(fp);
6983     if (fr_seq(fp))
6984     lport->tt.seq_release(fr_seq(fp));
6985     fc_frame_free(fp);
6986     - percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
6987     - ft_sess_put(cmd->sess); /* undo get from lookup at recv */
6988     + percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
6989     + ft_sess_put(sess); /* undo get from lookup at recv */
6990     }
6991    
6992     void ft_release_cmd(struct se_cmd *se_cmd)
6993     diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
6994     index 2798a23a0834..29a7f632b354 100644
6995     --- a/drivers/tty/serial/8250/8250_core.c
6996     +++ b/drivers/tty/serial/8250/8250_core.c
6997     @@ -1520,7 +1520,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
6998     status = serial8250_rx_chars(up, status);
6999     }
7000     serial8250_modem_status(up);
7001     - if (status & UART_LSR_THRE)
7002     + if (!up->dma && (status & UART_LSR_THRE))
7003     serial8250_tx_chars(up);
7004    
7005     spin_unlock_irqrestore(&port->lock, flags);
7006     diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
7007     index 7046769608d4..ab9096dc3849 100644
7008     --- a/drivers/tty/serial/8250/8250_dma.c
7009     +++ b/drivers/tty/serial/8250/8250_dma.c
7010     @@ -20,12 +20,15 @@ static void __dma_tx_complete(void *param)
7011     struct uart_8250_port *p = param;
7012     struct uart_8250_dma *dma = p->dma;
7013     struct circ_buf *xmit = &p->port.state->xmit;
7014     -
7015     - dma->tx_running = 0;
7016     + unsigned long flags;
7017    
7018     dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
7019     UART_XMIT_SIZE, DMA_TO_DEVICE);
7020    
7021     + spin_lock_irqsave(&p->port.lock, flags);
7022     +
7023     + dma->tx_running = 0;
7024     +
7025     xmit->tail += dma->tx_size;
7026     xmit->tail &= UART_XMIT_SIZE - 1;
7027     p->port.icount.tx += dma->tx_size;
7028     @@ -35,6 +38,8 @@ static void __dma_tx_complete(void *param)
7029    
7030     if (!uart_circ_empty(xmit) && !uart_tx_stopped(&p->port))
7031     serial8250_tx_dma(p);
7032     +
7033     + spin_unlock_irqrestore(&p->port.lock, flags);
7034     }
7035    
7036     static void __dma_rx_complete(void *param)
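
The 8250 hunks above stop the IRQ handler from calling serial8250_tx_chars() while DMA owns transmission, and take the port lock around the TX-completion callback's bookkeeping (tx_running, the circular-buffer tail, the TX count) so it cannot race with the interrupt path. A loose userspace sketch of the same locking shape, with a pthread mutex standing in for the spinlock and invented names:

#include <pthread.h>
#include <stdio.h>

#define BUF_SIZE 16   /* power of two, like UART_XMIT_SIZE */

struct port {
        pthread_mutex_t lock;
        unsigned int tail;        /* consumer index into the circular buffer */
        unsigned long tx_count;   /* analogous to icount.tx */
        int dma_running;
};

/* Called from a "completion callback"; all shared state is updated under the lock. */
static void dma_tx_complete(struct port *p, unsigned int transferred)
{
        pthread_mutex_lock(&p->lock);
        p->dma_running = 0;
        p->tail = (p->tail + transferred) & (BUF_SIZE - 1);
        p->tx_count += transferred;
        /* a real driver would kick the next transfer here, still under the lock */
        pthread_mutex_unlock(&p->lock);
}

int main(void)
{
        struct port p = { .lock = PTHREAD_MUTEX_INITIALIZER };

        dma_tx_complete(&p, 10);
        dma_tx_complete(&p, 10);
        printf("tail=%u tx=%lu\n", p.tail, p.tx_count);   /* tail wraps: 20 & 15 = 4 */
        return 0;
}
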
7037     diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
7038     index cea8c20a1425..1926925a52a9 100644
7039     --- a/drivers/usb/gadget/at91_udc.c
7040     +++ b/drivers/usb/gadget/at91_udc.c
7041     @@ -1709,16 +1709,6 @@ static int at91udc_probe(struct platform_device *pdev)
7042     return -ENODEV;
7043     }
7044    
7045     - if (pdev->num_resources != 2) {
7046     - DBG("invalid num_resources\n");
7047     - return -ENODEV;
7048     - }
7049     - if ((pdev->resource[0].flags != IORESOURCE_MEM)
7050     - || (pdev->resource[1].flags != IORESOURCE_IRQ)) {
7051     - DBG("invalid resource type\n");
7052     - return -ENODEV;
7053     - }
7054     -
7055     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
7056     if (!res)
7057     return -ENXIO;
7058     diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
7059     index 6f2c8d3899d2..cf2734b532a7 100644
7060     --- a/drivers/usb/host/ehci-fsl.c
7061     +++ b/drivers/usb/host/ehci-fsl.c
7062     @@ -248,7 +248,8 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
7063     break;
7064     }
7065    
7066     - if (pdata->have_sysif_regs && pdata->controller_ver &&
7067     + if (pdata->have_sysif_regs &&
7068     + pdata->controller_ver > FSL_USB_VER_1_6 &&
7069     (phy_mode == FSL_USB2_PHY_ULPI)) {
7070     /* check PHY_CLK_VALID to get phy clk valid */
7071     if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
7072     diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
7073     index c81c8721cc5a..cd871b895013 100644
7074     --- a/drivers/usb/host/ohci-hub.c
7075     +++ b/drivers/usb/host/ohci-hub.c
7076     @@ -90,6 +90,24 @@ __acquires(ohci->lock)
7077     dl_done_list (ohci);
7078     finish_unlinks (ohci, ohci_frame_no(ohci));
7079    
7080     + /*
7081     + * Some controllers don't handle "global" suspend properly if
7082     + * there are unsuspended ports. For these controllers, put all
7083     + * the enabled ports into suspend before suspending the root hub.
7084     + */
7085     + if (ohci->flags & OHCI_QUIRK_GLOBAL_SUSPEND) {
7086     + __hc32 __iomem *portstat = ohci->regs->roothub.portstatus;
7087     + int i;
7088     + unsigned temp;
7089     +
7090     + for (i = 0; i < ohci->num_ports; (++i, ++portstat)) {
7091     + temp = ohci_readl(ohci, portstat);
7092     + if ((temp & (RH_PS_PES | RH_PS_PSS)) ==
7093     + RH_PS_PES)
7094     + ohci_writel(ohci, RH_PS_PSS, portstat);
7095     + }
7096     + }
7097     +
7098     /* maybe resume can wake root hub */
7099     if (ohci_to_hcd(ohci)->self.root_hub->do_remote_wakeup || autostop) {
7100     ohci->hc_control |= OHCI_CTRL_RWE;
7101     diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
7102     index 90879e9ccbec..bb1509675727 100644
7103     --- a/drivers/usb/host/ohci-pci.c
7104     +++ b/drivers/usb/host/ohci-pci.c
7105     @@ -160,6 +160,7 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
7106     ohci_dbg(ohci, "enabled AMD prefetch quirk\n");
7107     }
7108    
7109     + ohci->flags |= OHCI_QUIRK_GLOBAL_SUSPEND;
7110     return 0;
7111     }
7112    
7113     diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
7114     index 9250cada13f0..4550ce05af7f 100644
7115     --- a/drivers/usb/host/ohci.h
7116     +++ b/drivers/usb/host/ohci.h
7117     @@ -405,6 +405,8 @@ struct ohci_hcd {
7118     #define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
7119     #define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/
7120     #define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */
7121     +#define OHCI_QUIRK_GLOBAL_SUSPEND 0x800 /* must suspend ports */
7122     +
7123     // there are also chip quirks/bugs in init logic
7124    
7125     struct work_struct nec_work; /* Worker for NEC quirk */
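
The ohci-hub.c hunk above walks the root-hub port status registers and, for any port that is enabled (PES set) but not yet suspended (PSS clear), writes the suspend bit before the controller is globally suspended. A standalone model of that bit test; the register array and bit values are stand-ins, not the real OHCI register layout:

#include <stdio.h>

#define PS_PES 0x2u   /* port enable status (stand-in value) */
#define PS_PSS 0x4u   /* port suspend status (stand-in value) */

int main(void)
{
        /* simulated port status registers: enabled, enabled+suspended, disabled */
        unsigned int portstatus[3] = { PS_PES, PS_PES | PS_PSS, 0 };

        for (int i = 0; i < 3; i++) {
                /* only enabled-and-not-suspended ports get a suspend request */
                if ((portstatus[i] & (PS_PES | PS_PSS)) == PS_PES) {
                        portstatus[i] |= PS_PSS;   /* models writing RH_PS_PSS */
                        printf("port %d: suspended\n", i);
                } else {
                        printf("port %d: left alone\n", i);
                }
        }
        return 0;
}
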
7126     diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
7127     index 7ed681a714a5..6c0a542e8ec1 100644
7128     --- a/drivers/usb/serial/qcserial.c
7129     +++ b/drivers/usb/serial/qcserial.c
7130     @@ -151,6 +151,21 @@ static const struct usb_device_id id_table[] = {
7131     {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */
7132     {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */
7133     {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */
7134     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 0)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
7135     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
7136     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 3)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
7137     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 0)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Device Management */
7138     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 2)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card NMEA */
7139     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Modem */
7140     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 0)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Device Management */
7141     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 2)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card NMEA */
7142     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 3)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Modem */
7143     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 0)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
7144     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 2)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
7145     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 3)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
7146     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 0)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
7147     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 2)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
7148     + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 3)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Modem */
7149    
7150     { } /* Terminating entry */
7151     };
7152     diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
7153     index 4ef2a80728f7..008d805c3d21 100644
7154     --- a/drivers/usb/storage/shuttle_usbat.c
7155     +++ b/drivers/usb/storage/shuttle_usbat.c
7156     @@ -1851,7 +1851,7 @@ static int usbat_probe(struct usb_interface *intf,
7157     us->transport_name = "Shuttle USBAT";
7158     us->transport = usbat_flash_transport;
7159     us->transport_reset = usb_stor_CB_reset;
7160     - us->max_lun = 1;
7161     + us->max_lun = 0;
7162    
7163     result = usb_stor_probe2(us);
7164     return result;
7165     diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
7166     index adbeb255616a..042c83b01046 100644
7167     --- a/drivers/usb/storage/unusual_devs.h
7168     +++ b/drivers/usb/storage/unusual_devs.h
7169     @@ -234,6 +234,20 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
7170     USB_SC_DEVICE, USB_PR_DEVICE, NULL,
7171     US_FL_MAX_SECTORS_64 ),
7172    
7173     +/* Reported by Daniele Forsi <dforsi@gmail.com> */
7174     +UNUSUAL_DEV( 0x0421, 0x04b9, 0x0350, 0x0350,
7175     + "Nokia",
7176     + "5300",
7177     + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
7178     + US_FL_MAX_SECTORS_64 ),
7179     +
7180     +/* Patch submitted by Victor A. Santos <victoraur.santos@gmail.com> */
7181     +UNUSUAL_DEV( 0x0421, 0x05af, 0x0742, 0x0742,
7182     + "Nokia",
7183     + "305",
7184     + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
7185     + US_FL_MAX_SECTORS_64),
7186     +
7187     /* Patch submitted by Mikhail Zolotaryov <lebon@lebon.org.ua> */
7188     UNUSUAL_DEV( 0x0421, 0x06aa, 0x1110, 0x1110,
7189     "Nokia",
7190     diff --git a/fs/affs/super.c b/fs/affs/super.c
7191     index d098731b82ff..9a5b19dc899d 100644
7192     --- a/fs/affs/super.c
7193     +++ b/fs/affs/super.c
7194     @@ -336,8 +336,6 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
7195     &blocksize,&sbi->s_prefix,
7196     sbi->s_volume, &mount_flags)) {
7197     printk(KERN_ERR "AFFS: Error parsing options\n");
7198     - kfree(sbi->s_prefix);
7199     - kfree(sbi);
7200     return -EINVAL;
7201     }
7202     /* N.B. after this point s_prefix must be released */
7203     diff --git a/fs/aio.c b/fs/aio.c
7204     index 12a3de0ee6da..04cd7686555d 100644
7205     --- a/fs/aio.c
7206     +++ b/fs/aio.c
7207     @@ -1299,10 +1299,8 @@ rw_common:
7208     &iovec, compat)
7209     : aio_setup_single_vector(req, rw, buf, &nr_segs,
7210     iovec);
7211     - if (ret)
7212     - return ret;
7213     -
7214     - ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
7215     + if (!ret)
7216     + ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
7217     if (ret < 0) {
7218     if (iovec != &inline_vec)
7219     kfree(iovec);
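
The aio hunk above folds rw_verify_area() into the existing error check so that a failure in either step reaches the same cleanup code that frees a non-inline iovec, rather than returning early and skipping it. The same single-cleanup-path shape in a self-contained sketch with invented helper names:

#include <stdio.h>
#include <stdlib.h>

static int setup_vector(int **vec)
{
        *vec = malloc(4 * sizeof(**vec));
        return *vec ? 0 : -1;
}

static int verify_area(int fail)
{
        return fail ? -1 : 0;      /* pretend verification can fail */
}

static int do_request(int fail_verify)
{
        int *vec = NULL;
        int ret;

        ret = setup_vector(&vec);
        if (!ret)                          /* only verify if setup succeeded ... */
                ret = verify_area(fail_verify);
        if (ret < 0) {                     /* ... but both failures land here */
                free(vec);                 /* free(NULL) is a no-op, so this is safe */
                return ret;
        }

        free(vec);
        return 0;
}

int main(void)
{
        printf("ok path: %d\n", do_request(0));
        printf("error path: %d\n", do_request(1));
        return 0;
}
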
7220     diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
7221     index 2caf36ac3e93..cc87c1abac97 100644
7222     --- a/fs/autofs4/root.c
7223     +++ b/fs/autofs4/root.c
7224     @@ -179,7 +179,7 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
7225     spin_lock(&active->d_lock);
7226    
7227     /* Already gone? */
7228     - if (!d_count(active))
7229     + if ((int) d_count(active) <= 0)
7230     goto next;
7231    
7232     qstr = &active->d_name;
7233     @@ -230,7 +230,7 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
7234    
7235     spin_lock(&expiring->d_lock);
7236    
7237     - /* Bad luck, we've already been dentry_iput */
7238     + /* We've already been dentry_iput or unlinked */
7239     if (!expiring->d_inode)
7240     goto next;
7241    
7242     diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
7243     index 45eda6d7a40c..5e0982aa7000 100644
7244     --- a/fs/ceph/dir.c
7245     +++ b/fs/ceph/dir.c
7246     @@ -446,7 +446,6 @@ more:
7247     if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
7248     dout(" marking %p complete\n", inode);
7249     __ceph_dir_set_complete(ci, fi->dir_release_count);
7250     - ci->i_max_offset = ctx->pos;
7251     }
7252     spin_unlock(&ci->i_ceph_lock);
7253    
7254     @@ -932,14 +931,16 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
7255     * to do it here.
7256     */
7257    
7258     - /* d_move screws up d_subdirs order */
7259     - ceph_dir_clear_complete(new_dir);
7260     -
7261     d_move(old_dentry, new_dentry);
7262    
7263     /* ensure target dentry is invalidated, despite
7264     rehashing bug in vfs_rename_dir */
7265     ceph_invalidate_dentry_lease(new_dentry);
7266     +
7267     + /* d_move screws up sibling dentries' offsets */
7268     + ceph_dir_clear_complete(old_dir);
7269     + ceph_dir_clear_complete(new_dir);
7270     +
7271     }
7272     ceph_mdsc_put_request(req);
7273     return err;
7274     diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
7275     index 32d519d8a2e2..6471f9c83428 100644
7276     --- a/fs/ceph/inode.c
7277     +++ b/fs/ceph/inode.c
7278     @@ -752,7 +752,6 @@ static int fill_inode(struct inode *inode,
7279     !__ceph_dir_is_complete(ci)) {
7280     dout(" marking %p complete (empty)\n", inode);
7281     __ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count));
7282     - ci->i_max_offset = 2;
7283     }
7284     no_change:
7285     spin_unlock(&ci->i_ceph_lock);
7286     @@ -890,41 +889,6 @@ out_unlock:
7287     }
7288    
7289     /*
7290     - * Set dentry's directory position based on the current dir's max, and
7291     - * order it in d_subdirs, so that dcache_readdir behaves.
7292     - *
7293     - * Always called under directory's i_mutex.
7294     - */
7295     -static void ceph_set_dentry_offset(struct dentry *dn)
7296     -{
7297     - struct dentry *dir = dn->d_parent;
7298     - struct inode *inode = dir->d_inode;
7299     - struct ceph_inode_info *ci;
7300     - struct ceph_dentry_info *di;
7301     -
7302     - BUG_ON(!inode);
7303     -
7304     - ci = ceph_inode(inode);
7305     - di = ceph_dentry(dn);
7306     -
7307     - spin_lock(&ci->i_ceph_lock);
7308     - if (!__ceph_dir_is_complete(ci)) {
7309     - spin_unlock(&ci->i_ceph_lock);
7310     - return;
7311     - }
7312     - di->offset = ceph_inode(inode)->i_max_offset++;
7313     - spin_unlock(&ci->i_ceph_lock);
7314     -
7315     - spin_lock(&dir->d_lock);
7316     - spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
7317     - list_move(&dn->d_u.d_child, &dir->d_subdirs);
7318     - dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
7319     - dn->d_u.d_child.prev, dn->d_u.d_child.next);
7320     - spin_unlock(&dn->d_lock);
7321     - spin_unlock(&dir->d_lock);
7322     -}
7323     -
7324     -/*
7325     * splice a dentry to an inode.
7326     * caller must hold directory i_mutex for this to be safe.
7327     *
7328     @@ -933,7 +897,7 @@ static void ceph_set_dentry_offset(struct dentry *dn)
7329     * the caller) if we fail.
7330     */
7331     static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
7332     - bool *prehash, bool set_offset)
7333     + bool *prehash)
7334     {
7335     struct dentry *realdn;
7336    
7337     @@ -965,8 +929,6 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
7338     }
7339     if ((!prehash || *prehash) && d_unhashed(dn))
7340     d_rehash(dn);
7341     - if (set_offset)
7342     - ceph_set_dentry_offset(dn);
7343     out:
7344     return dn;
7345     }
7346     @@ -987,7 +949,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
7347     {
7348     struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
7349     struct inode *in = NULL;
7350     - struct ceph_mds_reply_inode *ininfo;
7351     struct ceph_vino vino;
7352     struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
7353     int err = 0;
7354     @@ -1112,6 +1073,9 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
7355    
7356     /* rename? */
7357     if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
7358     + struct inode *olddir = req->r_old_dentry_dir;
7359     + BUG_ON(!olddir);
7360     +
7361     dout(" src %p '%.*s' dst %p '%.*s'\n",
7362     req->r_old_dentry,
7363     req->r_old_dentry->d_name.len,
7364     @@ -1131,13 +1095,10 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
7365     rehashing bug in vfs_rename_dir */
7366     ceph_invalidate_dentry_lease(dn);
7367    
7368     - /*
7369     - * d_move() puts the renamed dentry at the end of
7370     - * d_subdirs. We need to assign it an appropriate
7371     - * directory offset so we can behave when dir is
7372     - * complete.
7373     - */
7374     - ceph_set_dentry_offset(req->r_old_dentry);
7375     + /* d_move screws up sibling dentries' offsets */
7376     + ceph_dir_clear_complete(dir);
7377     + ceph_dir_clear_complete(olddir);
7378     +
7379     dout("dn %p gets new offset %lld\n", req->r_old_dentry,
7380     ceph_dentry(req->r_old_dentry)->offset);
7381    
7382     @@ -1164,8 +1125,9 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
7383    
7384     /* attach proper inode */
7385     if (!dn->d_inode) {
7386     + ceph_dir_clear_complete(dir);
7387     ihold(in);
7388     - dn = splice_dentry(dn, in, &have_lease, true);
7389     + dn = splice_dentry(dn, in, &have_lease);
7390     if (IS_ERR(dn)) {
7391     err = PTR_ERR(dn);
7392     goto done;
7393     @@ -1186,17 +1148,16 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
7394     (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
7395     req->r_op == CEPH_MDS_OP_MKSNAP)) {
7396     struct dentry *dn = req->r_dentry;
7397     + struct inode *dir = req->r_locked_dir;
7398    
7399     /* fill out a snapdir LOOKUPSNAP dentry */
7400     BUG_ON(!dn);
7401     - BUG_ON(!req->r_locked_dir);
7402     - BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
7403     - ininfo = rinfo->targeti.in;
7404     - vino.ino = le64_to_cpu(ininfo->ino);
7405     - vino.snap = le64_to_cpu(ininfo->snapid);
7406     + BUG_ON(!dir);
7407     + BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
7408     dout(" linking snapped dir %p to dn %p\n", in, dn);
7409     + ceph_dir_clear_complete(dir);
7410     ihold(in);
7411     - dn = splice_dentry(dn, in, NULL, true);
7412     + dn = splice_dentry(dn, in, NULL);
7413     if (IS_ERR(dn)) {
7414     err = PTR_ERR(dn);
7415     goto done;
7416     @@ -1358,7 +1319,7 @@ retry_lookup:
7417     }
7418    
7419     if (!dn->d_inode) {
7420     - dn = splice_dentry(dn, in, NULL, false);
7421     + dn = splice_dentry(dn, in, NULL);
7422     if (IS_ERR(dn)) {
7423     err = PTR_ERR(dn);
7424     dn = NULL;
7425     diff --git a/fs/ceph/super.h b/fs/ceph/super.h
7426     index d8801a95b685..df2caa84b94f 100644
7427     --- a/fs/ceph/super.h
7428     +++ b/fs/ceph/super.h
7429     @@ -266,7 +266,6 @@ struct ceph_inode_info {
7430     struct timespec i_rctime;
7431     u64 i_rbytes, i_rfiles, i_rsubdirs;
7432     u64 i_files, i_subdirs;
7433     - u64 i_max_offset; /* largest readdir offset, set with complete dir */
7434    
7435     struct rb_root i_fragtree;
7436     struct mutex i_fragtree_mutex;
7437     diff --git a/fs/coredump.c b/fs/coredump.c
7438     index e3ad709a4232..0b2528fb640e 100644
7439     --- a/fs/coredump.c
7440     +++ b/fs/coredump.c
7441     @@ -73,10 +73,15 @@ static int expand_corename(struct core_name *cn, int size)
7442     static int cn_vprintf(struct core_name *cn, const char *fmt, va_list arg)
7443     {
7444     int free, need;
7445     + va_list arg_copy;
7446    
7447     again:
7448     free = cn->size - cn->used;
7449     - need = vsnprintf(cn->corename + cn->used, free, fmt, arg);
7450     +
7451     + va_copy(arg_copy, arg);
7452     + need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
7453     + va_end(arg_copy);
7454     +
7455     if (need < free) {
7456     cn->used += need;
7457     return 0;
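
The coredump hunk above copies the va_list before every vsnprintf() call, because a va_list may only be walked once and the function can loop back to "again" when the buffer was too small. A self-contained userspace version of that grow-and-retry loop (simplified: the expand step is a plain realloc here):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

struct name_buf {
        char *data;
        size_t size;
        size_t used;
};

static int buf_vprintf(struct name_buf *b, const char *fmt, va_list args)
{
        for (;;) {
                va_list copy;
                size_t free_space = b->size - b->used;
                int need;

                va_copy(copy, args);               /* never consume 'args' itself */
                need = vsnprintf(b->data + b->used, free_space, fmt, copy);
                va_end(copy);

                if (need < 0)
                        return -1;
                if ((size_t)need < free_space) {   /* it fit, including the NUL */
                        b->used += need;
                        return 0;
                }
                /* too small: grow and retry with the still-valid 'args' */
                b->size = b->used + need + 1;
                b->data = realloc(b->data, b->size);
                if (!b->data)
                        return -1;
        }
}

static int buf_printf(struct name_buf *b, const char *fmt, ...)
{
        va_list args;
        int ret;

        va_start(args, fmt);
        ret = buf_vprintf(b, fmt, args);
        va_end(args);
        return ret;
}

int main(void)
{
        struct name_buf b = { .data = calloc(1, 8), .size = 8 };

        buf_printf(&b, "core.%s.%d", "program-with-a-long-name", 12345);
        printf("%s (len %zu)\n", b.data, b.used);
        free(b.data);
        return 0;
}
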
7458     diff --git a/fs/dcache.c b/fs/dcache.c
7459     index ca02c13a84aa..7f3b4004c6c3 100644
7460     --- a/fs/dcache.c
7461     +++ b/fs/dcache.c
7462     @@ -1647,8 +1647,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
7463     unsigned add_flags = d_flags_for_inode(inode);
7464    
7465     spin_lock(&dentry->d_lock);
7466     - dentry->d_flags &= ~DCACHE_ENTRY_TYPE;
7467     - dentry->d_flags |= add_flags;
7468     + __d_set_type(dentry, add_flags);
7469     if (inode)
7470     hlist_add_head(&dentry->d_alias, &inode->i_dentry);
7471     dentry->d_inode = inode;
7472     diff --git a/fs/exec.c b/fs/exec.c
7473     index 3d78fccdd723..31e46b1b358b 100644
7474     --- a/fs/exec.c
7475     +++ b/fs/exec.c
7476     @@ -654,10 +654,10 @@ int setup_arg_pages(struct linux_binprm *bprm,
7477     unsigned long rlim_stack;
7478    
7479     #ifdef CONFIG_STACK_GROWSUP
7480     - /* Limit stack size to 1GB */
7481     + /* Limit stack size */
7482     stack_base = rlimit_max(RLIMIT_STACK);
7483     - if (stack_base > (1 << 30))
7484     - stack_base = 1 << 30;
7485     + if (stack_base > STACK_SIZE_MAX)
7486     + stack_base = STACK_SIZE_MAX;
7487    
7488     /* Make sure we didn't let the argument array grow too large. */
7489     if (vma->vm_end - vma->vm_start > stack_base)
7490     diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
7491     index dbf397bfdff2..d29640b49be3 100644
7492     --- a/fs/kernfs/file.c
7493     +++ b/fs/kernfs/file.c
7494     @@ -476,6 +476,8 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
7495    
7496     ops = kernfs_ops(of->kn);
7497     rc = ops->mmap(of, vma);
7498     + if (rc)
7499     + goto out_put;
7500    
7501     /*
7502     * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
7503     diff --git a/fs/namei.c b/fs/namei.c
7504     index 4b491b431990..4a3c105cf703 100644
7505     --- a/fs/namei.c
7506     +++ b/fs/namei.c
7507     @@ -1537,7 +1537,7 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
7508     inode = path->dentry->d_inode;
7509     }
7510     err = -ENOENT;
7511     - if (!inode)
7512     + if (!inode || d_is_negative(path->dentry))
7513     goto out_path_put;
7514    
7515     if (should_follow_link(path->dentry, follow)) {
7516     @@ -2240,7 +2240,7 @@ mountpoint_last(struct nameidata *nd, struct path *path)
7517     mutex_unlock(&dir->d_inode->i_mutex);
7518    
7519     done:
7520     - if (!dentry->d_inode) {
7521     + if (!dentry->d_inode || d_is_negative(dentry)) {
7522     error = -ENOENT;
7523     dput(dentry);
7524     goto out;
7525     @@ -2982,7 +2982,7 @@ retry_lookup:
7526     finish_lookup:
7527     /* we _can_ be in RCU mode here */
7528     error = -ENOENT;
7529     - if (d_is_negative(path->dentry)) {
7530     + if (!inode || d_is_negative(path->dentry)) {
7531     path_to_nameidata(path, nd);
7532     goto out;
7533     }
7534     diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
7535     index d190e33d0ec2..dea8c60954ba 100644
7536     --- a/fs/nfsd/nfs4acl.c
7537     +++ b/fs/nfsd/nfs4acl.c
7538     @@ -402,8 +402,10 @@ sort_pacl(struct posix_acl *pacl)
7539     * by uid/gid. */
7540     int i, j;
7541    
7542     - if (pacl->a_count <= 4)
7543     - return; /* no users or groups */
7544     + /* no users or groups */
7545     + if (!pacl || pacl->a_count <= 4)
7546     + return;
7547     +
7548     i = 1;
7549     while (pacl->a_entries[i].e_tag == ACL_USER)
7550     i++;
7551     @@ -530,13 +532,12 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
7552    
7553     /*
7554     * ACLs with no ACEs are treated differently in the inheritable
7555     - * and effective cases: when there are no inheritable ACEs, we
7556     - * set a zero-length default posix acl:
7557     + * and effective cases: when there are no inheritable ACEs,
7558     + * we return NULL and ->set_acl gets called with a NULL ACL structure.
7559     */
7560     - if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT)) {
7561     - pacl = posix_acl_alloc(0, GFP_KERNEL);
7562     - return pacl ? pacl : ERR_PTR(-ENOMEM);
7563     - }
7564     + if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT))
7565     + return NULL;
7566     +
7567     /*
7568     * When there are no effective ACEs, the following will end
7569     * up setting a 3-element effective posix ACL with all
7570     diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
7571     index 7415eac36501..1693fd93fa58 100644
7572     --- a/fs/nfsd/nfs4state.c
7573     +++ b/fs/nfsd/nfs4state.c
7574     @@ -1078,6 +1078,18 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
7575     return NULL;
7576     }
7577     clp->cl_name.len = name.len;
7578     + INIT_LIST_HEAD(&clp->cl_sessions);
7579     + idr_init(&clp->cl_stateids);
7580     + atomic_set(&clp->cl_refcount, 0);
7581     + clp->cl_cb_state = NFSD4_CB_UNKNOWN;
7582     + INIT_LIST_HEAD(&clp->cl_idhash);
7583     + INIT_LIST_HEAD(&clp->cl_openowners);
7584     + INIT_LIST_HEAD(&clp->cl_delegations);
7585     + INIT_LIST_HEAD(&clp->cl_lru);
7586     + INIT_LIST_HEAD(&clp->cl_callbacks);
7587     + INIT_LIST_HEAD(&clp->cl_revoked);
7588     + spin_lock_init(&clp->cl_lock);
7589     + rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
7590     return clp;
7591     }
7592    
7593     @@ -1095,6 +1107,7 @@ free_client(struct nfs4_client *clp)
7594     WARN_ON_ONCE(atomic_read(&ses->se_ref));
7595     free_session(ses);
7596     }
7597     + rpc_destroy_wait_queue(&clp->cl_cb_waitq);
7598     free_svc_cred(&clp->cl_cred);
7599     kfree(clp->cl_name.data);
7600     idr_destroy(&clp->cl_stateids);
7601     @@ -1347,7 +1360,6 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
7602     if (clp == NULL)
7603     return NULL;
7604    
7605     - INIT_LIST_HEAD(&clp->cl_sessions);
7606     ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
7607     if (ret) {
7608     spin_lock(&nn->client_lock);
7609     @@ -1355,20 +1367,9 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
7610     spin_unlock(&nn->client_lock);
7611     return NULL;
7612     }
7613     - idr_init(&clp->cl_stateids);
7614     - atomic_set(&clp->cl_refcount, 0);
7615     - clp->cl_cb_state = NFSD4_CB_UNKNOWN;
7616     - INIT_LIST_HEAD(&clp->cl_idhash);
7617     - INIT_LIST_HEAD(&clp->cl_openowners);
7618     - INIT_LIST_HEAD(&clp->cl_delegations);
7619     - INIT_LIST_HEAD(&clp->cl_lru);
7620     - INIT_LIST_HEAD(&clp->cl_callbacks);
7621     - INIT_LIST_HEAD(&clp->cl_revoked);
7622     - spin_lock_init(&clp->cl_lock);
7623     nfsd4_init_callback(&clp->cl_cb_null);
7624     clp->cl_time = get_seconds();
7625     clear_bit(0, &clp->cl_cb_slot_busy);
7626     - rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
7627     copy_verf(clp, verf);
7628     rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
7629     gen_confirm(clp);
7630     @@ -3713,9 +3714,16 @@ out:
7631     static __be32
7632     nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
7633     {
7634     - if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
7635     + struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
7636     +
7637     + if (check_for_locks(stp->st_file, lo))
7638     return nfserr_locks_held;
7639     - release_lock_stateid(stp);
7640     + /*
7641     + * Currently there's a 1-1 lock stateid<->lockowner
7642     + * correspondance, and we have to delete the lockowner when we
7643     + * correspondence, and we have to delete the lockowner when we
7644     + */
7645     + unhash_lockowner(lo);
7646     return nfs_ok;
7647     }
7648    
7649     @@ -4155,6 +4163,10 @@ static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, c
7650    
7651     if (!same_owner_str(&lo->lo_owner, owner, clid))
7652     return false;
7653     + if (list_empty(&lo->lo_owner.so_stateids)) {
7654     + WARN_ON_ONCE(1);
7655     + return false;
7656     + }
7657     lst = list_first_entry(&lo->lo_owner.so_stateids,
7658     struct nfs4_ol_stateid, st_perstateowner);
7659     return lst->st_file->fi_inode == inode;
7660     diff --git a/fs/posix_acl.c b/fs/posix_acl.c
7661     index 9e363e41dacc..0855f772cd41 100644
7662     --- a/fs/posix_acl.c
7663     +++ b/fs/posix_acl.c
7664     @@ -246,6 +246,12 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
7665     umode_t mode = 0;
7666     int not_equiv = 0;
7667    
7668     + /*
7669     + * A null ACL can always be presented as mode bits.
7670     + */
7671     + if (!acl)
7672     + return 0;
7673     +
7674     FOREACH_ACL_ENTRY(pa, acl, pe) {
7675     switch (pa->e_tag) {
7676     case ACL_USER_OBJ:
7677     diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
7678     index 810cf6e613e5..5fd2bf18e27d 100644
7679     --- a/fs/sysfs/file.c
7680     +++ b/fs/sysfs/file.c
7681     @@ -47,12 +47,13 @@ static int sysfs_kf_seq_show(struct seq_file *sf, void *v)
7682     ssize_t count;
7683     char *buf;
7684    
7685     - /* acquire buffer and ensure that it's >= PAGE_SIZE */
7686     + /* acquire buffer and ensure that it's >= PAGE_SIZE and clear */
7687     count = seq_get_buf(sf, &buf);
7688     if (count < PAGE_SIZE) {
7689     seq_commit(sf, -1);
7690     return 0;
7691     }
7692     + memset(buf, 0, PAGE_SIZE);
7693    
7694     /*
7695     * Invoke show(). Control may reach here via seq file lseek even
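
The sysfs hunk above zeroes the whole PAGE_SIZE seq buffer before invoking show(), so that any bytes the attribute does not write are guaranteed to be zero rather than stale memory. The same idea in miniature, with a small array in place of a page and fill_attr() standing in for a ->show() callback:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUF_SIZE 64   /* stands in for PAGE_SIZE */

/* Stand-in for a ->show() callback: fills only part of the buffer. */
static int fill_attr(char *buf)
{
        return snprintf(buf, BUF_SIZE, "42\n");
}

int main(void)
{
        char *buf = malloc(BUF_SIZE);
        if (!buf)
                return 1;

        memset(buf, 0, BUF_SIZE);          /* the fix: no stale bytes past the output */
        int count = fill_attr(buf);

        /* everything beyond 'count' is now provably zero */
        for (int i = count; i < BUF_SIZE; i++) {
                if (buf[i] != 0) {
                        printf("stale byte at %d\n", i);
                        free(buf);
                        return 1;
                }
        }
        printf("wrote %d bytes, rest is zeroed\n", count);
        free(buf);
        return 0;
}
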
7696     diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
7697     index c5c92d59e531..0a5f55272672 100644
7698     --- a/include/linux/dmaengine.h
7699     +++ b/include/linux/dmaengine.h
7700     @@ -433,6 +433,7 @@ typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
7701     typedef void (*dma_async_tx_callback)(void *dma_async_param);
7702    
7703     struct dmaengine_unmap_data {
7704     + u8 map_cnt;
7705     u8 to_cnt;
7706     u8 from_cnt;
7707     u8 bidi_cnt;
7708     diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
7709     index f4233b195dab..2068dff8a2cc 100644
7710     --- a/include/linux/ftrace.h
7711     +++ b/include/linux/ftrace.h
7712     @@ -524,6 +524,7 @@ static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_a
7713     extern int ftrace_arch_read_dyn_info(char *buf, int size);
7714    
7715     extern int skip_trace(unsigned long ip);
7716     +extern void ftrace_module_init(struct module *mod);
7717    
7718     extern void ftrace_disable_daemon(void);
7719     extern void ftrace_enable_daemon(void);
7720     @@ -533,6 +534,7 @@ static inline int ftrace_force_update(void) { return 0; }
7721     static inline void ftrace_disable_daemon(void) { }
7722     static inline void ftrace_enable_daemon(void) { }
7723     static inline void ftrace_release_mod(struct module *mod) {}
7724     +static inline void ftrace_module_init(struct module *mod) {}
7725     static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
7726     {
7727     return -EINVAL;
7728     diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
7729     index a2678d35b5a2..203c43d3e1b8 100644
7730     --- a/include/linux/interrupt.h
7731     +++ b/include/linux/interrupt.h
7732     @@ -202,7 +202,40 @@ static inline int check_wakeup_irqs(void) { return 0; }
7733    
7734     extern cpumask_var_t irq_default_affinity;
7735    
7736     -extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
7737     +/* Internal implementation. Use the helpers below */
7738     +extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
7739     + bool force);
7740     +
7741     +/**
7742     + * irq_set_affinity - Set the irq affinity of a given irq
7743     + * @irq: Interrupt to set affinity
7744     + * @cpumask: cpumask
7745     + *
7746     + * Fails if cpumask does not contain an online CPU
7747     + */
7748     +static inline int
7749     +irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
7750     +{
7751     + return __irq_set_affinity(irq, cpumask, false);
7752     +}
7753     +
7754     +/**
7755     + * irq_force_affinity - Force the irq affinity of a given irq
7756     + * @irq: Interrupt to set affinity
7757     + * @cpumask: cpumask
7758     + *
7759     + * Same as irq_set_affinity, but without checking the mask against
7760     + * online cpus.
7761     + *
7762     + * Solely for low level cpu hotplug code, where we need to make per
7763     + * cpu interrupts affine before the cpu becomes online.
7764     + */
7765     +static inline int
7766     +irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
7767     +{
7768     + return __irq_set_affinity(irq, cpumask, true);
7769     +}
7770     +
7771     extern int irq_can_set_affinity(unsigned int irq);
7772     extern int irq_select_affinity(unsigned int irq);
7773    
7774     @@ -238,6 +271,11 @@ static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
7775     return -EINVAL;
7776     }
7777    
7778     +static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
7779     +{
7780     + return 0;
7781     +}
7782     +
7783     static inline int irq_can_set_affinity(unsigned int irq)
7784     {
7785     return 0;
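
The interrupt.h hunk above turns irq_set_affinity() into a thin inline wrapper around a new __irq_set_affinity(irq, mask, force) and adds irq_force_affinity(), which is the same call with force set for early CPU-hotplug bring-up. The wrapper pattern on its own, outside the kernel; the __set_affinity() core and its online-mask check are invented for the sketch:

#include <stdbool.h>
#include <stdio.h>

#define ONLINE_MASK 0x3u   /* pretend CPUs 0 and 1 are online */

/* One internal implementation; callers choose the policy via 'force'. */
static int __set_affinity(unsigned int irq, unsigned int mask, bool force)
{
        if (!force && !(mask & ONLINE_MASK))
                return -1;         /* reject masks with no online CPU */
        printf("irq %u -> mask 0x%x%s\n", irq, mask, force ? " (forced)" : "");
        return 0;
}

static inline int set_affinity(unsigned int irq, unsigned int mask)
{
        return __set_affinity(irq, mask, false);
}

/* Only for bring-up paths that target a CPU before it is marked online. */
static inline int force_affinity(unsigned int irq, unsigned int mask)
{
        return __set_affinity(irq, mask, true);
}

int main(void)
{
        printf("normal: %d\n", set_affinity(10, 0x4));   /* CPU 2 offline -> rejected */
        printf("forced: %d\n", force_affinity(10, 0x4)); /* accepted anyway */
        return 0;
}
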
7786     diff --git a/include/linux/irq.h b/include/linux/irq.h
7787     index 7dc10036eff5..ef1ac9feff56 100644
7788     --- a/include/linux/irq.h
7789     +++ b/include/linux/irq.h
7790     @@ -385,7 +385,8 @@ extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
7791    
7792     extern void irq_cpu_online(void);
7793     extern void irq_cpu_offline(void);
7794     -extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask);
7795     +extern int irq_set_affinity_locked(struct irq_data *data,
7796     + const struct cpumask *cpumask, bool force);
7797    
7798     #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
7799     void irq_move_irq(struct irq_data *data);
7800     diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
7801     index 3f23b4472c31..6404253d810d 100644
7802     --- a/include/linux/of_irq.h
7803     +++ b/include/linux/of_irq.h
7804     @@ -44,11 +44,16 @@ extern void of_irq_init(const struct of_device_id *matches);
7805    
7806     #ifdef CONFIG_OF_IRQ
7807     extern int of_irq_count(struct device_node *dev);
7808     +extern int of_irq_get(struct device_node *dev, int index);
7809     #else
7810     static inline int of_irq_count(struct device_node *dev)
7811     {
7812     return 0;
7813     }
7814     +static inline int of_irq_get(struct device_node *dev, int index)
7815     +{
7816     + return 0;
7817     +}
7818     #endif
7819    
7820     #if defined(CONFIG_OF)
7821     diff --git a/include/linux/serio.h b/include/linux/serio.h
7822     index 36aac733840a..9f779c7a2da4 100644
7823     --- a/include/linux/serio.h
7824     +++ b/include/linux/serio.h
7825     @@ -23,6 +23,7 @@ struct serio {
7826    
7827     char name[32];
7828     char phys[32];
7829     + char firmware_id[128];
7830    
7831     bool manual_bind;
7832    
7833     diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
7834     index b1f84b05c67e..37123eb1f093 100644
7835     --- a/include/net/cfg80211.h
7836     +++ b/include/net/cfg80211.h
7837     @@ -3637,6 +3637,18 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy);
7838     void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
7839    
7840     /**
7841     + * cfg80211_sched_scan_stopped_rtnl - notify that the scheduled scan has stopped
7842     + *
7843     + * @wiphy: the wiphy on which the scheduled scan stopped
7844     + *
7845     + * The driver can call this function to inform cfg80211 that the
7846     + * scheduled scan had to be stopped, for whatever reason. The driver
7847     + * is then called back via the sched_scan_stop operation when done.
7848     + * This function should be called with rtnl locked.
7849     + */
7850     +void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy);
7851     +
7852     +/**
7853     * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame
7854     *
7855     * @wiphy: the wiphy reporting the BSS
7856     diff --git a/include/trace/events/module.h b/include/trace/events/module.h
7857     index 161932737416..ca298c7157ae 100644
7858     --- a/include/trace/events/module.h
7859     +++ b/include/trace/events/module.h
7860     @@ -78,7 +78,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
7861    
7862     TP_fast_assign(
7863     __entry->ip = ip;
7864     - __entry->refcnt = __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs);
7865     + __entry->refcnt = __this_cpu_read(mod->refptr->incs) - __this_cpu_read(mod->refptr->decs);
7866     __assign_str(name, mod->name);
7867     ),
7868    
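
The trace-event hunk above reports the module refcount as incs - decs rather than incs + decs: the two per-CPU counters record gets and puts separately, so only their difference gives the number of live references. A trivial arithmetic check:

#include <stdio.h>

int main(void)
{
        /* e.g. summed over all CPUs: 5 module_get() calls, 3 module_put() calls */
        unsigned long incs = 5, decs = 3;

        printf("live references: %lu\n", incs - decs);   /* 2, the useful number */
        printf("incs + decs:     %lu\n", incs + decs);   /* 8, the old bogus value */
        return 0;
}
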
7869     diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
7870     index 5e1ab552cbed..c28de1afd684 100644
7871     --- a/include/uapi/drm/tegra_drm.h
7872     +++ b/include/uapi/drm/tegra_drm.h
7873     @@ -114,7 +114,6 @@ struct drm_tegra_submit {
7874     __u32 num_waitchks;
7875     __u32 waitchk_mask;
7876     __u32 timeout;
7877     - __u32 pad;
7878     __u64 syncpts;
7879     __u64 cmdbufs;
7880     __u64 relocs;
7881     diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
7882     index bd24470d24a2..f4849525519c 100644
7883     --- a/include/uapi/linux/input.h
7884     +++ b/include/uapi/linux/input.h
7885     @@ -164,6 +164,7 @@ struct input_keymap_entry {
7886     #define INPUT_PROP_DIRECT 0x01 /* direct input devices */
7887     #define INPUT_PROP_BUTTONPAD 0x02 /* has button(s) under pad */
7888     #define INPUT_PROP_SEMI_MT 0x03 /* touch rectangle only */
7889     +#define INPUT_PROP_TOPBUTTONPAD 0x04 /* softbuttons at top of pad */
7890    
7891     #define INPUT_PROP_MAX 0x1f
7892     #define INPUT_PROP_CNT (INPUT_PROP_MAX + 1)
7893     diff --git a/kernel/futex.c b/kernel/futex.c
7894     index 6801b3751a95..e3087afb7429 100644
7895     --- a/kernel/futex.c
7896     +++ b/kernel/futex.c
7897     @@ -729,6 +729,55 @@ void exit_pi_state_list(struct task_struct *curr)
7898     raw_spin_unlock_irq(&curr->pi_lock);
7899     }
7900    
7901     +/*
7902     + * We need to check the following states:
7903     + *
7904     + * Waiter | pi_state | pi->owner | uTID | uODIED | ?
7905     + *
7906     + * [1] NULL | --- | --- | 0 | 0/1 | Valid
7907     + * [2] NULL | --- | --- | >0 | 0/1 | Valid
7908     + *
7909     + * [3] Found | NULL | -- | Any | 0/1 | Invalid
7910     + *
7911     + * [4] Found | Found | NULL | 0 | 1 | Valid
7912     + * [5] Found | Found | NULL | >0 | 1 | Invalid
7913     + *
7914     + * [6] Found | Found | task | 0 | 1 | Valid
7915     + *
7916     + * [7] Found | Found | NULL | Any | 0 | Invalid
7917     + *
7918     + * [8] Found | Found | task | ==taskTID | 0/1 | Valid
7919     + * [9] Found | Found | task | 0 | 0 | Invalid
7920     + * [10] Found | Found | task | !=taskTID | 0/1 | Invalid
7921     + *
7922     + * [1] Indicates that the kernel can acquire the futex atomically. We
7923     + * came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
7924     + *
7925     + * [2] Valid, if TID does not belong to a kernel thread. If no matching
7926     + * thread is found then it indicates that the owner TID has died.
7927     + *
7928     + * [3] Invalid. The waiter is queued on a non PI futex
7929     + *
7930     + * [4] Valid state after exit_robust_list(), which sets the user space
7931     + * value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
7932     + *
7933     + * [5] The user space value got manipulated between exit_robust_list()
7934     + * and exit_pi_state_list()
7935     + *
7936     + * [6] Valid state after exit_pi_state_list() which sets the new owner in
7937     + * the pi_state but cannot access the user space value.
7938     + *
7939     + * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
7940     + *
7941     + * [8] Owner and user space value match
7942     + *
7943     + * [9] There is no transient state which sets the user space TID to 0
7944     + * except exit_robust_list(), but this is indicated by the
7945     + * FUTEX_OWNER_DIED bit. See [4]
7946     + *
7947     + * [10] There is no transient state which leaves owner and user space
7948     + * TID out of sync.
7949     + */
7950     static int
7951     lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
7952     union futex_key *key, struct futex_pi_state **ps)
7953     @@ -741,12 +790,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
7954     plist_for_each_entry_safe(this, next, &hb->chain, list) {
7955     if (match_futex(&this->key, key)) {
7956     /*
7957     - * Another waiter already exists - bump up
7958     - * the refcount and return its pi_state:
7959     + * Sanity check the waiter before increasing
7960     + * the refcount and attaching to it.
7961     */
7962     pi_state = this->pi_state;
7963     /*
7964     - * Userspace might have messed up non-PI and PI futexes
7965     + * Userspace might have messed up non-PI and
7966     + * PI futexes [3]
7967     */
7968     if (unlikely(!pi_state))
7969     return -EINVAL;
7970     @@ -754,34 +804,70 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
7971     WARN_ON(!atomic_read(&pi_state->refcount));
7972    
7973     /*
7974     - * When pi_state->owner is NULL then the owner died
7975     - * and another waiter is on the fly. pi_state->owner
7976     - * is fixed up by the task which acquires
7977     - * pi_state->rt_mutex.
7978     - *
7979     - * We do not check for pid == 0 which can happen when
7980     - * the owner died and robust_list_exit() cleared the
7981     - * TID.
7982     + * Handle the owner died case:
7983     */
7984     - if (pid && pi_state->owner) {
7985     + if (uval & FUTEX_OWNER_DIED) {
7986     + /*
7987     + * exit_pi_state_list sets owner to NULL and
7988     + * wakes the topmost waiter. The task which
7989     + * acquires the pi_state->rt_mutex will fixup
7990     + * owner.
7991     + */
7992     + if (!pi_state->owner) {
7993     + /*
7994     + * No pi state owner, but the user
7995     + * space TID is not 0. Inconsistent
7996     + * state. [5]
7997     + */
7998     + if (pid)
7999     + return -EINVAL;
8000     + /*
8001     + * Take a ref on the state and
8002     + * return. [4]
8003     + */
8004     + goto out_state;
8005     + }
8006     +
8007     /*
8008     - * Bail out if user space manipulated the
8009     - * futex value.
8010     + * If TID is 0, then either the dying owner
8011     + * has not yet executed exit_pi_state_list()
8012     + * or some waiter acquired the rtmutex in the
8013     + * pi state, but did not yet fixup the TID in
8014     + * user space.
8015     + *
8016     + * Take a ref on the state and return. [6]
8017     */
8018     - if (pid != task_pid_vnr(pi_state->owner))
8019     + if (!pid)
8020     + goto out_state;
8021     + } else {
8022     + /*
8023     + * If the owner died bit is not set,
8024     + * then the pi_state must have an
8025     + * owner. [7]
8026     + */
8027     + if (!pi_state->owner)
8028     return -EINVAL;
8029     }
8030    
8031     + /*
8032     + * Bail out if user space manipulated the
8033     + * futex value. If pi state exists then the
8034     + * owner TID must be the same as the user
8035     + * space TID. [9/10]
8036     + */
8037     + if (pid != task_pid_vnr(pi_state->owner))
8038     + return -EINVAL;
8039     +
8040     + out_state:
8041     atomic_inc(&pi_state->refcount);
8042     *ps = pi_state;
8043     -
8044     return 0;
8045     }
8046     }
8047    
8048     /*
8049     * We are the first waiter - try to look up the real owner and attach
8050     - * the new pi_state to it, but bail out when TID = 0
8051     + * the new pi_state to it, but bail out when TID = 0 [1]
8052     */
8053     if (!pid)
8054     return -ESRCH;
8055     @@ -789,6 +875,11 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
8056     if (!p)
8057     return -ESRCH;
8058    
8059     + if (!p->mm) {
8060     + put_task_struct(p);
8061     + return -EPERM;
8062     + }
8063     +
8064     /*
8065     * We need to look at the task state flags to figure out,
8066     * whether the task is exiting. To protect against the do_exit
8067     @@ -809,6 +900,9 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
8068     return ret;
8069     }
8070    
8071     + /*
8072     + * No existing pi state. First waiter. [2]
8073     + */
8074     pi_state = alloc_pi_state();
8075    
8076     /*
8077     @@ -880,10 +974,18 @@ retry:
8078     return -EDEADLK;
8079    
8080     /*
8081     - * Surprise - we got the lock. Just return to userspace:
8082     + * Surprise - we got the lock, but we do not trust user space at all.
8083     */
8084     - if (unlikely(!curval))
8085     - return 1;
8086     + if (unlikely(!curval)) {
8087     + /*
8088     + * We verify whether there is kernel state for this
8089     + * futex. If not, we can safely assume that the 0 ->
8090     + * TID transition is correct. If state exists, we do
8091     + * not bother to fixup the user space state as it was
8092     + * corrupted already.
8093     + */
8094     + return futex_top_waiter(hb, key) ? -EINVAL : 1;
8095     + }
8096    
8097     uval = curval;
8098    
8099     @@ -1014,6 +1116,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
8100     struct task_struct *new_owner;
8101     struct futex_pi_state *pi_state = this->pi_state;
8102     u32 uninitialized_var(curval), newval;
8103     + int ret = 0;
8104    
8105     if (!pi_state)
8106     return -EINVAL;
8107     @@ -1037,23 +1140,19 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
8108     new_owner = this->task;
8109    
8110     /*
8111     - * We pass it to the next owner. (The WAITERS bit is always
8112     - * kept enabled while there is PI state around. We must also
8113     - * preserve the owner died bit.)
8114     + * We pass it to the next owner. The WAITERS bit is always
8115     + * kept enabled while there is PI state around. We clean up the
8116     + * owner died bit, because we are the owner.
8117     */
8118     - if (!(uval & FUTEX_OWNER_DIED)) {
8119     - int ret = 0;
8120     + newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
8121    
8122     - newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
8123     -
8124     - if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
8125     - ret = -EFAULT;
8126     - else if (curval != uval)
8127     - ret = -EINVAL;
8128     - if (ret) {
8129     - raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
8130     - return ret;
8131     - }
8132     + if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
8133     + ret = -EFAULT;
8134     + else if (curval != uval)
8135     + ret = -EINVAL;
8136     + if (ret) {
8137     + raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
8138     + return ret;
8139     }
8140    
8141     raw_spin_lock_irq(&pi_state->owner->pi_lock);
8142     @@ -1333,7 +1432,7 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
8143     *
8144     * Return:
8145     * 0 - failed to acquire the lock atomically;
8146     - * 1 - acquired the lock;
8147     + * >0 - acquired the lock, return value is vpid of the top_waiter
8148     * <0 - error
8149     */
8150     static int futex_proxy_trylock_atomic(u32 __user *pifutex,
8151     @@ -1344,7 +1443,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
8152     {
8153     struct futex_q *top_waiter = NULL;
8154     u32 curval;
8155     - int ret;
8156     + int ret, vpid;
8157    
8158     if (get_futex_value_locked(&curval, pifutex))
8159     return -EFAULT;
8160     @@ -1372,11 +1471,13 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
8161     * the contended case or if set_waiters is 1. The pi_state is returned
8162     * in ps in contended cases.
8163     */
8164     + vpid = task_pid_vnr(top_waiter->task);
8165     ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
8166     set_waiters);
8167     - if (ret == 1)
8168     + if (ret == 1) {
8169     requeue_pi_wake_futex(top_waiter, key2, hb2);
8170     -
8171     + return vpid;
8172     + }
8173     return ret;
8174     }
8175    
8176     @@ -1407,10 +1508,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
8177     struct futex_pi_state *pi_state = NULL;
8178     struct futex_hash_bucket *hb1, *hb2;
8179     struct futex_q *this, *next;
8180     - u32 curval2;
8181    
8182     if (requeue_pi) {
8183     /*
8184     + * Requeue PI only works on two distinct uaddrs. This
8185     + * check is only valid for private futexes. See below.
8186     + */
8187     + if (uaddr1 == uaddr2)
8188     + return -EINVAL;
8189     +
8190     + /*
8191     * requeue_pi requires a pi_state, try to allocate it now
8192     * without any locks in case it fails.
8193     */
8194     @@ -1448,6 +1555,15 @@ retry:
8195     if (unlikely(ret != 0))
8196     goto out_put_key1;
8197    
8198     + /*
8199     + * The check above which compares uaddrs is not sufficient for
8200     + * shared futexes. We need to compare the keys:
8201     + */
8202     + if (requeue_pi && match_futex(&key1, &key2)) {
8203     + ret = -EINVAL;
8204     + goto out_put_keys;
8205     + }
8206     +
8207     hb1 = hash_futex(&key1);
8208     hb2 = hash_futex(&key2);
8209    
8210     @@ -1495,16 +1611,25 @@ retry_private:
8211     * At this point the top_waiter has either taken uaddr2 or is
8212     * waiting on it. If the former, then the pi_state will not
8213     * exist yet, look it up one more time to ensure we have a
8214     - * reference to it.
8215     + * reference to it. If the lock was taken, ret contains the
8216     + * vpid of the top waiter task.
8217     */
8218     - if (ret == 1) {
8219     + if (ret > 0) {
8220     WARN_ON(pi_state);
8221     drop_count++;
8222     task_count++;
8223     - ret = get_futex_value_locked(&curval2, uaddr2);
8224     - if (!ret)
8225     - ret = lookup_pi_state(curval2, hb2, &key2,
8226     - &pi_state);
8227     + /*
8228     + * If we acquired the lock, then the user
8229     + * space value of uaddr2 should be vpid. It
8230     + * cannot be changed by the top waiter as it
8231     + * is blocked on hb2 lock if it tries to do
8232     + * so. If something fiddled with it behind our
8233     + * back the pi state lookup might unearth
8234     + * it. So we use the known value rather than
8235     + * rereading and handing potential crap to
8236     + * lookup_pi_state.
8237     + */
8238     + ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
8239     }
8240    
8241     switch (ret) {
8242     @@ -2287,9 +2412,10 @@ retry:
8243     /*
8244     * To avoid races, try to do the TID -> 0 atomic transition
8245     * again. If it succeeds then we can return without waking
8246     - * anyone else up:
8247     + * anyone else up. We only try this if neither the waiters nor
8248     + * the owner died bit are set.
8249     */
8250     - if (!(uval & FUTEX_OWNER_DIED) &&
8251     + if (!(uval & ~FUTEX_TID_MASK) &&
8252     cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
8253     goto pi_faulted;
8254     /*
8255     @@ -2319,11 +2445,9 @@ retry:
8256     /*
8257     * No waiters - kernel unlocks the futex:
8258     */
8259     - if (!(uval & FUTEX_OWNER_DIED)) {
8260     - ret = unlock_futex_pi(uaddr, uval);
8261     - if (ret == -EFAULT)
8262     - goto pi_faulted;
8263     - }
8264     + ret = unlock_futex_pi(uaddr, uval);
8265     + if (ret == -EFAULT)
8266     + goto pi_faulted;
8267    
8268     out_unlock:
8269     spin_unlock(&hb->lock);
8270     @@ -2485,6 +2609,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
8271     if (ret)
8272     goto out_key2;
8273    
8274     + /*
8275     + * The check above which compares uaddrs is not sufficient for
8276     + * shared futexes. We need to compare the keys:
8277     + */
8278     + if (match_futex(&q.key, &key2)) {
8279     + ret = -EINVAL;
8280     + goto out_put_keys;
8281     + }
8282     +
8283     /* Queue the futex_q, drop the hb lock, wait for wakeup. */
8284     futex_wait_queue_me(hb, &q, to);
8285    
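
Several futex hunks above test raw bits of the user-space futex word; in particular, the unlock path now retries the TID -> 0 transition only when no bits outside FUTEX_TID_MASK are set, meaning neither FUTEX_WAITERS nor FUTEX_OWNER_DIED. A standalone illustration of that bit test; the constants are copied locally so the sketch compiles on its own and are believed to match the uapi futex values:

#include <stdio.h>

#define FUTEX_WAITERS    0x80000000u
#define FUTEX_OWNER_DIED 0x40000000u
#define FUTEX_TID_MASK   0x3fffffffu

/* True when the futex word carries nothing but the owner TID. */
static int only_tid_set(unsigned int uval)
{
        return !(uval & ~FUTEX_TID_MASK);
}

int main(void)
{
        unsigned int tid = 1234;

        printf("plain owner:    %d\n", only_tid_set(tid));                     /* 1 */
        printf("waiters queued: %d\n", only_tid_set(tid | FUTEX_WAITERS));     /* 0 */
        printf("owner died:     %d\n", only_tid_set(tid | FUTEX_OWNER_DIED));  /* 0 */
        return 0;
}
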
8286     diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
8287     index 09094361dce5..04d03745fb98 100644
8288     --- a/kernel/hrtimer.c
8289     +++ b/kernel/hrtimer.c
8290     @@ -247,6 +247,11 @@ again:
8291     goto again;
8292     }
8293     timer->base = new_base;
8294     + } else {
8295     + if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
8296     + cpu = this_cpu;
8297     + goto again;
8298     + }
8299     }
8300     return new_base;
8301     }
8302     @@ -582,6 +587,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
8303    
8304     cpu_base->expires_next.tv64 = expires_next.tv64;
8305    
8306     + /*
8307     + * If a hang was detected in the last timer interrupt then we
8308     + * leave the hang delay active in the hardware. We want the
8309     + * system to make progress. That also prevents the following
8310     + * scenario:
8311     + * T1 expires 50ms from now
8312     + * T2 expires 5s from now
8313     + *
8314     + * T1 is removed, so this code is called and would reprogram
8315     + * the hardware to 5s from now. Any hrtimer_start after that
8316     + * will not reprogram the hardware due to hang_detected being
8317     + * set. So we'd effectively block all timers until the T2 event
8318     + * fires.
8319     + */
8320     + if (cpu_base->hang_detected)
8321     + return;
8322     +
8323     if (cpu_base->expires_next.tv64 != KTIME_MAX)
8324     tick_program_event(cpu_base->expires_next, 1);
8325     }
8326     @@ -981,11 +1003,8 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
8327     /* Remove an active timer from the queue: */
8328     ret = remove_hrtimer(timer, base);
8329    
8330     - /* Switch the timer base, if necessary: */
8331     - new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
8332     -
8333     if (mode & HRTIMER_MODE_REL) {
8334     - tim = ktime_add_safe(tim, new_base->get_time());
8335     + tim = ktime_add_safe(tim, base->get_time());
8336     /*
8337     * CONFIG_TIME_LOW_RES is a temporary way for architectures
8338     * to signal that they simply return xtime in
8339     @@ -1000,6 +1019,9 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
8340    
8341     hrtimer_set_expires_range_ns(timer, tim, delta_ns);
8342    
8343     + /* Switch the timer base, if necessary: */
8344     + new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
8345     +
8346     timer_stats_hrtimer_set_start_info(timer);
8347    
8348     leftmost = enqueue_hrtimer(timer, new_base);
8349     diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
8350     index d3bf660cb57f..66a1b46d5677 100644
8351     --- a/kernel/irq/manage.c
8352     +++ b/kernel/irq/manage.c
8353     @@ -150,7 +150,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
8354     struct irq_chip *chip = irq_data_get_irq_chip(data);
8355     int ret;
8356    
8357     - ret = chip->irq_set_affinity(data, mask, false);
8358     + ret = chip->irq_set_affinity(data, mask, force);
8359     switch (ret) {
8360     case IRQ_SET_MASK_OK:
8361     cpumask_copy(data->affinity, mask);
8362     @@ -162,7 +162,8 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
8363     return ret;
8364     }
8365    
8366     -int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
8367     +int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
8368     + bool force)
8369     {
8370     struct irq_chip *chip = irq_data_get_irq_chip(data);
8371     struct irq_desc *desc = irq_data_to_desc(data);
8372     @@ -172,7 +173,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
8373     return -EINVAL;
8374    
8375     if (irq_can_move_pcntxt(data)) {
8376     - ret = irq_do_set_affinity(data, mask, false);
8377     + ret = irq_do_set_affinity(data, mask, force);
8378     } else {
8379     irqd_set_move_pending(data);
8380     irq_copy_pending(desc, mask);
8381     @@ -187,13 +188,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
8382     return ret;
8383     }
8384    
8385     -/**
8386     - * irq_set_affinity - Set the irq affinity of a given irq
8387     - * @irq: Interrupt to set affinity
8388     - * @mask: cpumask
8389     - *
8390     - */
8391     -int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
8392     +int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
8393     {
8394     struct irq_desc *desc = irq_to_desc(irq);
8395     unsigned long flags;
8396     @@ -203,7 +198,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
8397     return -EINVAL;
8398    
8399     raw_spin_lock_irqsave(&desc->lock, flags);
8400     - ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
8401     + ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
8402     raw_spin_unlock_irqrestore(&desc->lock, flags);
8403     return ret;
8404     }
8405     diff --git a/kernel/kexec.c b/kernel/kexec.c
8406     index 60bafbed06ab..18ff0b91d6d2 100644
8407     --- a/kernel/kexec.c
8408     +++ b/kernel/kexec.c
8409     @@ -1682,6 +1682,14 @@ int kernel_kexec(void)
8410     kexec_in_progress = true;
8411     kernel_restart_prepare(NULL);
8412     migrate_to_reboot_cpu();
8413     +
8414     + /*
8415     + * migrate_to_reboot_cpu() disables CPU hotplug assuming that
8416     + * no further code needs to use CPU hotplug (which is true in
8417     + * the reboot case). However, the kexec path depends on using
8418     + * CPU hotplug again; so re-enable it here.
8419     + */
8420     + cpu_hotplug_enable();
8421     printk(KERN_EMERG "Starting new kernel\n");
8422     machine_shutdown();
8423     }
8424     diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
8425     index 2e960a2bab81..0339f515531a 100644
8426     --- a/kernel/locking/rtmutex.c
8427     +++ b/kernel/locking/rtmutex.c
8428     @@ -331,9 +331,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
8429     * top_waiter can be NULL, when we are in the deboosting
8430     * mode!
8431     */
8432     - if (top_waiter && (!task_has_pi_waiters(task) ||
8433     - top_waiter != task_top_pi_waiter(task)))
8434     - goto out_unlock_pi;
8435     + if (top_waiter) {
8436     + if (!task_has_pi_waiters(task))
8437     + goto out_unlock_pi;
8438     + /*
8439     + * If deadlock detection is off, we stop here if we
8440     + * are not the top pi waiter of the task.
8441     + */
8442     + if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
8443     + goto out_unlock_pi;
8444     + }
8445    
8446     /*
8447     * When deadlock detection is off then we check, if further
8448     @@ -349,7 +356,12 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
8449     goto retry;
8450     }
8451    
8452     - /* Deadlock detection */
8453     + /*
8454     + * Deadlock detection. If the lock is the same as the original
8455     + * lock which caused us to walk the lock chain or if the
8456     + * current lock is owned by the task which initiated the chain
8457     + * walk, we detected a deadlock.
8458     + */
8459     if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
8460     debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
8461     raw_spin_unlock(&lock->wait_lock);
8462     @@ -515,6 +527,18 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
8463     unsigned long flags;
8464     int chain_walk = 0, res;
8465    
8466     + /*
8467     + * Early deadlock detection. We really don't want the task to
8468     + * enqueue on itself just to untangle the mess later. It's not
8469     + * only an optimization. We drop the locks, so another waiter
8470     + * can come in before the chain walk detects the deadlock. So
8471     + * the other will detect the deadlock and return -EDEADLOCK,
8472     + * which is wrong, as the other waiter is not in a deadlock
8473     + * situation.
8474     + */
8475     + if (detect_deadlock && owner == task)
8476     + return -EDEADLK;
8477     +
8478     raw_spin_lock_irqsave(&task->pi_lock, flags);
8479     __rt_mutex_adjust_prio(task);
8480     waiter->task = task;
8481     diff --git a/kernel/module.c b/kernel/module.c
8482     index d24fcf29cb64..6716a1fa618b 100644
8483     --- a/kernel/module.c
8484     +++ b/kernel/module.c
8485     @@ -815,9 +815,6 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
8486     return -EFAULT;
8487     name[MODULE_NAME_LEN-1] = '\0';
8488    
8489     - if (!(flags & O_NONBLOCK))
8490     - pr_warn("waiting module removal not supported: please upgrade\n");
8491     -
8492     if (mutex_lock_interruptible(&module_mutex) != 0)
8493     return -EINTR;
8494    
8495     @@ -3265,6 +3262,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
8496    
8497     dynamic_debug_setup(info->debug, info->num_debug);
8498    
8499     + /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
8500     + ftrace_module_init(mod);
8501     +
8502     /* Finally it's fully formed, ready to start executing. */
8503     err = complete_formation(mod, info);
8504     if (err)
8505     diff --git a/kernel/timer.c b/kernel/timer.c
8506     index accfd241b9e5..38f0d40fca13 100644
8507     --- a/kernel/timer.c
8508     +++ b/kernel/timer.c
8509     @@ -822,7 +822,7 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
8510    
8511     bit = find_last_bit(&mask, BITS_PER_LONG);
8512    
8513     - mask = (1 << bit) - 1;
8514     + mask = (1UL << bit) - 1;
8515    
8516     expires_limit = expires_limit & ~(mask);
8517    
8518     diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
8519     index cd7f76d1eb86..868633e61b43 100644
8520     --- a/kernel/trace/ftrace.c
8521     +++ b/kernel/trace/ftrace.c
8522     @@ -4315,16 +4315,11 @@ static void ftrace_init_module(struct module *mod,
8523     ftrace_process_locs(mod, start, end);
8524     }
8525    
8526     -static int ftrace_module_notify_enter(struct notifier_block *self,
8527     - unsigned long val, void *data)
8528     +void ftrace_module_init(struct module *mod)
8529     {
8530     - struct module *mod = data;
8531     -
8532     - if (val == MODULE_STATE_COMING)
8533     - ftrace_init_module(mod, mod->ftrace_callsites,
8534     - mod->ftrace_callsites +
8535     - mod->num_ftrace_callsites);
8536     - return 0;
8537     + ftrace_init_module(mod, mod->ftrace_callsites,
8538     + mod->ftrace_callsites +
8539     + mod->num_ftrace_callsites);
8540     }
8541    
8542     static int ftrace_module_notify_exit(struct notifier_block *self,
8543     @@ -4338,11 +4333,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
8544     return 0;
8545     }
8546     #else
8547     -static int ftrace_module_notify_enter(struct notifier_block *self,
8548     - unsigned long val, void *data)
8549     -{
8550     - return 0;
8551     -}
8552     static int ftrace_module_notify_exit(struct notifier_block *self,
8553     unsigned long val, void *data)
8554     {
8555     @@ -4350,11 +4340,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
8556     }
8557     #endif /* CONFIG_MODULES */
8558    
8559     -struct notifier_block ftrace_module_enter_nb = {
8560     - .notifier_call = ftrace_module_notify_enter,
8561     - .priority = INT_MAX, /* Run before anything that can use kprobes */
8562     -};
8563     -
8564     struct notifier_block ftrace_module_exit_nb = {
8565     .notifier_call = ftrace_module_notify_exit,
8566     .priority = INT_MIN, /* Run after anything that can remove kprobes */
8567     @@ -4391,10 +4376,6 @@ void __init ftrace_init(void)
8568     __start_mcount_loc,
8569     __stop_mcount_loc);
8570    
8571     - ret = register_module_notifier(&ftrace_module_enter_nb);
8572     - if (ret)
8573     - pr_warning("Failed to register trace ftrace module enter notifier\n");
8574     -
8575     ret = register_module_notifier(&ftrace_module_exit_nb);
8576     if (ret)
8577     pr_warning("Failed to register trace ftrace module exit notifier\n");
8578     diff --git a/kernel/workqueue.c b/kernel/workqueue.c
8579     index 193e977a10ea..b6a394108e3b 100644
8580     --- a/kernel/workqueue.c
8581     +++ b/kernel/workqueue.c
8582     @@ -1909,6 +1909,12 @@ static void send_mayday(struct work_struct *work)
8583    
8584     /* mayday mayday mayday */
8585     if (list_empty(&pwq->mayday_node)) {
8586     + /*
8587     + * If @pwq is for an unbound wq, its base ref may be put at
8588     + * any time due to an attribute change. Pin @pwq until the
8589     + * rescuer is done with it.
8590     + */
8591     + get_pwq(pwq);
8592     list_add_tail(&pwq->mayday_node, &wq->maydays);
8593     wake_up_process(wq->rescuer->task);
8594     }
8595     @@ -2391,6 +2397,7 @@ static int rescuer_thread(void *__rescuer)
8596     struct worker *rescuer = __rescuer;
8597     struct workqueue_struct *wq = rescuer->rescue_wq;
8598     struct list_head *scheduled = &rescuer->scheduled;
8599     + bool should_stop;
8600    
8601     set_user_nice(current, RESCUER_NICE_LEVEL);
8602    
8603     @@ -2402,11 +2409,15 @@ static int rescuer_thread(void *__rescuer)
8604     repeat:
8605     set_current_state(TASK_INTERRUPTIBLE);
8606    
8607     - if (kthread_should_stop()) {
8608     - __set_current_state(TASK_RUNNING);
8609     - rescuer->task->flags &= ~PF_WQ_WORKER;
8610     - return 0;
8611     - }
8612     + /*
8613     + * By the time the rescuer is requested to stop, the workqueue
8614     + * shouldn't have any work pending, but @wq->maydays may still have
8615     + * pwq(s) queued. This can happen when non-rescuer workers consume
8616     + * all the work items before the rescuer gets to them. Go through
8617     + * @wq->maydays processing before acting on should_stop so that the
8618     + * list is always empty on exit.
8619     + */
8620     + should_stop = kthread_should_stop();
8621    
8622     /* see whether any pwq is asking for help */
8623     spin_lock_irq(&wq_mayday_lock);
8624     @@ -2438,6 +2449,12 @@ repeat:
8625     process_scheduled_works(rescuer);
8626    
8627     /*
8628     + * Put the reference grabbed by send_mayday(). @pool won't
8629     + * go away while we're holding its lock.
8630     + */
8631     + put_pwq(pwq);
8632     +
8633     + /*
8634     * Leave this pool. If keep_working() is %true, notify a
8635     * regular worker; otherwise, we end up with 0 concurrency
8636     * and stalling the execution.
8637     @@ -2452,6 +2469,12 @@ repeat:
8638    
8639     spin_unlock_irq(&wq_mayday_lock);
8640    
8641     + if (should_stop) {
8642     + __set_current_state(TASK_RUNNING);
8643     + rescuer->task->flags &= ~PF_WQ_WORKER;
8644     + return 0;
8645     + }
8646     +
8647     /* rescuers should never participate in concurrency management */
8648     WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
8649     schedule();
8650     @@ -4093,7 +4116,8 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
8651     if (!pwq) {
8652     pr_warning("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
8653     wq->name);
8654     - goto out_unlock;
8655     + mutex_lock(&wq->mutex);
8656     + goto use_dfl_pwq;
8657     }
8658    
8659     /*
8660     diff --git a/mm/compaction.c b/mm/compaction.c
8661     index 918577595ea8..5f702ef0a65f 100644
8662     --- a/mm/compaction.c
8663     +++ b/mm/compaction.c
8664     @@ -666,16 +666,20 @@ static void isolate_freepages(struct zone *zone,
8665     struct compact_control *cc)
8666     {
8667     struct page *page;
8668     - unsigned long high_pfn, low_pfn, pfn, z_end_pfn, end_pfn;
8669     + unsigned long high_pfn, low_pfn, pfn, z_end_pfn;
8670     int nr_freepages = cc->nr_freepages;
8671     struct list_head *freelist = &cc->freepages;
8672    
8673     /*
8674     * Initialise the free scanner. The starting point is where we last
8675     - * scanned from (or the end of the zone if starting). The low point
8676     - * is the end of the pageblock the migration scanner is using.
8677     + * successfully isolated from, zone-cached value, or the end of the
8678     + * zone when isolating for the first time. We need this aligned to
8679     + * the pageblock boundary, because we do pfn -= pageblock_nr_pages
8680     + * in the for loop.
8681     + * The low boundary is the end of the pageblock the migration scanner
8682     + * is using.
8683     */
8684     - pfn = cc->free_pfn;
8685     + pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
8686     low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
8687    
8688     /*
8689     @@ -695,6 +699,7 @@ static void isolate_freepages(struct zone *zone,
8690     for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
8691     pfn -= pageblock_nr_pages) {
8692     unsigned long isolated;
8693     + unsigned long end_pfn;
8694    
8695     /*
8696     * This can iterate a massively long zone without finding any
8697     @@ -729,13 +734,10 @@ static void isolate_freepages(struct zone *zone,
8698     isolated = 0;
8699    
8700     /*
8701     - * As pfn may not start aligned, pfn+pageblock_nr_page
8702     - * may cross a MAX_ORDER_NR_PAGES boundary and miss
8703     - * a pfn_valid check. Ensure isolate_freepages_block()
8704     - * only scans within a pageblock
8705     + * Take care when isolating in the last pageblock of a zone which
8706     + * ends in the middle of a pageblock.
8707     */
8708     - end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
8709     - end_pfn = min(end_pfn, z_end_pfn);
8710     + end_pfn = min(pfn + pageblock_nr_pages, z_end_pfn);
8711     isolated = isolate_freepages_block(cc, pfn, end_pfn,
8712     freelist, false);
8713     nr_freepages += isolated;
8714     diff --git a/mm/memory-failure.c b/mm/memory-failure.c
8715     index 90002ea43638..66586bb44c62 100644
8716     --- a/mm/memory-failure.c
8717     +++ b/mm/memory-failure.c
8718     @@ -1085,15 +1085,16 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
8719     return 0;
8720     } else if (PageHuge(hpage)) {
8721     /*
8722     - * Check "just unpoisoned", "filter hit", and
8723     - * "race with other subpage."
8724     + * Check "filter hit" and "race with other subpage."
8725     */
8726     lock_page(hpage);
8727     - if (!PageHWPoison(hpage)
8728     - || (hwpoison_filter(p) && TestClearPageHWPoison(p))
8729     - || (p != hpage && TestSetPageHWPoison(hpage))) {
8730     - atomic_long_sub(nr_pages, &num_poisoned_pages);
8731     - return 0;
8732     + if (PageHWPoison(hpage)) {
8733     + if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
8734     + || (p != hpage && TestSetPageHWPoison(hpage))) {
8735     + atomic_long_sub(nr_pages, &num_poisoned_pages);
8736     + unlock_page(hpage);
8737     + return 0;
8738     + }
8739     }
8740     set_page_hwpoison_huge_page(hpage);
8741     res = dequeue_hwpoisoned_huge_page(hpage);
8742     diff --git a/mm/memory.c b/mm/memory.c
8743     index 22dfa617bddb..49e930f9ed46 100644
8744     --- a/mm/memory.c
8745     +++ b/mm/memory.c
8746     @@ -1929,12 +1929,17 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
8747     unsigned long address, unsigned int fault_flags)
8748     {
8749     struct vm_area_struct *vma;
8750     + vm_flags_t vm_flags;
8751     int ret;
8752    
8753     vma = find_extend_vma(mm, address);
8754     if (!vma || address < vma->vm_start)
8755     return -EFAULT;
8756    
8757     + vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
8758     + if (!(vm_flags & vma->vm_flags))
8759     + return -EFAULT;
8760     +
8761     ret = handle_mm_fault(mm, vma, address, fault_flags);
8762     if (ret & VM_FAULT_ERROR) {
8763     if (ret & VM_FAULT_OOM)
8764     diff --git a/mm/mremap.c b/mm/mremap.c
8765     index 0843feb66f3d..05f1180e9f21 100644
8766     --- a/mm/mremap.c
8767     +++ b/mm/mremap.c
8768     @@ -194,10 +194,17 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
8769     break;
8770     if (pmd_trans_huge(*old_pmd)) {
8771     int err = 0;
8772     - if (extent == HPAGE_PMD_SIZE)
8773     + if (extent == HPAGE_PMD_SIZE) {
8774     + VM_BUG_ON(vma->vm_file || !vma->anon_vma);
8775     + /* See comment in move_ptes() */
8776     + if (need_rmap_locks)
8777     + anon_vma_lock_write(vma->anon_vma);
8778     err = move_huge_pmd(vma, new_vma, old_addr,
8779     new_addr, old_end,
8780     old_pmd, new_pmd);
8781     + if (need_rmap_locks)
8782     + anon_vma_unlock_write(vma->anon_vma);
8783     + }
8784     if (err > 0) {
8785     need_flush = true;
8786     continue;
8787     diff --git a/mm/page-writeback.c b/mm/page-writeback.c
8788     index 7106cb1aca8e..8f6daa62206d 100644
8789     --- a/mm/page-writeback.c
8790     +++ b/mm/page-writeback.c
8791     @@ -593,14 +593,14 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
8792     * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
8793     * => fast response on large errors; small oscillation near setpoint
8794     */
8795     -static inline long long pos_ratio_polynom(unsigned long setpoint,
8796     +static long long pos_ratio_polynom(unsigned long setpoint,
8797     unsigned long dirty,
8798     unsigned long limit)
8799     {
8800     long long pos_ratio;
8801     long x;
8802    
8803     - x = div_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
8804     + x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
8805     limit - setpoint + 1);
8806     pos_ratio = x;
8807     pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
8808     @@ -842,7 +842,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
8809     x_intercept = bdi_setpoint + span;
8810    
8811     if (bdi_dirty < x_intercept - span / 4) {
8812     - pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
8813     + pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
8814     x_intercept - bdi_setpoint + 1);
8815     } else
8816     pos_ratio /= 4;
8817     diff --git a/mm/percpu.c b/mm/percpu.c
8818     index 036cfe07050f..a2a54a85f691 100644
8819     --- a/mm/percpu.c
8820     +++ b/mm/percpu.c
8821     @@ -612,7 +612,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
8822     chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
8823     sizeof(chunk->map[0]));
8824     if (!chunk->map) {
8825     - kfree(chunk);
8826     + pcpu_mem_free(chunk, pcpu_chunk_struct_size);
8827     return NULL;
8828     }
8829    
8830     diff --git a/mm/vmscan.c b/mm/vmscan.c
8831     index a9c74b409681..30a4b096ff6e 100644
8832     --- a/mm/vmscan.c
8833     +++ b/mm/vmscan.c
8834     @@ -1916,6 +1916,24 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
8835     }
8836    
8837     /*
8838     + * Prevent the reclaimer from falling into the cache trap: as
8839     + * cache pages start out inactive, every cache fault will tip
8840     + * the scan balance towards the file LRU. And as the file LRU
8841     + * shrinks, so does the window for rotation from references.
8842     + * This means we have a runaway feedback loop where a tiny
8843     + * thrashing file LRU becomes infinitely more attractive than
8844     + * anon pages. Try to detect this based on file LRU size.
8845     + */
8846     + if (global_reclaim(sc)) {
8847     + unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
8848     +
8849     + if (unlikely(file + free <= high_wmark_pages(zone))) {
8850     + scan_balance = SCAN_ANON;
8851     + goto out;
8852     + }
8853     + }
8854     +
8855     + /*
8856     * There is enough inactive page cache, do not reclaim
8857     * anything from the anonymous working set right now.
8858     */
8859     diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
8860     index ba5366c320da..9a6bc9df5e81 100644
8861     --- a/net/bluetooth/hci_conn.c
8862     +++ b/net/bluetooth/hci_conn.c
8863     @@ -752,14 +752,17 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
8864     if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
8865     struct hci_cp_auth_requested cp;
8866    
8867     - /* encrypt must be pending if auth is also pending */
8868     - set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
8869     -
8870     cp.handle = cpu_to_le16(conn->handle);
8871     hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
8872     sizeof(cp), &cp);
8873     +
8874     + /* If we're already encrypted set the REAUTH_PEND flag,
8875     + * otherwise set the ENCRYPT_PEND.
8876     + */
8877     if (conn->key_type != 0xff)
8878     set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
8879     + else
8880     + set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
8881     }
8882    
8883     return 0;
8884     diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
8885     index 60828cf02eb8..66ada7794ed0 100644
8886     --- a/net/bluetooth/hci_event.c
8887     +++ b/net/bluetooth/hci_event.c
8888     @@ -3006,6 +3006,12 @@ static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
8889     if (!conn)
8890     goto unlock;
8891    
8892     + /* For BR/EDR the necessary steps are taken through the
8893     + * auth_complete event.
8894     + */
8895     + if (conn->type != LE_LINK)
8896     + goto unlock;
8897     +
8898     if (!ev->status)
8899     conn->sec_level = conn->pending_sec_level;
8900    
8901     diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
8902     index 30efc5c18622..988721a629eb 100644
8903     --- a/net/ceph/messenger.c
8904     +++ b/net/ceph/messenger.c
8905     @@ -557,7 +557,7 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
8906     return r;
8907     }
8908    
8909     -static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
8910     +static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
8911     int offset, size_t size, bool more)
8912     {
8913     int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
8914     @@ -570,6 +570,24 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
8915     return ret;
8916     }
8917    
8918     +static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
8919     + int offset, size_t size, bool more)
8920     +{
8921     + int ret;
8922     + struct kvec iov;
8923     +
8924     + /* sendpage cannot properly handle pages with page_count == 0,
8925     + * we need to fall back to sendmsg in that case */
8926     + if (page_count(page) >= 1)
8927     + return __ceph_tcp_sendpage(sock, page, offset, size, more);
8928     +
8929     + iov.iov_base = kmap(page) + offset;
8930     + iov.iov_len = size;
8931     + ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
8932     + kunmap(page);
8933     +
8934     + return ret;
8935     +}
8936    
8937     /*
8938     * Shutdown/close the socket for the given connection.
8939     diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
8940     index 6bd498470138..b127902361f4 100644
8941     --- a/net/mac80211/ieee80211_i.h
8942     +++ b/net/mac80211/ieee80211_i.h
8943     @@ -317,6 +317,7 @@ struct ieee80211_roc_work {
8944    
8945     bool started, abort, hw_begun, notified;
8946     bool to_be_freed;
8947     + bool on_channel;
8948    
8949     unsigned long hw_start_time;
8950    
8951     diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
8952     index 47059ac44e24..e6a84cb1a5e4 100644
8953     --- a/net/mac80211/mlme.c
8954     +++ b/net/mac80211/mlme.c
8955     @@ -3511,18 +3511,24 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
8956    
8957     sdata_lock(sdata);
8958    
8959     - if (ifmgd->auth_data) {
8960     + if (ifmgd->auth_data || ifmgd->assoc_data) {
8961     + const u8 *bssid = ifmgd->auth_data ?
8962     + ifmgd->auth_data->bss->bssid :
8963     + ifmgd->assoc_data->bss->bssid;
8964     +
8965     /*
8966     - * If we are trying to authenticate while suspending, cfg80211
8967     - * won't know and won't actually abort those attempts, thus we
8968     - * need to do that ourselves.
8969     + * If we are trying to authenticate / associate while suspending,
8970     + * cfg80211 won't know and won't actually abort those attempts,
8971     + * thus we need to do that ourselves.
8972     */
8973     - ieee80211_send_deauth_disassoc(sdata,
8974     - ifmgd->auth_data->bss->bssid,
8975     + ieee80211_send_deauth_disassoc(sdata, bssid,
8976     IEEE80211_STYPE_DEAUTH,
8977     WLAN_REASON_DEAUTH_LEAVING,
8978     false, frame_buf);
8979     - ieee80211_destroy_auth_data(sdata, false);
8980     + if (ifmgd->assoc_data)
8981     + ieee80211_destroy_assoc_data(sdata, false);
8982     + if (ifmgd->auth_data)
8983     + ieee80211_destroy_auth_data(sdata, false);
8984     cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
8985     IEEE80211_DEAUTH_FRAME_LEN);
8986     }
8987     diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
8988     index 6fb38558a5e6..7a17decd27f9 100644
8989     --- a/net/mac80211/offchannel.c
8990     +++ b/net/mac80211/offchannel.c
8991     @@ -333,7 +333,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
8992     container_of(work, struct ieee80211_roc_work, work.work);
8993     struct ieee80211_sub_if_data *sdata = roc->sdata;
8994     struct ieee80211_local *local = sdata->local;
8995     - bool started;
8996     + bool started, on_channel;
8997    
8998     mutex_lock(&local->mtx);
8999    
9000     @@ -354,14 +354,26 @@ void ieee80211_sw_roc_work(struct work_struct *work)
9001     if (!roc->started) {
9002     struct ieee80211_roc_work *dep;
9003    
9004     - /* start this ROC */
9005     - ieee80211_offchannel_stop_vifs(local);
9006     + WARN_ON(local->use_chanctx);
9007     +
9008     + /* If actually operating on the desired channel (with at least
9009     + * 20 MHz channel width) don't stop all the operations but still
9010     + * treat it as though the ROC operation started properly, so
9011     + * other ROC operations won't interfere with this one.
9012     + */
9013     + roc->on_channel = roc->chan == local->_oper_chandef.chan &&
9014     + local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 &&
9015     + local->_oper_chandef.width != NL80211_CHAN_WIDTH_10;
9016    
9017     - /* switch channel etc */
9018     + /* start this ROC */
9019     ieee80211_recalc_idle(local);
9020    
9021     - local->tmp_channel = roc->chan;
9022     - ieee80211_hw_config(local, 0);
9023     + if (!roc->on_channel) {
9024     + ieee80211_offchannel_stop_vifs(local);
9025     +
9026     + local->tmp_channel = roc->chan;
9027     + ieee80211_hw_config(local, 0);
9028     + }
9029    
9030     /* tell userspace or send frame */
9031     ieee80211_handle_roc_started(roc);
9032     @@ -380,9 +392,10 @@ void ieee80211_sw_roc_work(struct work_struct *work)
9033     finish:
9034     list_del(&roc->list);
9035     started = roc->started;
9036     + on_channel = roc->on_channel;
9037     ieee80211_roc_notify_destroy(roc, !roc->abort);
9038    
9039     - if (started) {
9040     + if (started && !on_channel) {
9041     ieee80211_flush_queues(local, NULL);
9042    
9043     local->tmp_channel = NULL;
9044     diff --git a/net/mac80211/util.c b/net/mac80211/util.c
9045     index b8700d417a9c..642762518213 100644
9046     --- a/net/mac80211/util.c
9047     +++ b/net/mac80211/util.c
9048     @@ -1754,7 +1754,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
9049     mutex_unlock(&local->mtx);
9050    
9051     if (sched_scan_stopped)
9052     - cfg80211_sched_scan_stopped(local->hw.wiphy);
9053     + cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy);
9054    
9055     /*
9056     * If this is for hw restart things are still running.
9057     diff --git a/net/wireless/reg.c b/net/wireless/reg.c
9058     index f0541370e68e..338794ea44d1 100644
9059     --- a/net/wireless/reg.c
9060     +++ b/net/wireless/reg.c
9061     @@ -1683,17 +1683,9 @@ static void reg_process_hint(struct regulatory_request *reg_request)
9062     struct wiphy *wiphy = NULL;
9063     enum reg_request_treatment treatment;
9064    
9065     - if (WARN_ON(!reg_request->alpha2))
9066     - return;
9067     -
9068     if (reg_request->wiphy_idx != WIPHY_IDX_INVALID)
9069     wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
9070    
9071     - if (reg_request->initiator == NL80211_REGDOM_SET_BY_DRIVER && !wiphy) {
9072     - kfree(reg_request);
9073     - return;
9074     - }
9075     -
9076     switch (reg_request->initiator) {
9077     case NL80211_REGDOM_SET_BY_CORE:
9078     reg_process_hint_core(reg_request);
9079     @@ -1706,20 +1698,29 @@ static void reg_process_hint(struct regulatory_request *reg_request)
9080     schedule_delayed_work(&reg_timeout, msecs_to_jiffies(3142));
9081     return;
9082     case NL80211_REGDOM_SET_BY_DRIVER:
9083     + if (!wiphy)
9084     + goto out_free;
9085     treatment = reg_process_hint_driver(wiphy, reg_request);
9086     break;
9087     case NL80211_REGDOM_SET_BY_COUNTRY_IE:
9088     + if (!wiphy)
9089     + goto out_free;
9090     treatment = reg_process_hint_country_ie(wiphy, reg_request);
9091     break;
9092     default:
9093     WARN(1, "invalid initiator %d\n", reg_request->initiator);
9094     - return;
9095     + goto out_free;
9096     }
9097    
9098     /* This is required so that the orig_* parameters are saved */
9099     if (treatment == REG_REQ_ALREADY_SET && wiphy &&
9100     wiphy->regulatory_flags & REGULATORY_STRICT_REG)
9101     wiphy_update_regulatory(wiphy, reg_request->initiator);
9102     +
9103     + return;
9104     +
9105     +out_free:
9106     + kfree(reg_request);
9107     }
9108    
9109     /*
9110     diff --git a/net/wireless/scan.c b/net/wireless/scan.c
9111     index d1ed4aebbbb7..38d6dd553770 100644
9112     --- a/net/wireless/scan.c
9113     +++ b/net/wireless/scan.c
9114     @@ -284,14 +284,22 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy)
9115     }
9116     EXPORT_SYMBOL(cfg80211_sched_scan_results);
9117    
9118     -void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
9119     +void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy)
9120     {
9121     struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9122    
9123     + ASSERT_RTNL();
9124     +
9125     trace_cfg80211_sched_scan_stopped(wiphy);
9126    
9127     - rtnl_lock();
9128     __cfg80211_stop_sched_scan(rdev, true);
9129     +}
9130     +EXPORT_SYMBOL(cfg80211_sched_scan_stopped_rtnl);
9131     +
9132     +void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
9133     +{
9134     + rtnl_lock();
9135     + cfg80211_sched_scan_stopped_rtnl(wiphy);
9136     rtnl_unlock();
9137     }
9138     EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
9139     diff --git a/net/wireless/sme.c b/net/wireless/sme.c
9140     index f04d4c32e96e..760722f9126b 100644
9141     --- a/net/wireless/sme.c
9142     +++ b/net/wireless/sme.c
9143     @@ -235,7 +235,6 @@ void cfg80211_conn_work(struct work_struct *work)
9144     NULL, 0, NULL, 0,
9145     WLAN_STATUS_UNSPECIFIED_FAILURE,
9146     false, NULL);
9147     - cfg80211_sme_free(wdev);
9148     }
9149     wdev_unlock(wdev);
9150     }
9151     @@ -649,6 +648,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
9152     cfg80211_unhold_bss(bss_from_pub(bss));
9153     cfg80211_put_bss(wdev->wiphy, bss);
9154     }
9155     + cfg80211_sme_free(wdev);
9156     return;
9157     }
9158    
9159     diff --git a/security/device_cgroup.c b/security/device_cgroup.c
9160     index d3b6d2cd3a06..6e4e6eb732fe 100644
9161     --- a/security/device_cgroup.c
9162     +++ b/security/device_cgroup.c
9163     @@ -308,57 +308,139 @@ static int devcgroup_seq_show(struct seq_file *m, void *v)
9164     }
9165    
9166     /**
9167     - * may_access - verifies if a new exception is part of what is allowed
9168     - * by a dev cgroup based on the default policy +
9169     - * exceptions. This is used to make sure a child cgroup
9170     - * won't have more privileges than its parent or to
9171     - * verify if a certain access is allowed.
9172     - * @dev_cgroup: dev cgroup to be tested against
9173     - * @refex: new exception
9174     - * @behavior: behavior of the exception
9175     + * match_exception - iterates the exception list trying to match a rule
9176     + * based on type, major, minor and access type. It is
9177     + * considered a match if an exception is found that
9178     + * will contain the entire range of provided parameters.
9179     + * @exceptions: list of exceptions
9180     + * @type: device type (DEV_BLOCK or DEV_CHAR)
9181     + * @major: device file major number, ~0 to match all
9182     + * @minor: device file minor number, ~0 to match all
9183     + * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
9184     + *
9185     + * returns: true in case it matches an exception completely
9186     */
9187     -static bool may_access(struct dev_cgroup *dev_cgroup,
9188     - struct dev_exception_item *refex,
9189     - enum devcg_behavior behavior)
9190     +static bool match_exception(struct list_head *exceptions, short type,
9191     + u32 major, u32 minor, short access)
9192     {
9193     struct dev_exception_item *ex;
9194     - bool match = false;
9195    
9196     - rcu_lockdep_assert(rcu_read_lock_held() ||
9197     - lockdep_is_held(&devcgroup_mutex),
9198     - "device_cgroup::may_access() called without proper synchronization");
9199     + list_for_each_entry_rcu(ex, exceptions, list) {
9200     + if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
9201     + continue;
9202     + if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
9203     + continue;
9204     + if (ex->major != ~0 && ex->major != major)
9205     + continue;
9206     + if (ex->minor != ~0 && ex->minor != minor)
9207     + continue;
9208     + /* provided access cannot have more than the exception rule */
9209     + if (access & (~ex->access))
9210     + continue;
9211     + return true;
9212     + }
9213     + return false;
9214     +}
9215     +
9216     +/**
9217     + * match_exception_partial - iterates the exception list trying to match a rule
9218     + * based on type, major, minor and access type. It is
9219     + * considered a match if an exception's range is
9220     + * found to contain *any* of the devices specified by
9221     + * the provided parameters. This is used to make sure no
9222     + * extra access is being granted that is forbidden by
9223     + * any exception in the list.
9224     + * @exceptions: list of exceptions
9225     + * @type: device type (DEV_BLOCK or DEV_CHAR)
9226     + * @major: device file major number, ~0 to match all
9227     + * @minor: device file minor number, ~0 to match all
9228     + * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
9229     + *
9230     + * returns: true in case any part of the provided range matches an exception
9231     + */
9232     +static bool match_exception_partial(struct list_head *exceptions, short type,
9233     + u32 major, u32 minor, short access)
9234     +{
9235     + struct dev_exception_item *ex;
9236    
9237     - list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) {
9238     - if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
9239     + list_for_each_entry_rcu(ex, exceptions, list) {
9240     + if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
9241     continue;
9242     - if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR))
9243     + if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
9244     continue;
9245     - if (ex->major != ~0 && ex->major != refex->major)
9246     + /*
9247     + * We must be sure that both the exception and the provided
9248     + * range aren't masking all devices
9249     + */
9250     + if (ex->major != ~0 && major != ~0 && ex->major != major)
9251     continue;
9252     - if (ex->minor != ~0 && ex->minor != refex->minor)
9253     + if (ex->minor != ~0 && minor != ~0 && ex->minor != minor)
9254     continue;
9255     - if (refex->access & (~ex->access))
9256     + /*
9257     + * In order to make sure the provided range isn't matching
9258     + * an exception, all its access bits shouldn't match the
9259     + * exception's access bits
9260     + */
9261     + if (!(access & ex->access))
9262     continue;
9263     - match = true;
9264     - break;
9265     + return true;
9266     }
9267     + return false;
9268     +}
9269     +
9270     +/**
9271     + * verify_new_ex - verifies if a new exception is part of what is allowed
9272     + * by a dev cgroup based on the default policy +
9273     + * exceptions. This is used to make sure a child cgroup
9274     + * won't have more privileges than its parent
9275     + * @dev_cgroup: dev cgroup to be tested against
9276     + * @refex: new exception
9277     + * @behavior: behavior of the exception's dev_cgroup
9278     + */
9279     +static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
9280     + struct dev_exception_item *refex,
9281     + enum devcg_behavior behavior)
9282     +{
9283     + bool match = false;
9284     +
9285     + rcu_lockdep_assert(rcu_read_lock_held() ||
9286     + lockdep_is_held(&devcgroup_mutex),
9287     + "device_cgroup:verify_new_ex called without proper synchronization");
9288    
9289     if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
9290     if (behavior == DEVCG_DEFAULT_ALLOW) {
9291     - /* the exception will deny access to certain devices */
9292     + /*
9293     + * new exception in the child doesn't matter, only
9294     + * adding extra restrictions
9295     + */
9296     return true;
9297     } else {
9298     - /* the exception will allow access to certain devices */
9299     + /*
9300     + * new exception in the child will add more devices
9301     + * that can be accessed, so it can't match any of
9302     + * parent's exceptions, even slightly
9303     + */
9304     + match = match_exception_partial(&dev_cgroup->exceptions,
9305     + refex->type,
9306     + refex->major,
9307     + refex->minor,
9308     + refex->access);
9309     +
9310     if (match)
9311     - /*
9312     - * a new exception allowing access shouldn't
9313     - * match an parent's exception
9314     - */
9315     return false;
9316     return true;
9317     }
9318     } else {
9319     - /* only behavior == DEVCG_DEFAULT_DENY allowed here */
9320     + /*
9321     + * Only behavior == DEVCG_DEFAULT_DENY allowed here, therefore
9322     + * the new exception will add access to more devices and must
9323     + * be contained completely in a parent's exception to be
9324     + * allowed
9325     + */
9326     + match = match_exception(&dev_cgroup->exceptions, refex->type,
9327     + refex->major, refex->minor,
9328     + refex->access);
9329     +
9330     if (match)
9331     /* parent has an exception that matches the proposed */
9332     return true;
9333     @@ -380,7 +462,38 @@ static int parent_has_perm(struct dev_cgroup *childcg,
9334    
9335     if (!parent)
9336     return 1;
9337     - return may_access(parent, ex, childcg->behavior);
9338     + return verify_new_ex(parent, ex, childcg->behavior);
9339     +}
9340     +
9341     +/**
9342     + * parent_allows_removal - verify if it's ok to remove an exception
9343     + * @childcg: child cgroup from where the exception will be removed
9344     + * @ex: exception being removed
9345     + *
9346     + * When removing an exception in cgroups with default ALLOW policy, it must
9347     + * be checked if removing it will give the child cgroup more access than the
9348     + * parent.
9349     + *
9350     + * Return: true if it's ok to remove exception, false otherwise
9351     + */
9352     +static bool parent_allows_removal(struct dev_cgroup *childcg,
9353     + struct dev_exception_item *ex)
9354     +{
9355     + struct dev_cgroup *parent = css_to_devcgroup(css_parent(&childcg->css));
9356     +
9357     + if (!parent)
9358     + return true;
9359     +
9360     + /* It's always allowed to remove access to devices */
9361     + if (childcg->behavior == DEVCG_DEFAULT_DENY)
9362     + return true;
9363     +
9364     + /*
9365     + * Make sure you're not removing part of or a whole exception existing in
9366     + * the parent cgroup
9367     + */
9368     + return !match_exception_partial(&parent->exceptions, ex->type,
9369     + ex->major, ex->minor, ex->access);
9370     }
9371    
9372     /**
9373     @@ -618,17 +731,21 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
9374    
9375     switch (filetype) {
9376     case DEVCG_ALLOW:
9377     - if (!parent_has_perm(devcgroup, &ex))
9378     - return -EPERM;
9379     /*
9380     * If the default policy is to allow by default, try to remove
9381     * an matching exception instead. And be silent about it: we
9382     * don't want to break compatibility
9383     */
9384     if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
9385     + /* Check if the parent allows removing it first */
9386     + if (!parent_allows_removal(devcgroup, &ex))
9387     + return -EPERM;
9388     dev_exception_rm(devcgroup, &ex);
9389     - return 0;
9390     + break;
9391     }
9392     +
9393     + if (!parent_has_perm(devcgroup, &ex))
9394     + return -EPERM;
9395     rc = dev_exception_add(devcgroup, &ex);
9396     break;
9397     case DEVCG_DENY:
9398     @@ -708,18 +825,18 @@ static int __devcgroup_check_permission(short type, u32 major, u32 minor,
9399     short access)
9400     {
9401     struct dev_cgroup *dev_cgroup;
9402     - struct dev_exception_item ex;
9403     - int rc;
9404     -
9405     - memset(&ex, 0, sizeof(ex));
9406     - ex.type = type;
9407     - ex.major = major;
9408     - ex.minor = minor;
9409     - ex.access = access;
9410     + bool rc;
9411    
9412     rcu_read_lock();
9413     dev_cgroup = task_devcgroup(current);
9414     - rc = may_access(dev_cgroup, &ex, dev_cgroup->behavior);
9415     + if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW)
9416     + /* Can't match any of the exceptions, even partially */
9417     + rc = !match_exception_partial(&dev_cgroup->exceptions,
9418     + type, major, minor, access);
9419     + else
9420     + /* Need to match completely one exception to be allowed */
9421     + rc = match_exception(&dev_cgroup->exceptions, type, major,
9422     + minor, access);
9423     rcu_read_unlock();
9424    
9425     if (!rc)
9426     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
9427     index a8dec9e9e876..22f7883fcb9a 100644
9428     --- a/sound/pci/hda/hda_intel.c
9429     +++ b/sound/pci/hda/hda_intel.c
9430     @@ -3988,6 +3988,9 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
9431     /* Lynx Point */
9432     { PCI_DEVICE(0x8086, 0x8c20),
9433     .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
9434     + /* 9 Series */
9435     + { PCI_DEVICE(0x8086, 0x8ca0),
9436     + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
9437     /* Wellsburg */
9438     { PCI_DEVICE(0x8086, 0x8d20),
9439     .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
9440     diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
9441     index 5ef95034d041..ce5b339d9333 100644
9442     --- a/sound/pci/hda/patch_hdmi.c
9443     +++ b/sound/pci/hda/patch_hdmi.c
9444     @@ -1123,8 +1123,10 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
9445     AMP_OUT_UNMUTE);
9446    
9447     eld = &per_pin->sink_eld;
9448     - if (!eld->monitor_present)
9449     + if (!eld->monitor_present) {
9450     + hdmi_set_channel_count(codec, per_pin->cvt_nid, channels);
9451     return;
9452     + }
9453    
9454     if (!non_pcm && per_pin->chmap_set)
9455     ca = hdmi_manual_channel_allocation(channels, per_pin->chmap);
9456     diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
9457     index 97db3b45b411..799348e9cdd0 100644
9458     --- a/sound/soc/codecs/wm8962.c
9459     +++ b/sound/soc/codecs/wm8962.c
9460     @@ -154,6 +154,7 @@ static struct reg_default wm8962_reg[] = {
9461     { 40, 0x0000 }, /* R40 - SPKOUTL volume */
9462     { 41, 0x0000 }, /* R41 - SPKOUTR volume */
9463    
9464     + { 49, 0x0010 }, /* R49 - Class D Control 1 */
9465     { 51, 0x0003 }, /* R51 - Class D Control 2 */
9466    
9467     { 56, 0x0506 }, /* R56 - Clocking 4 */
9468     @@ -795,7 +796,6 @@ static bool wm8962_volatile_register(struct device *dev, unsigned int reg)
9469     case WM8962_ALC2:
9470     case WM8962_THERMAL_SHUTDOWN_STATUS:
9471     case WM8962_ADDITIONAL_CONTROL_4:
9472     - case WM8962_CLASS_D_CONTROL_1:
9473     case WM8962_DC_SERVO_6:
9474     case WM8962_INTERRUPT_STATUS_1:
9475     case WM8962_INTERRUPT_STATUS_2:
9476     @@ -2922,13 +2922,22 @@ static int wm8962_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
9477     static int wm8962_mute(struct snd_soc_dai *dai, int mute)
9478     {
9479     struct snd_soc_codec *codec = dai->codec;
9480     - int val;
9481     + int val, ret;
9482    
9483     if (mute)
9484     - val = WM8962_DAC_MUTE;
9485     + val = WM8962_DAC_MUTE | WM8962_DAC_MUTE_ALT;
9486     else
9487     val = 0;
9488    
9489     + /**
9490     + * The DAC mute bit is mirrored in two registers; update both to keep
9491     + * the register cache consistent.
9492     + */
9493     + ret = snd_soc_update_bits(codec, WM8962_CLASS_D_CONTROL_1,
9494     + WM8962_DAC_MUTE_ALT, val);
9495     + if (ret < 0)
9496     + return ret;
9497     +
9498     return snd_soc_update_bits(codec, WM8962_ADC_DAC_CONTROL_1,
9499     WM8962_DAC_MUTE, val);
9500     }
9501     diff --git a/sound/soc/codecs/wm8962.h b/sound/soc/codecs/wm8962.h
9502     index a1a5d5294c19..910aafd09d21 100644
9503     --- a/sound/soc/codecs/wm8962.h
9504     +++ b/sound/soc/codecs/wm8962.h
9505     @@ -1954,6 +1954,10 @@
9506     #define WM8962_SPKOUTL_ENA_MASK 0x0040 /* SPKOUTL_ENA */
9507     #define WM8962_SPKOUTL_ENA_SHIFT 6 /* SPKOUTL_ENA */
9508     #define WM8962_SPKOUTL_ENA_WIDTH 1 /* SPKOUTL_ENA */
9509     +#define WM8962_DAC_MUTE_ALT 0x0010 /* DAC_MUTE */
9510     +#define WM8962_DAC_MUTE_ALT_MASK 0x0010 /* DAC_MUTE */
9511     +#define WM8962_DAC_MUTE_ALT_SHIFT 4 /* DAC_MUTE */
9512     +#define WM8962_DAC_MUTE_ALT_WIDTH 1 /* DAC_MUTE */
9513     #define WM8962_SPKOUTL_PGA_MUTE 0x0002 /* SPKOUTL_PGA_MUTE */
9514     #define WM8962_SPKOUTL_PGA_MUTE_MASK 0x0002 /* SPKOUTL_PGA_MUTE */
9515     #define WM8962_SPKOUTL_PGA_MUTE_SHIFT 1 /* SPKOUTL_PGA_MUTE */
9516     diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
9517     index 910223782c5c..c1369c3ad643 100644
9518     --- a/sound/soc/soc-dapm.c
9519     +++ b/sound/soc/soc-dapm.c
9520     @@ -3649,8 +3649,11 @@ void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card)
9521     cpu_dai = rtd->cpu_dai;
9522     codec_dai = rtd->codec_dai;
9523    
9524     - /* dynamic FE links have no fixed DAI mapping */
9525     - if (rtd->dai_link->dynamic)
9526     + /*
9527     + * dynamic FE links have no fixed DAI mapping.
9528     + * CODEC<->CODEC links have no direct connection.
9529     + */
9530     + if (rtd->dai_link->dynamic || rtd->dai_link->params)
9531     continue;
9532    
9533     /* there is no point in connecting BE DAI links with dummies */
9534     diff --git a/sound/usb/card.h b/sound/usb/card.h
9535     index 9867ab866857..97acb906acc2 100644
9536     --- a/sound/usb/card.h
9537     +++ b/sound/usb/card.h
9538     @@ -92,6 +92,7 @@ struct snd_usb_endpoint {
9539     unsigned int curframesize; /* current packet size in frames (for capture) */
9540     unsigned int syncmaxsize; /* sync endpoint packet size */
9541     unsigned int fill_max:1; /* fill max packet size always */
9542     + unsigned int udh01_fb_quirk:1; /* corrupted feedback data */
9543     unsigned int datainterval; /* log_2 of data packet interval */
9544     unsigned int syncinterval; /* P for adaptive mode, 0 otherwise */
9545     unsigned char silence_value;
9546     diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
9547     index 83aabea259d7..814430fec6a2 100644
9548     --- a/sound/usb/endpoint.c
9549     +++ b/sound/usb/endpoint.c
9550     @@ -469,6 +469,10 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
9551     ep->syncinterval = 3;
9552    
9553     ep->syncmaxsize = le16_to_cpu(get_endpoint(alts, 1)->wMaxPacketSize);
9554     +
9555     + if (chip->usb_id == USB_ID(0x0644, 0x8038) /* TEAC UD-H01 */ &&
9556     + ep->syncmaxsize == 4)
9557     + ep->udh01_fb_quirk = 1;
9558     }
9559    
9560     list_add_tail(&ep->list, &chip->ep_list);
9561     @@ -1099,7 +1103,16 @@ void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
9562     if (f == 0)
9563     return;
9564    
9565     - if (unlikely(ep->freqshift == INT_MIN)) {
9566     + if (unlikely(sender->udh01_fb_quirk)) {
9567     + /*
9568     + * The TEAC UD-H01 firmware sometimes changes the feedback value
9569     + * by +/- 0x1.0000.
9570     + */
9571     + if (f < ep->freqn - 0x8000)
9572     + f += 0x10000;
9573     + else if (f > ep->freqn + 0x8000)
9574     + f -= 0x10000;
9575     + } else if (unlikely(ep->freqshift == INT_MIN)) {
9576     /*
9577     * The first time we see a feedback value, determine its format
9578     * by shifting it left or right until it matches the nominal