Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.7/0101-4.7.2-all-fixes.patch



Revision 2821
Wed Aug 31 12:36:25 2016 UTC by niro
File size: 253567 bytes
-linux-4.7.2
1 diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt
2 index 696d5caf4fd8..f0e3361db20c 100644
3 --- a/Documentation/module-signing.txt
4 +++ b/Documentation/module-signing.txt
5 @@ -271,3 +271,9 @@ Since the private key is used to sign modules, viruses and malware could use
6 the private key to sign modules and compromise the operating system. The
7 private key must be either destroyed or moved to a secure location and not kept
8 in the root node of the kernel source tree.
9 +
10 +If you use the same private key to sign modules for multiple kernel
11 +configurations, you must ensure that the module version information is
12 +sufficient to prevent loading a module into a different kernel. Either
13 +set CONFIG_MODVERSIONS=y or ensure that each configuration has a different
14 +kernel release string by changing EXTRAVERSION or CONFIG_LOCALVERSION.
15 diff --git a/Makefile b/Makefile
16 index 84335c0b2eda..bb98f1ce854e 100644
17 --- a/Makefile
18 +++ b/Makefile
19 @@ -1,6 +1,6 @@
20 VERSION = 4
21 PATCHLEVEL = 7
22 -SUBLEVEL = 1
23 +SUBLEVEL = 2
24 EXTRAVERSION =
25 NAME = Psychotic Stoned Sheep
26
27 diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
28 index 858f98ef7f1b..0f92d97432a2 100644
29 --- a/arch/arc/include/asm/pgtable.h
30 +++ b/arch/arc/include/asm/pgtable.h
31 @@ -110,7 +110,7 @@
32 #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
33
34 /* Set of bits not changed in pte_modify */
35 -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
36 +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
37
38 /* More Abbrevaited helpers */
39 #define PAGE_U_NONE __pgprot(___DEF)
40 diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
41 index 73d7e4c75b7d..ab74b5d9186c 100644
42 --- a/arch/arc/mm/dma.c
43 +++ b/arch/arc/mm/dma.c
44 @@ -92,7 +92,8 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
45 static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
46 dma_addr_t dma_handle, struct dma_attrs *attrs)
47 {
48 - struct page *page = virt_to_page(dma_handle);
49 + phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);
50 + struct page *page = virt_to_page(paddr);
51 int is_non_coh = 1;
52
53 is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) ||
54 diff --git a/arch/arm/boot/dts/arm-realview-pbx-a9.dts b/arch/arm/boot/dts/arm-realview-pbx-a9.dts
55 index db808f92dd79..90d00b407f85 100644
56 --- a/arch/arm/boot/dts/arm-realview-pbx-a9.dts
57 +++ b/arch/arm/boot/dts/arm-realview-pbx-a9.dts
58 @@ -70,13 +70,12 @@
59 * associativity as these may be erroneously set
60 * up by boot loader(s).
61 */
62 - cache-size = <1048576>; // 1MB
63 - cache-sets = <4096>;
64 + cache-size = <131072>; // 128KB
65 + cache-sets = <512>;
66 cache-line-size = <32>;
67 arm,parity-disable;
68 - arm,tag-latency = <1>;
69 - arm,data-latency = <1 1>;
70 - arm,dirty-latency = <1>;
71 + arm,tag-latency = <1 1 1>;
72 + arm,data-latency = <1 1 1>;
73 };
74
75 scu: scu@1f000000 {
76 diff --git a/arch/arm/boot/dts/sun4i-a10-a1000.dts b/arch/arm/boot/dts/sun4i-a10-a1000.dts
77 index c92a1ae33a1e..fa70b8fbf221 100644
78 --- a/arch/arm/boot/dts/sun4i-a10-a1000.dts
79 +++ b/arch/arm/boot/dts/sun4i-a10-a1000.dts
80 @@ -84,6 +84,7 @@
81 regulator-name = "emac-3v3";
82 regulator-min-microvolt = <3300000>;
83 regulator-max-microvolt = <3300000>;
84 + startup-delay-us = <20000>;
85 enable-active-high;
86 gpio = <&pio 7 15 GPIO_ACTIVE_HIGH>;
87 };
88 diff --git a/arch/arm/boot/dts/sun4i-a10-hackberry.dts b/arch/arm/boot/dts/sun4i-a10-hackberry.dts
89 index 2b17c5199151..6de83a6187d0 100644
90 --- a/arch/arm/boot/dts/sun4i-a10-hackberry.dts
91 +++ b/arch/arm/boot/dts/sun4i-a10-hackberry.dts
92 @@ -66,6 +66,7 @@
93 regulator-name = "emac-3v3";
94 regulator-min-microvolt = <3300000>;
95 regulator-max-microvolt = <3300000>;
96 + startup-delay-us = <20000>;
97 enable-active-high;
98 gpio = <&pio 7 19 GPIO_ACTIVE_HIGH>;
99 };
100 diff --git a/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts b/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
101 index 7afc7a64eef1..e28f080b1fd5 100644
102 --- a/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
103 +++ b/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
104 @@ -80,6 +80,7 @@
105 regulator-name = "emac-3v3";
106 regulator-min-microvolt = <3300000>;
107 regulator-max-microvolt = <3300000>;
108 + startup-delay-us = <20000>;
109 enable-active-high;
110 gpio = <&pio 7 19 GPIO_ACTIVE_HIGH>; /* PH19 */
111 };
112 diff --git a/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts b/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
113 index 9fea918f949e..39731a78f087 100644
114 --- a/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
115 +++ b/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
116 @@ -79,6 +79,7 @@
117 regulator-name = "emac-3v3";
118 regulator-min-microvolt = <3300000>;
119 regulator-max-microvolt = <3300000>;
120 + startup-delay-us = <20000>;
121 enable-active-high;
122 gpio = <&pio 0 2 GPIO_ACTIVE_HIGH>;
123 };
124 diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
125 index 941f36263c8f..f4d8125c1bfc 100644
126 --- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts
127 +++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
128 @@ -1386,7 +1386,7 @@
129 * Pin 41: BR_UART1_TXD
130 * Pin 44: BR_UART1_RXD
131 */
132 - serial@70006000 {
133 + serial@0,70006000 {
134 compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
135 status = "okay";
136 };
137 @@ -1398,7 +1398,7 @@
138 * Pin 71: UART2_CTS_L
139 * Pin 74: UART2_RTS_L
140 */
141 - serial@70006040 {
142 + serial@0,70006040 {
143 compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
144 status = "okay";
145 };
146 diff --git a/arch/arm/configs/aspeed_g4_defconfig b/arch/arm/configs/aspeed_g4_defconfig
147 index b6e54ee9bdbd..ca39c04fec6b 100644
148 --- a/arch/arm/configs/aspeed_g4_defconfig
149 +++ b/arch/arm/configs/aspeed_g4_defconfig
150 @@ -58,7 +58,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
151 # CONFIG_IOMMU_SUPPORT is not set
152 CONFIG_FIRMWARE_MEMMAP=y
153 CONFIG_FANOTIFY=y
154 -CONFIG_PRINTK_TIME=1
155 +CONFIG_PRINTK_TIME=y
156 CONFIG_DYNAMIC_DEBUG=y
157 CONFIG_STRIP_ASM_SYMS=y
158 CONFIG_PAGE_POISONING=y
159 diff --git a/arch/arm/configs/aspeed_g5_defconfig b/arch/arm/configs/aspeed_g5_defconfig
160 index 892605167357..4f366b0370e9 100644
161 --- a/arch/arm/configs/aspeed_g5_defconfig
162 +++ b/arch/arm/configs/aspeed_g5_defconfig
163 @@ -59,7 +59,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
164 # CONFIG_IOMMU_SUPPORT is not set
165 CONFIG_FIRMWARE_MEMMAP=y
166 CONFIG_FANOTIFY=y
167 -CONFIG_PRINTK_TIME=1
168 +CONFIG_PRINTK_TIME=y
169 CONFIG_DYNAMIC_DEBUG=y
170 CONFIG_STRIP_ASM_SYMS=y
171 CONFIG_PAGE_POISONING=y
172 diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
173 index ff7ed5697d3e..d2485c749ad5 100644
174 --- a/arch/arm/mm/dma-mapping.c
175 +++ b/arch/arm/mm/dma-mapping.c
176 @@ -49,6 +49,7 @@ struct arm_dma_alloc_args {
177 pgprot_t prot;
178 const void *caller;
179 bool want_vaddr;
180 + int coherent_flag;
181 };
182
183 struct arm_dma_free_args {
184 @@ -59,6 +60,9 @@ struct arm_dma_free_args {
185 bool want_vaddr;
186 };
187
188 +#define NORMAL 0
189 +#define COHERENT 1
190 +
191 struct arm_dma_allocator {
192 void *(*alloc)(struct arm_dma_alloc_args *args,
193 struct page **ret_page);
194 @@ -272,7 +276,7 @@ static u64 get_coherent_dma_mask(struct device *dev)
195 return mask;
196 }
197
198 -static void __dma_clear_buffer(struct page *page, size_t size)
199 +static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
200 {
201 /*
202 * Ensure that the allocated pages are zeroed, and that any data
203 @@ -284,17 +288,21 @@ static void __dma_clear_buffer(struct page *page, size_t size)
204 while (size > 0) {
205 void *ptr = kmap_atomic(page);
206 memset(ptr, 0, PAGE_SIZE);
207 - dmac_flush_range(ptr, ptr + PAGE_SIZE);
208 + if (coherent_flag != COHERENT)
209 + dmac_flush_range(ptr, ptr + PAGE_SIZE);
210 kunmap_atomic(ptr);
211 page++;
212 size -= PAGE_SIZE;
213 }
214 - outer_flush_range(base, end);
215 + if (coherent_flag != COHERENT)
216 + outer_flush_range(base, end);
217 } else {
218 void *ptr = page_address(page);
219 memset(ptr, 0, size);
220 - dmac_flush_range(ptr, ptr + size);
221 - outer_flush_range(__pa(ptr), __pa(ptr) + size);
222 + if (coherent_flag != COHERENT) {
223 + dmac_flush_range(ptr, ptr + size);
224 + outer_flush_range(__pa(ptr), __pa(ptr) + size);
225 + }
226 }
227 }
228
229 @@ -302,7 +310,8 @@ static void __dma_clear_buffer(struct page *page, size_t size)
230 * Allocate a DMA buffer for 'dev' of size 'size' using the
231 * specified gfp mask. Note that 'size' must be page aligned.
232 */
233 -static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
234 +static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
235 + gfp_t gfp, int coherent_flag)
236 {
237 unsigned long order = get_order(size);
238 struct page *page, *p, *e;
239 @@ -318,7 +327,7 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
240 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
241 __free_page(p);
242
243 - __dma_clear_buffer(page, size);
244 + __dma_clear_buffer(page, size, coherent_flag);
245
246 return page;
247 }
248 @@ -340,7 +349,8 @@ static void __dma_free_buffer(struct page *page, size_t size)
249
250 static void *__alloc_from_contiguous(struct device *dev, size_t size,
251 pgprot_t prot, struct page **ret_page,
252 - const void *caller, bool want_vaddr);
253 + const void *caller, bool want_vaddr,
254 + int coherent_flag);
255
256 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
257 pgprot_t prot, struct page **ret_page,
258 @@ -405,10 +415,13 @@ static int __init atomic_pool_init(void)
259 atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
260 if (!atomic_pool)
261 goto out;
262 -
263 + /*
264 + * The atomic pool is only used for non-coherent allocations
265 + * so we must pass NORMAL for coherent_flag.
266 + */
267 if (dev_get_cma_area(NULL))
268 ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
269 - &page, atomic_pool_init, true);
270 + &page, atomic_pool_init, true, NORMAL);
271 else
272 ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
273 &page, atomic_pool_init, true);
274 @@ -522,7 +535,11 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
275 {
276 struct page *page;
277 void *ptr = NULL;
278 - page = __dma_alloc_buffer(dev, size, gfp);
279 + /*
280 + * __alloc_remap_buffer is only called when the device is
281 + * non-coherent
282 + */
283 + page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
284 if (!page)
285 return NULL;
286 if (!want_vaddr)
287 @@ -577,7 +594,8 @@ static int __free_from_pool(void *start, size_t size)
288
289 static void *__alloc_from_contiguous(struct device *dev, size_t size,
290 pgprot_t prot, struct page **ret_page,
291 - const void *caller, bool want_vaddr)
292 + const void *caller, bool want_vaddr,
293 + int coherent_flag)
294 {
295 unsigned long order = get_order(size);
296 size_t count = size >> PAGE_SHIFT;
297 @@ -588,7 +606,7 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
298 if (!page)
299 return NULL;
300
301 - __dma_clear_buffer(page, size);
302 + __dma_clear_buffer(page, size, coherent_flag);
303
304 if (!want_vaddr)
305 goto out;
306 @@ -638,7 +656,7 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
307 #define __get_dma_pgprot(attrs, prot) __pgprot(0)
308 #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv) NULL
309 #define __alloc_from_pool(size, ret_page) NULL
310 -#define __alloc_from_contiguous(dev, size, prot, ret, c, wv) NULL
311 +#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag) NULL
312 #define __free_from_pool(cpu_addr, size) do { } while (0)
313 #define __free_from_contiguous(dev, page, cpu_addr, size, wv) do { } while (0)
314 #define __dma_free_remap(cpu_addr, size) do { } while (0)
315 @@ -649,7 +667,8 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
316 struct page **ret_page)
317 {
318 struct page *page;
319 - page = __dma_alloc_buffer(dev, size, gfp);
320 + /* __alloc_simple_buffer is only called when the device is coherent */
321 + page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
322 if (!page)
323 return NULL;
324
325 @@ -679,7 +698,7 @@ static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
326 {
327 return __alloc_from_contiguous(args->dev, args->size, args->prot,
328 ret_page, args->caller,
329 - args->want_vaddr);
330 + args->want_vaddr, args->coherent_flag);
331 }
332
333 static void cma_allocator_free(struct arm_dma_free_args *args)
334 @@ -746,6 +765,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
335 .prot = prot,
336 .caller = caller,
337 .want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
338 + .coherent_flag = is_coherent ? COHERENT : NORMAL,
339 };
340
341 #ifdef CONFIG_DMA_API_DEBUG
342 @@ -1253,7 +1273,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
343 static const int iommu_order_array[] = { 9, 8, 4, 0 };
344
345 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
346 - gfp_t gfp, struct dma_attrs *attrs)
347 + gfp_t gfp, struct dma_attrs *attrs,
348 + int coherent_flag)
349 {
350 struct page **pages;
351 int count = size >> PAGE_SHIFT;
352 @@ -1277,7 +1298,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
353 if (!page)
354 goto error;
355
356 - __dma_clear_buffer(page, size);
357 + __dma_clear_buffer(page, size, coherent_flag);
358
359 for (i = 0; i < count; i++)
360 pages[i] = page + i;
361 @@ -1327,7 +1348,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
362 pages[i + j] = pages[i] + j;
363 }
364
365 - __dma_clear_buffer(pages[i], PAGE_SIZE << order);
366 + __dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
367 i += 1 << order;
368 count -= 1 << order;
369 }
370 @@ -1505,7 +1526,8 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
371 */
372 gfp &= ~(__GFP_COMP);
373
374 - pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
375 + /* For now always consider we are in a non-coherent case */
376 + pages = __iommu_alloc_buffer(dev, size, gfp, attrs, NORMAL);
377 if (!pages)
378 return NULL;
379
380 diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
381 index 5a0a691d4220..20384925bb0f 100644
382 --- a/arch/arm64/Kconfig
383 +++ b/arch/arm64/Kconfig
384 @@ -872,7 +872,7 @@ config RELOCATABLE
385
386 config RANDOMIZE_BASE
387 bool "Randomize the address of the kernel image"
388 - select ARM64_MODULE_PLTS
389 + select ARM64_MODULE_PLTS if MODULES
390 select RELOCATABLE
391 help
392 Randomizes the virtual address at which the kernel image is
393 diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
394 index 8b4a7c9154e9..080203e3aa2f 100644
395 --- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
396 +++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
397 @@ -670,7 +670,7 @@
398 #address-cells = <0>;
399
400 reg = <0x0 0xffb71000 0x0 0x1000>,
401 - <0x0 0xffb72000 0x0 0x1000>,
402 + <0x0 0xffb72000 0x0 0x2000>,
403 <0x0 0xffb74000 0x0 0x2000>,
404 <0x0 0xffb76000 0x0 0x2000>;
405 interrupts = <GIC_PPI 9
406 diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
407 index 4fbf3c54275c..0800d23e2fdd 100644
408 --- a/arch/arm64/kernel/debug-monitors.c
409 +++ b/arch/arm64/kernel/debug-monitors.c
410 @@ -151,7 +151,6 @@ static int debug_monitors_init(void)
411 /* Clear the OS lock. */
412 on_each_cpu(clear_os_lock, NULL, 1);
413 isb();
414 - local_dbg_enable();
415
416 /* Register hotplug handler. */
417 __register_cpu_notifier(&os_lock_nb);
418 diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
419 index 21ab5df9fa76..65d81f965e74 100644
420 --- a/arch/arm64/kernel/hibernate.c
421 +++ b/arch/arm64/kernel/hibernate.c
422 @@ -35,6 +35,7 @@
423 #include <asm/sections.h>
424 #include <asm/smp.h>
425 #include <asm/suspend.h>
426 +#include <asm/sysreg.h>
427 #include <asm/virt.h>
428
429 /*
430 @@ -217,12 +218,22 @@ static int create_safe_exec_page(void *src_start, size_t length,
431 set_pte(pte, __pte(virt_to_phys((void *)dst) |
432 pgprot_val(PAGE_KERNEL_EXEC)));
433
434 - /* Load our new page tables */
435 - asm volatile("msr ttbr0_el1, %0;"
436 - "isb;"
437 - "tlbi vmalle1is;"
438 - "dsb ish;"
439 - "isb" : : "r"(virt_to_phys(pgd)));
440 + /*
441 + * Load our new page tables. A strict BBM approach requires that we
442 + * ensure that TLBs are free of any entries that may overlap with the
443 + * global mappings we are about to install.
444 + *
445 + * For a real hibernate/resume cycle TTBR0 currently points to a zero
446 + * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
447 + * runtime services), while for a userspace-driven test_resume cycle it
448 + * points to userspace page tables (and we must point it at a zero page
449 + * ourselves). Elsewhere we only (un)install the idmap with preemption
450 + * disabled, so T0SZ should be as required regardless.
451 + */
452 + cpu_set_reserved_ttbr0();
453 + local_flush_tlb_all();
454 + write_sysreg(virt_to_phys(pgd), ttbr0_el1);
455 + isb();
456
457 *phys_dst_addr = virt_to_phys((void *)dst);
458
459 @@ -394,6 +405,38 @@ int swsusp_arch_resume(void)
460 void *, phys_addr_t, phys_addr_t);
461
462 /*
463 + * Restoring the memory image will overwrite the ttbr1 page tables.
464 + * Create a second copy of just the linear map, and use this when
465 + * restoring.
466 + */
467 + tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
468 + if (!tmp_pg_dir) {
469 + pr_err("Failed to allocate memory for temporary page tables.");
470 + rc = -ENOMEM;
471 + goto out;
472 + }
473 + rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
474 + if (rc)
475 + goto out;
476 +
477 + /*
478 + * Since we only copied the linear map, we need to find restore_pblist's
479 + * linear map address.
480 + */
481 + lm_restore_pblist = LMADDR(restore_pblist);
482 +
483 + /*
484 + * We need a zero page that is zero before & after resume in order to
485 + * to break before make on the ttbr1 page tables.
486 + */
487 + zero_page = (void *)get_safe_page(GFP_ATOMIC);
488 + if (!zero_page) {
489 + pr_err("Failed to allocate zero page.");
490 + rc = -ENOMEM;
491 + goto out;
492 + }
493 +
494 + /*
495 * Locate the exit code in the bottom-but-one page, so that *NULL
496 * still has disastrous affects.
497 */
498 @@ -419,27 +462,6 @@ int swsusp_arch_resume(void)
499 __flush_dcache_area(hibernate_exit, exit_size);
500
501 /*
502 - * Restoring the memory image will overwrite the ttbr1 page tables.
503 - * Create a second copy of just the linear map, and use this when
504 - * restoring.
505 - */
506 - tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
507 - if (!tmp_pg_dir) {
508 - pr_err("Failed to allocate memory for temporary page tables.");
509 - rc = -ENOMEM;
510 - goto out;
511 - }
512 - rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
513 - if (rc)
514 - goto out;
515 -
516 - /*
517 - * Since we only copied the linear map, we need to find restore_pblist's
518 - * linear map address.
519 - */
520 - lm_restore_pblist = LMADDR(restore_pblist);
521 -
522 - /*
523 * KASLR will cause the el2 vectors to be in a different location in
524 * the resumed kernel. Load hibernate's temporary copy into el2.
525 *
526 @@ -453,12 +475,6 @@ int swsusp_arch_resume(void)
527 __hyp_set_vectors(el2_vectors);
528 }
529
530 - /*
531 - * We need a zero page that is zero before & after resume in order to
532 - * to break before make on the ttbr1 page tables.
533 - */
534 - zero_page = (void *)get_safe_page(GFP_ATOMIC);
535 -
536 hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
537 resume_hdr.reenter_kernel, lm_restore_pblist,
538 resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
539 diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
540 index 62ff3c0622e2..490db85dec23 100644
541 --- a/arch/arm64/kernel/smp.c
542 +++ b/arch/arm64/kernel/smp.c
543 @@ -267,7 +267,6 @@ asmlinkage void secondary_start_kernel(void)
544 set_cpu_online(cpu, true);
545 complete(&cpu_running);
546
547 - local_dbg_enable();
548 local_irq_enable();
549 local_async_enable();
550
551 @@ -437,9 +436,9 @@ void __init smp_cpus_done(unsigned int max_cpus)
552
553 void __init smp_prepare_boot_cpu(void)
554 {
555 + set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
556 cpuinfo_store_boot_cpu();
557 save_boot_cpu_run_el();
558 - set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
559 }
560
561 static u64 __init of_get_cpu_mpidr(struct device_node *dn)
562 @@ -694,6 +693,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
563 smp_store_cpu_info(smp_processor_id());
564
565 /*
566 + * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
567 + * secondary CPUs present.
568 + */
569 + if (max_cpus == 0)
570 + return;
571 +
572 + /*
573 * Initialise the present map (which describes the set of CPUs
574 * actually populated at the present time) and release the
575 * secondaries from the bootloader.
576 diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
577 index 435e820e898d..e564d4595998 100644
578 --- a/arch/arm64/kernel/vmlinux.lds.S
579 +++ b/arch/arm64/kernel/vmlinux.lds.S
580 @@ -181,9 +181,9 @@ SECTIONS
581 *(.hash)
582 }
583
584 - __rela_offset = ADDR(.rela) - KIMAGE_VADDR;
585 + __rela_offset = ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
586 __rela_size = SIZEOF(.rela);
587 - __dynsym_offset = ADDR(.dynsym) - KIMAGE_VADDR;
588 + __dynsym_offset = ABSOLUTE(ADDR(.dynsym) - KIMAGE_VADDR);
589
590 . = ALIGN(SEGMENT_ALIGN);
591 __init_end = .;
592 diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
593 index 0f7c40eb3f53..934137647837 100644
594 --- a/arch/arm64/kvm/hyp/sysreg-sr.c
595 +++ b/arch/arm64/kvm/hyp/sysreg-sr.c
596 @@ -27,8 +27,8 @@ static void __hyp_text __sysreg_do_nothing(struct kvm_cpu_context *ctxt) { }
597 /*
598 * Non-VHE: Both host and guest must save everything.
599 *
600 - * VHE: Host must save tpidr*_el[01], actlr_el1, sp0, pc, pstate, and
601 - * guest must save everything.
602 + * VHE: Host must save tpidr*_el[01], actlr_el1, mdscr_el1, sp0, pc,
603 + * pstate, and guest must save everything.
604 */
605
606 static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
607 @@ -37,6 +37,7 @@ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
608 ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
609 ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
610 ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
611 + ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
612 ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
613 ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
614 ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
615 @@ -61,7 +62,6 @@ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
616 ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(amair);
617 ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(cntkctl);
618 ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
619 - ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
620
621 ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
622 ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr);
623 @@ -90,6 +90,7 @@ static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctx
624 write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
625 write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
626 write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
627 + write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
628 write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
629 write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
630 write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
631 @@ -114,7 +115,6 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
632 write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair);
633 write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl);
634 write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
635 - write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
636
637 write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
638 write_sysreg_el1(ctxt->gp_regs.elr_el1, elr);
639 diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
640 index e9e0e6db73f6..898c0e6aedd4 100644
641 --- a/arch/arm64/kvm/inject_fault.c
642 +++ b/arch/arm64/kvm/inject_fault.c
643 @@ -132,16 +132,14 @@ static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
644 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
645 {
646 unsigned long cpsr = *vcpu_cpsr(vcpu);
647 - bool is_aarch32;
648 + bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
649 u32 esr = 0;
650
651 - is_aarch32 = vcpu_mode_is_32bit(vcpu);
652 -
653 - *vcpu_spsr(vcpu) = cpsr;
654 *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
655 -
656 *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
657 +
658 *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
659 + *vcpu_spsr(vcpu) = cpsr;
660
661 vcpu_sys_reg(vcpu, FAR_EL1) = addr;
662
663 @@ -172,11 +170,11 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
664 unsigned long cpsr = *vcpu_cpsr(vcpu);
665 u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
666
667 - *vcpu_spsr(vcpu) = cpsr;
668 *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
669 -
670 *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
671 +
672 *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
673 + *vcpu_spsr(vcpu) = cpsr;
674
675 /*
676 * Build an unknown exception, depending on the instruction
677 diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
678 index 0f85a46c3e18..3e90a2cad995 100644
679 --- a/arch/arm64/mm/mmu.c
680 +++ b/arch/arm64/mm/mmu.c
681 @@ -748,9 +748,9 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
682 /*
683 * Check whether the physical FDT address is set and meets the minimum
684 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
685 - * at least 8 bytes so that we can always access the size field of the
686 - * FDT header after mapping the first chunk, double check here if that
687 - * is indeed the case.
688 + * at least 8 bytes so that we can always access the magic and size
689 + * fields of the FDT header after mapping the first chunk, double check
690 + * here if that is indeed the case.
691 */
692 BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
693 if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
694 @@ -778,7 +778,7 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
695 create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
696 dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
697
698 - if (fdt_check_header(dt_virt) != 0)
699 + if (fdt_magic(dt_virt) != FDT_MAGIC)
700 return NULL;
701
702 *size = fdt_totalsize(dt_virt);
703 diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
704 index c4317879b938..5bb61de23201 100644
705 --- a/arch/arm64/mm/proc.S
706 +++ b/arch/arm64/mm/proc.S
707 @@ -180,6 +180,8 @@ ENTRY(__cpu_setup)
708 msr cpacr_el1, x0 // Enable FP/ASIMD
709 mov x0, #1 << 12 // Reset mdscr_el1 and disable
710 msr mdscr_el1, x0 // access to the DCC from EL0
711 + isb // Unmask debug exceptions now,
712 + enable_dbg // since this is per-cpu
713 reset_pmuserenr_el0 x0 // Disable PMU access from EL0
714 /*
715 * Memory region attributes for LPAE:
716 diff --git a/arch/metag/include/asm/cmpxchg_lnkget.h b/arch/metag/include/asm/cmpxchg_lnkget.h
717 index 0154e2807ebb..2369ad394876 100644
718 --- a/arch/metag/include/asm/cmpxchg_lnkget.h
719 +++ b/arch/metag/include/asm/cmpxchg_lnkget.h
720 @@ -73,7 +73,7 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
721 " DCACHE [%2], %0\n"
722 #endif
723 "2:\n"
724 - : "=&d" (temp), "=&da" (retval)
725 + : "=&d" (temp), "=&d" (retval)
726 : "da" (m), "bd" (old), "da" (new)
727 : "cc"
728 );
729 diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
730 index e4c21bbf9422..804d2a2a19fe 100644
731 --- a/arch/mips/kernel/cevt-r4k.c
732 +++ b/arch/mips/kernel/cevt-r4k.c
733 @@ -276,12 +276,7 @@ int r4k_clockevent_init(void)
734 CLOCK_EVT_FEAT_C3STOP |
735 CLOCK_EVT_FEAT_PERCPU;
736
737 - clockevent_set_clock(cd, mips_hpt_frequency);
738 -
739 - /* Calculate the min / max delta */
740 - cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
741 min_delta = calculate_min_delta();
742 - cd->min_delta_ns = clockevent_delta2ns(min_delta, cd);
743
744 cd->rating = 300;
745 cd->irq = irq;
746 @@ -289,7 +284,7 @@ int r4k_clockevent_init(void)
747 cd->set_next_event = mips_next_event;
748 cd->event_handler = mips_event_handler;
749
750 - clockevents_register_device(cd);
751 + clockevents_config_and_register(cd, mips_hpt_frequency, min_delta, 0x7fffffff);
752
753 if (cp0_timer_irq_installed)
754 return 0;
755 diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
756 index 1f910563fdf6..d76275da54cb 100644
757 --- a/arch/mips/kernel/csrc-r4k.c
758 +++ b/arch/mips/kernel/csrc-r4k.c
759 @@ -23,7 +23,7 @@ static struct clocksource clocksource_mips = {
760 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
761 };
762
763 -static u64 notrace r4k_read_sched_clock(void)
764 +static u64 __maybe_unused notrace r4k_read_sched_clock(void)
765 {
766 return read_c0_count();
767 }
768 @@ -82,7 +82,9 @@ int __init init_r4k_clocksource(void)
769
770 clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
771
772 +#ifndef CONFIG_CPU_FREQ
773 sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);
774 +#endif
775
776 return 0;
777 }
778 diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
779 index 645c8a1982a7..2b42a74ed771 100644
780 --- a/arch/mips/kvm/emulate.c
781 +++ b/arch/mips/kvm/emulate.c
782 @@ -1615,8 +1615,14 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
783
784 preempt_disable();
785 if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
786 - if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
787 - kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
788 + if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
789 + kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
790 + kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
791 + __func__, va, vcpu, read_c0_entryhi());
792 + er = EMULATE_FAIL;
793 + preempt_enable();
794 + goto done;
795 + }
796 } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
797 KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
798 int index;
799 @@ -1654,14 +1660,19 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
800 run, vcpu);
801 preempt_enable();
802 goto dont_update_pc;
803 - } else {
804 - /*
805 - * We fault an entry from the guest tlb to the
806 - * shadow host TLB
807 - */
808 - kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
809 - NULL,
810 - NULL);
811 + }
812 + /*
813 + * We fault an entry from the guest tlb to the
814 + * shadow host TLB
815 + */
816 + if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
817 + NULL, NULL)) {
818 + kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
819 + __func__, va, index, vcpu,
820 + read_c0_entryhi());
821 + er = EMULATE_FAIL;
822 + preempt_enable();
823 + goto done;
824 }
825 }
826 } else {
827 @@ -2625,8 +2636,13 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
828 * OK we have a Guest TLB entry, now inject it into the
829 * shadow host TLB
830 */
831 - kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
832 - NULL);
833 + if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
834 + NULL, NULL)) {
835 + kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
836 + __func__, va, index, vcpu,
837 + read_c0_entryhi());
838 + er = EMULATE_FAIL;
839 + }
840 }
841 }
842
843 diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
844 index ed021ae7867a..ad2270ff83d1 100644
845 --- a/arch/mips/kvm/tlb.c
846 +++ b/arch/mips/kvm/tlb.c
847 @@ -284,7 +284,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
848 }
849
850 gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
851 - if (gfn >= kvm->arch.guest_pmap_npages) {
852 + if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
853 kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
854 gfn, badvaddr);
855 kvm_mips_dump_host_tlbs();
856 @@ -373,26 +373,40 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
857 unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
858 struct kvm *kvm = vcpu->kvm;
859 kvm_pfn_t pfn0, pfn1;
860 + gfn_t gfn0, gfn1;
861 + long tlb_lo[2];
862 int ret;
863
864 - if ((tlb->tlb_hi & VPN2_MASK) == 0) {
865 - pfn0 = 0;
866 - pfn1 = 0;
867 - } else {
868 - if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
869 - >> PAGE_SHIFT) < 0)
870 - return -1;
871 -
872 - if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
873 - >> PAGE_SHIFT) < 0)
874 - return -1;
875 -
876 - pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
877 - >> PAGE_SHIFT];
878 - pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
879 - >> PAGE_SHIFT];
880 + tlb_lo[0] = tlb->tlb_lo0;
881 + tlb_lo[1] = tlb->tlb_lo1;
882 +
883 + /*
884 + * The commpage address must not be mapped to anything else if the guest
885 + * TLB contains entries nearby, or commpage accesses will break.
886 + */
887 + if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
888 + VPN2_MASK & (PAGE_MASK << 1)))
889 + tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
890 +
891 + gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
892 + gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
893 + if (gfn0 >= kvm->arch.guest_pmap_npages ||
894 + gfn1 >= kvm->arch.guest_pmap_npages) {
895 + kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
896 + __func__, gfn0, gfn1, tlb->tlb_hi);
897 + kvm_mips_dump_guest_tlbs(vcpu);
898 + return -1;
899 }
900
901 + if (kvm_mips_map_page(kvm, gfn0) < 0)
902 + return -1;
903 +
904 + if (kvm_mips_map_page(kvm, gfn1) < 0)
905 + return -1;
906 +
907 + pfn0 = kvm->arch.guest_pmap[gfn0];
908 + pfn1 = kvm->arch.guest_pmap[gfn1];
909 +
910 if (hpa0)
911 *hpa0 = pfn0 << PAGE_SHIFT;
912
913 @@ -401,9 +415,9 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
914
915 /* Get attributes from the Guest TLB */
916 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
917 - (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
918 + (tlb_lo[0] & MIPS3_PG_D) | (tlb_lo[0] & MIPS3_PG_V);
919 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
920 - (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
921 + (tlb_lo[1] & MIPS3_PG_D) | (tlb_lo[1] & MIPS3_PG_V);
922
923 kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
924 tlb->tlb_lo0, tlb->tlb_lo1);
925 @@ -776,10 +790,16 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
926 local_irq_restore(flags);
927 return KVM_INVALID_INST;
928 }
929 - kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
930 - &vcpu->arch.
931 - guest_tlb[index],
932 - NULL, NULL);
933 + if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
934 + &vcpu->arch.guest_tlb[index],
935 + NULL, NULL)) {
936 + kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
937 + __func__, opc, index, vcpu,
938 + read_c0_entryhi());
939 + kvm_mips_dump_guest_tlbs(vcpu);
940 + local_irq_restore(flags);
941 + return KVM_INVALID_INST;
942 + }
943 inst = *(opc);
944 }
945 local_irq_restore(flags);
946 diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c
947 index 249039af66c4..4788bea62a6a 100644
948 --- a/arch/mips/loongson64/loongson-3/hpet.c
949 +++ b/arch/mips/loongson64/loongson-3/hpet.c
950 @@ -13,8 +13,8 @@
951 #define SMBUS_PCI_REG64 0x64
952 #define SMBUS_PCI_REGB4 0xb4
953
954 -#define HPET_MIN_CYCLES 64
955 -#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
956 +#define HPET_MIN_CYCLES 16
957 +#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES * 12)
958
959 static DEFINE_SPINLOCK(hpet_lock);
960 DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
961 @@ -157,14 +157,14 @@ static int hpet_tick_resume(struct clock_event_device *evt)
962 static int hpet_next_event(unsigned long delta,
963 struct clock_event_device *evt)
964 {
965 - unsigned int cnt;
966 - int res;
967 + u32 cnt;
968 + s32 res;
969
970 cnt = hpet_read(HPET_COUNTER);
971 - cnt += delta;
972 + cnt += (u32) delta;
973 hpet_write(HPET_T0_CMP, cnt);
974
975 - res = (int)(cnt - hpet_read(HPET_COUNTER));
976 + res = (s32)(cnt - hpet_read(HPET_COUNTER));
977
978 return res < HPET_MIN_CYCLES ? -ETIME : 0;
979 }
980 @@ -230,7 +230,7 @@ void __init setup_hpet_timer(void)
981
982 cd = &per_cpu(hpet_clockevent_device, cpu);
983 cd->name = "hpet";
984 - cd->rating = 320;
985 + cd->rating = 100;
986 cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
987 cd->set_state_shutdown = hpet_set_state_shutdown;
988 cd->set_state_periodic = hpet_set_state_periodic;
989 diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
990 index 9c2220a45189..45e3b8799ed0 100644
991 --- a/arch/mips/mm/uasm-mips.c
992 +++ b/arch/mips/mm/uasm-mips.c
993 @@ -65,7 +65,7 @@ static struct insn insn_table[] = {
994 #ifndef CONFIG_CPU_MIPSR6
995 { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
996 #else
997 - { insn_cache, M6(cache_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 },
998 + { insn_cache, M6(spec3_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 },
999 #endif
1000 { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
1001 { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
1002 diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
1003 index b7019b559ddb..298afcf3bf2a 100644
1004 --- a/arch/powerpc/kernel/tm.S
1005 +++ b/arch/powerpc/kernel/tm.S
1006 @@ -338,8 +338,6 @@ _GLOBAL(__tm_recheckpoint)
1007 */
1008 subi r7, r7, STACK_FRAME_OVERHEAD
1009
1010 - SET_SCRATCH0(r1)
1011 -
1012 mfmsr r6
1013 /* R4 = original MSR to indicate whether thread used FP/Vector etc. */
1014
1015 @@ -468,6 +466,7 @@ restore_gprs:
1016 * until we turn MSR RI back on.
1017 */
1018
1019 + SET_SCRATCH0(r1)
1020 ld r5, -8(r1)
1021 ld r1, -16(r1)
1022
1023 diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
1024 index e571ad277398..38e108eaeafe 100644
1025 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
1026 +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
1027 @@ -655,112 +655,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
1028
1029 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1030 BEGIN_FTR_SECTION
1031 - b skip_tm
1032 -END_FTR_SECTION_IFCLR(CPU_FTR_TM)
1033 -
1034 - /* Turn on TM/FP/VSX/VMX so we can restore them. */
1035 - mfmsr r5
1036 - li r6, MSR_TM >> 32
1037 - sldi r6, r6, 32
1038 - or r5, r5, r6
1039 - ori r5, r5, MSR_FP
1040 - oris r5, r5, (MSR_VEC | MSR_VSX)@h
1041 - mtmsrd r5
1042 -
1043 - /*
1044 - * The user may change these outside of a transaction, so they must
1045 - * always be context switched.
1046 - */
1047 - ld r5, VCPU_TFHAR(r4)
1048 - ld r6, VCPU_TFIAR(r4)
1049 - ld r7, VCPU_TEXASR(r4)
1050 - mtspr SPRN_TFHAR, r5
1051 - mtspr SPRN_TFIAR, r6
1052 - mtspr SPRN_TEXASR, r7
1053 -
1054 - ld r5, VCPU_MSR(r4)
1055 - rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
1056 - beq skip_tm /* TM not active in guest */
1057 -
1058 - /* Make sure the failure summary is set, otherwise we'll program check
1059 - * when we trechkpt. It's possible that this might have been not set
1060 - * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
1061 - * host.
1062 - */
1063 - oris r7, r7, (TEXASR_FS)@h
1064 - mtspr SPRN_TEXASR, r7
1065 -
1066 - /*
1067 - * We need to load up the checkpointed state for the guest.
1068 - * We need to do this early as it will blow away any GPRs, VSRs and
1069 - * some SPRs.
1070 - */
1071 -
1072 - mr r31, r4
1073 - addi r3, r31, VCPU_FPRS_TM
1074 - bl load_fp_state
1075 - addi r3, r31, VCPU_VRS_TM
1076 - bl load_vr_state
1077 - mr r4, r31
1078 - lwz r7, VCPU_VRSAVE_TM(r4)
1079 - mtspr SPRN_VRSAVE, r7
1080 -
1081 - ld r5, VCPU_LR_TM(r4)
1082 - lwz r6, VCPU_CR_TM(r4)
1083 - ld r7, VCPU_CTR_TM(r4)
1084 - ld r8, VCPU_AMR_TM(r4)
1085 - ld r9, VCPU_TAR_TM(r4)
1086 - mtlr r5
1087 - mtcr r6
1088 - mtctr r7
1089 - mtspr SPRN_AMR, r8
1090 - mtspr SPRN_TAR, r9
1091 -
1092 - /*
1093 - * Load up PPR and DSCR values but don't put them in the actual SPRs
1094 - * till the last moment to avoid running with userspace PPR and DSCR for
1095 - * too long.
1096 - */
1097 - ld r29, VCPU_DSCR_TM(r4)
1098 - ld r30, VCPU_PPR_TM(r4)
1099 -
1100 - std r2, PACATMSCRATCH(r13) /* Save TOC */
1101 -
1102 - /* Clear the MSR RI since r1, r13 are all going to be foobar. */
1103 - li r5, 0
1104 - mtmsrd r5, 1
1105 -
1106 - /* Load GPRs r0-r28 */
1107 - reg = 0
1108 - .rept 29
1109 - ld reg, VCPU_GPRS_TM(reg)(r31)
1110 - reg = reg + 1
1111 - .endr
1112 -
1113 - mtspr SPRN_DSCR, r29
1114 - mtspr SPRN_PPR, r30
1115 -
1116 - /* Load final GPRs */
1117 - ld 29, VCPU_GPRS_TM(29)(r31)
1118 - ld 30, VCPU_GPRS_TM(30)(r31)
1119 - ld 31, VCPU_GPRS_TM(31)(r31)
1120 -
1121 - /* TM checkpointed state is now setup. All GPRs are now volatile. */
1122 - TRECHKPT
1123 -
1124 - /* Now let's get back the state we need. */
1125 - HMT_MEDIUM
1126 - GET_PACA(r13)
1127 - ld r29, HSTATE_DSCR(r13)
1128 - mtspr SPRN_DSCR, r29
1129 - ld r4, HSTATE_KVM_VCPU(r13)
1130 - ld r1, HSTATE_HOST_R1(r13)
1131 - ld r2, PACATMSCRATCH(r13)
1132 -
1133 - /* Set the MSR RI since we have our registers back. */
1134 - li r5, MSR_RI
1135 - mtmsrd r5, 1
1136 -skip_tm:
1137 + bl kvmppc_restore_tm
1138 +END_FTR_SECTION_IFSET(CPU_FTR_TM)
1139 #endif
1140
1141 /* Load guest PMU registers */
1142 @@ -841,12 +737,6 @@ BEGIN_FTR_SECTION
1143 /* Skip next section on POWER7 */
1144 b 8f
1145 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
1146 - /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
1147 - mfmsr r8
1148 - li r0, 1
1149 - rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
1150 - mtmsrd r8
1151 -
1152 /* Load up POWER8-specific registers */
1153 ld r5, VCPU_IAMR(r4)
1154 lwz r6, VCPU_PSPB(r4)
1155 @@ -1436,106 +1326,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
1156
1157 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1158 BEGIN_FTR_SECTION
1159 - b 2f
1160 -END_FTR_SECTION_IFCLR(CPU_FTR_TM)
1161 - /* Turn on TM. */
1162 - mfmsr r8
1163 - li r0, 1
1164 - rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
1165 - mtmsrd r8
1166 -
1167 - ld r5, VCPU_MSR(r9)
1168 - rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
1169 - beq 1f /* TM not active in guest. */
1170 -
1171 - li r3, TM_CAUSE_KVM_RESCHED
1172 -
1173 - /* Clear the MSR RI since r1, r13 are all going to be foobar. */
1174 - li r5, 0
1175 - mtmsrd r5, 1
1176 -
1177 - /* All GPRs are volatile at this point. */
1178 - TRECLAIM(R3)
1179 -
1180 - /* Temporarily store r13 and r9 so we have some regs to play with */
1181 - SET_SCRATCH0(r13)
1182 - GET_PACA(r13)
1183 - std r9, PACATMSCRATCH(r13)
1184 - ld r9, HSTATE_KVM_VCPU(r13)
1185 -
1186 - /* Get a few more GPRs free. */
1187 - std r29, VCPU_GPRS_TM(29)(r9)
1188 - std r30, VCPU_GPRS_TM(30)(r9)
1189 - std r31, VCPU_GPRS_TM(31)(r9)
1190 -
1191 - /* Save away PPR and DSCR soon so don't run with user values. */
1192 - mfspr r31, SPRN_PPR
1193 - HMT_MEDIUM
1194 - mfspr r30, SPRN_DSCR
1195 - ld r29, HSTATE_DSCR(r13)
1196 - mtspr SPRN_DSCR, r29
1197 -
1198 - /* Save all but r9, r13 & r29-r31 */
1199 - reg = 0
1200 - .rept 29
1201 - .if (reg != 9) && (reg != 13)
1202 - std reg, VCPU_GPRS_TM(reg)(r9)
1203 - .endif
1204 - reg = reg + 1
1205 - .endr
1206 - /* ... now save r13 */
1207 - GET_SCRATCH0(r4)
1208 - std r4, VCPU_GPRS_TM(13)(r9)
1209 - /* ... and save r9 */
1210 - ld r4, PACATMSCRATCH(r13)
1211 - std r4, VCPU_GPRS_TM(9)(r9)
1212 -
1213 - /* Reload stack pointer and TOC. */
1214 - ld r1, HSTATE_HOST_R1(r13)
1215 - ld r2, PACATOC(r13)
1216 -
1217 - /* Set MSR RI now we have r1 and r13 back. */
1218 - li r5, MSR_RI
1219 - mtmsrd r5, 1
1220 -
1221 - /* Save away checkpinted SPRs. */
1222 - std r31, VCPU_PPR_TM(r9)
1223 - std r30, VCPU_DSCR_TM(r9)
1224 - mflr r5
1225 - mfcr r6
1226 - mfctr r7
1227 - mfspr r8, SPRN_AMR
1228 - mfspr r10, SPRN_TAR
1229 - std r5, VCPU_LR_TM(r9)
1230 - stw r6, VCPU_CR_TM(r9)
1231 - std r7, VCPU_CTR_TM(r9)
1232 - std r8, VCPU_AMR_TM(r9)
1233 - std r10, VCPU_TAR_TM(r9)
1234 -
1235 - /* Restore r12 as trap number. */
1236 - lwz r12, VCPU_TRAP(r9)
1237 -
1238 - /* Save FP/VSX. */
1239 - addi r3, r9, VCPU_FPRS_TM
1240 - bl store_fp_state
1241 - addi r3, r9, VCPU_VRS_TM
1242 - bl store_vr_state
1243 - mfspr r6, SPRN_VRSAVE
1244 - stw r6, VCPU_VRSAVE_TM(r9)
1245 -1:
1246 - /*
1247 - * We need to save these SPRs after the treclaim so that the software
1248 - * error code is recorded correctly in the TEXASR. Also the user may
1249 - * change these outside of a transaction, so they must always be
1250 - * context switched.
1251 - */
1252 - mfspr r5, SPRN_TFHAR
1253 - mfspr r6, SPRN_TFIAR
1254 - mfspr r7, SPRN_TEXASR
1255 - std r5, VCPU_TFHAR(r9)
1256 - std r6, VCPU_TFIAR(r9)
1257 - std r7, VCPU_TEXASR(r9)
1258 -2:
1259 + bl kvmppc_save_tm
1260 +END_FTR_SECTION_IFSET(CPU_FTR_TM)
1261 #endif
1262
1263 /* Increment yield count if they have a VPA */
1264 @@ -2245,6 +2037,13 @@ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
1265 /* save FP state */
1266 bl kvmppc_save_fp
1267
1268 +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1269 +BEGIN_FTR_SECTION
1270 + ld r9, HSTATE_KVM_VCPU(r13)
1271 + bl kvmppc_save_tm
1272 +END_FTR_SECTION_IFSET(CPU_FTR_TM)
1273 +#endif
1274 +
1275 /*
1276 * Set DEC to the smaller of DEC and HDEC, so that we wake
1277 * no later than the end of our timeslice (HDEC interrupts
1278 @@ -2321,6 +2120,12 @@ kvm_end_cede:
1279 bl kvmhv_accumulate_time
1280 #endif
1281
1282 +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1283 +BEGIN_FTR_SECTION
1284 + bl kvmppc_restore_tm
1285 +END_FTR_SECTION_IFSET(CPU_FTR_TM)
1286 +#endif
1287 +
1288 /* load up FP state */
1289 bl kvmppc_load_fp
1290
1291 @@ -2631,6 +2436,239 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1292 mr r4,r31
1293 blr
1294
1295 +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1296 +/*
1297 + * Save transactional state and TM-related registers.
1298 + * Called with r9 pointing to the vcpu struct.
1299 + * This can modify all checkpointed registers, but
1300 + * restores r1, r2 and r9 (vcpu pointer) before exit.
1301 + */
1302 +kvmppc_save_tm:
1303 + mflr r0
1304 + std r0, PPC_LR_STKOFF(r1)
1305 +
1306 + /* Turn on TM. */
1307 + mfmsr r8
1308 + li r0, 1
1309 + rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
1310 + mtmsrd r8
1311 +
1312 + ld r5, VCPU_MSR(r9)
1313 + rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
1314 + beq 1f /* TM not active in guest. */
1315 +
1316 + std r1, HSTATE_HOST_R1(r13)
1317 + li r3, TM_CAUSE_KVM_RESCHED
1318 +
1319 + /* Clear the MSR RI since r1, r13 are all going to be foobar. */
1320 + li r5, 0
1321 + mtmsrd r5, 1
1322 +
1323 + /* All GPRs are volatile at this point. */
1324 + TRECLAIM(R3)
1325 +
1326 + /* Temporarily store r13 and r9 so we have some regs to play with */
1327 + SET_SCRATCH0(r13)
1328 + GET_PACA(r13)
1329 + std r9, PACATMSCRATCH(r13)
1330 + ld r9, HSTATE_KVM_VCPU(r13)
1331 +
1332 + /* Get a few more GPRs free. */
1333 + std r29, VCPU_GPRS_TM(29)(r9)
1334 + std r30, VCPU_GPRS_TM(30)(r9)
1335 + std r31, VCPU_GPRS_TM(31)(r9)
1336 +
1337 + /* Save away PPR and DSCR soon so don't run with user values. */
1338 + mfspr r31, SPRN_PPR
1339 + HMT_MEDIUM
1340 + mfspr r30, SPRN_DSCR
1341 + ld r29, HSTATE_DSCR(r13)
1342 + mtspr SPRN_DSCR, r29
1343 +
1344 + /* Save all but r9, r13 & r29-r31 */
1345 + reg = 0
1346 + .rept 29
1347 + .if (reg != 9) && (reg != 13)
1348 + std reg, VCPU_GPRS_TM(reg)(r9)
1349 + .endif
1350 + reg = reg + 1
1351 + .endr
1352 + /* ... now save r13 */
1353 + GET_SCRATCH0(r4)
1354 + std r4, VCPU_GPRS_TM(13)(r9)
1355 + /* ... and save r9 */
1356 + ld r4, PACATMSCRATCH(r13)
1357 + std r4, VCPU_GPRS_TM(9)(r9)
1358 +
1359 + /* Reload stack pointer and TOC. */
1360 + ld r1, HSTATE_HOST_R1(r13)
1361 + ld r2, PACATOC(r13)
1362 +
1363 + /* Set MSR RI now we have r1 and r13 back. */
1364 + li r5, MSR_RI
1365 + mtmsrd r5, 1
1366 +
1367 + /* Save away checkpinted SPRs. */
1368 + std r31, VCPU_PPR_TM(r9)
1369 + std r30, VCPU_DSCR_TM(r9)
1370 + mflr r5
1371 + mfcr r6
1372 + mfctr r7
1373 + mfspr r8, SPRN_AMR
1374 + mfspr r10, SPRN_TAR
1375 + std r5, VCPU_LR_TM(r9)
1376 + stw r6, VCPU_CR_TM(r9)
1377 + std r7, VCPU_CTR_TM(r9)
1378 + std r8, VCPU_AMR_TM(r9)
1379 + std r10, VCPU_TAR_TM(r9)
1380 +
1381 + /* Restore r12 as trap number. */
1382 + lwz r12, VCPU_TRAP(r9)
1383 +
1384 + /* Save FP/VSX. */
1385 + addi r3, r9, VCPU_FPRS_TM
1386 + bl store_fp_state
1387 + addi r3, r9, VCPU_VRS_TM
1388 + bl store_vr_state
1389 + mfspr r6, SPRN_VRSAVE
1390 + stw r6, VCPU_VRSAVE_TM(r9)
1391 +1:
1392 + /*
1393 + * We need to save these SPRs after the treclaim so that the software
1394 + * error code is recorded correctly in the TEXASR. Also the user may
1395 + * change these outside of a transaction, so they must always be
1396 + * context switched.
1397 + */
1398 + mfspr r5, SPRN_TFHAR
1399 + mfspr r6, SPRN_TFIAR
1400 + mfspr r7, SPRN_TEXASR
1401 + std r5, VCPU_TFHAR(r9)
1402 + std r6, VCPU_TFIAR(r9)
1403 + std r7, VCPU_TEXASR(r9)
1404 +
1405 + ld r0, PPC_LR_STKOFF(r1)
1406 + mtlr r0
1407 + blr
1408 +
1409 +/*
1410 + * Restore transactional state and TM-related registers.
1411 + * Called with r4 pointing to the vcpu struct.
1412 + * This potentially modifies all checkpointed registers.
1413 + * It restores r1, r2, r4 from the PACA.
1414 + */
1415 +kvmppc_restore_tm:
1416 + mflr r0
1417 + std r0, PPC_LR_STKOFF(r1)
1418 +
1419 + /* Turn on TM/FP/VSX/VMX so we can restore them. */
1420 + mfmsr r5
1421 + li r6, MSR_TM >> 32
1422 + sldi r6, r6, 32
1423 + or r5, r5, r6
1424 + ori r5, r5, MSR_FP
1425 + oris r5, r5, (MSR_VEC | MSR_VSX)@h
1426 + mtmsrd r5
1427 +
1428 + /*
1429 + * The user may change these outside of a transaction, so they must
1430 + * always be context switched.
1431 + */
1432 + ld r5, VCPU_TFHAR(r4)
1433 + ld r6, VCPU_TFIAR(r4)
1434 + ld r7, VCPU_TEXASR(r4)
1435 + mtspr SPRN_TFHAR, r5
1436 + mtspr SPRN_TFIAR, r6
1437 + mtspr SPRN_TEXASR, r7
1438 +
1439 + ld r5, VCPU_MSR(r4)
1440 + rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
1441 + beqlr /* TM not active in guest */
1442 + std r1, HSTATE_HOST_R1(r13)
1443 +
1444 + /* Make sure the failure summary is set, otherwise we'll program check
1445 + * when we trechkpt. It's possible that this might have been not set
1446 + * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
1447 + * host.
1448 + */
1449 + oris r7, r7, (TEXASR_FS)@h
1450 + mtspr SPRN_TEXASR, r7
1451 +
1452 + /*
1453 + * We need to load up the checkpointed state for the guest.
1454 + * We need to do this early as it will blow away any GPRs, VSRs and
1455 + * some SPRs.
1456 + */
1457 +
1458 + mr r31, r4
1459 + addi r3, r31, VCPU_FPRS_TM
1460 + bl load_fp_state
1461 + addi r3, r31, VCPU_VRS_TM
1462 + bl load_vr_state
1463 + mr r4, r31
1464 + lwz r7, VCPU_VRSAVE_TM(r4)
1465 + mtspr SPRN_VRSAVE, r7
1466 +
1467 + ld r5, VCPU_LR_TM(r4)
1468 + lwz r6, VCPU_CR_TM(r4)
1469 + ld r7, VCPU_CTR_TM(r4)
1470 + ld r8, VCPU_AMR_TM(r4)
1471 + ld r9, VCPU_TAR_TM(r4)
1472 + mtlr r5
1473 + mtcr r6
1474 + mtctr r7
1475 + mtspr SPRN_AMR, r8
1476 + mtspr SPRN_TAR, r9
1477 +
1478 + /*
1479 + * Load up PPR and DSCR values but don't put them in the actual SPRs
1480 + * till the last moment to avoid running with userspace PPR and DSCR for
1481 + * too long.
1482 + */
1483 + ld r29, VCPU_DSCR_TM(r4)
1484 + ld r30, VCPU_PPR_TM(r4)
1485 +
1486 + std r2, PACATMSCRATCH(r13) /* Save TOC */
1487 +
1488 + /* Clear the MSR RI since r1, r13 are all going to be foobar. */
1489 + li r5, 0
1490 + mtmsrd r5, 1
1491 +
1492 + /* Load GPRs r0-r28 */
1493 + reg = 0
1494 + .rept 29
1495 + ld reg, VCPU_GPRS_TM(reg)(r31)
1496 + reg = reg + 1
1497 + .endr
1498 +
1499 + mtspr SPRN_DSCR, r29
1500 + mtspr SPRN_PPR, r30
1501 +
1502 + /* Load final GPRs */
1503 + ld 29, VCPU_GPRS_TM(29)(r31)
1504 + ld 30, VCPU_GPRS_TM(30)(r31)
1505 + ld 31, VCPU_GPRS_TM(31)(r31)
1506 +
1507 + /* TM checkpointed state is now setup. All GPRs are now volatile. */
1508 + TRECHKPT
1509 +
1510 + /* Now let's get back the state we need. */
1511 + HMT_MEDIUM
1512 + GET_PACA(r13)
1513 + ld r29, HSTATE_DSCR(r13)
1514 + mtspr SPRN_DSCR, r29
1515 + ld r4, HSTATE_KVM_VCPU(r13)
1516 + ld r1, HSTATE_HOST_R1(r13)
1517 + ld r2, PACATMSCRATCH(r13)
1518 +
1519 + /* Set the MSR RI since we have our registers back. */
1520 + li r5, MSR_RI
1521 + mtmsrd r5, 1
1522 +
1523 + ld r0, PPC_LR_STKOFF(r1)
1524 + mtlr r0
1525 + blr
1526 +#endif
1527 +
1528 /*
1529 * We come here if we get any exception or interrupt while we are
1530 * executing host real mode code while in guest MMU context.
1531 diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
1532 index 18d2beb89340..42b968a85863 100644
1533 --- a/arch/s390/include/asm/pgtable.h
1534 +++ b/arch/s390/include/asm/pgtable.h
1535 @@ -893,7 +893,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
1536 bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
1537 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
1538 unsigned char key, bool nq);
1539 -unsigned char get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
1540 +unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
1541
1542 /*
1543 * Certain architectures need to do special things when PTEs
1544 diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
1545 index a2e6ef32e054..0a2031618f7f 100644
1546 --- a/arch/s390/include/asm/tlbflush.h
1547 +++ b/arch/s390/include/asm/tlbflush.h
1548 @@ -81,7 +81,8 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
1549 }
1550
1551 /*
1552 - * Flush TLB entries for a specific ASCE on all CPUs.
1553 + * Flush TLB entries for a specific ASCE on all CPUs. Should never be used
1554 + * when more than one asce (e.g. gmap) ran on this mm.
1555 */
1556 static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
1557 {
1558 diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
1559 index cace818d86eb..313c3b8cf7dd 100644
1560 --- a/arch/s390/mm/gmap.c
1561 +++ b/arch/s390/mm/gmap.c
1562 @@ -85,7 +85,7 @@ EXPORT_SYMBOL_GPL(gmap_alloc);
1563 static void gmap_flush_tlb(struct gmap *gmap)
1564 {
1565 if (MACHINE_HAS_IDTE)
1566 - __tlb_flush_asce(gmap->mm, gmap->asce);
1567 + __tlb_flush_idte(gmap->asce);
1568 else
1569 __tlb_flush_global();
1570 }
1571 @@ -124,7 +124,7 @@ void gmap_free(struct gmap *gmap)
1572
1573 /* Flush tlb. */
1574 if (MACHINE_HAS_IDTE)
1575 - __tlb_flush_asce(gmap->mm, gmap->asce);
1576 + __tlb_flush_idte(gmap->asce);
1577 else
1578 __tlb_flush_global();
1579
1580 diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
1581 index 9f0ce0e6eeb4..ebb4f87112f4 100644
1582 --- a/arch/s390/mm/pgtable.c
1583 +++ b/arch/s390/mm/pgtable.c
1584 @@ -543,7 +543,7 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
1585 }
1586 EXPORT_SYMBOL(set_guest_storage_key);
1587
1588 -unsigned char get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
1589 +unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
1590 {
1591 unsigned char key;
1592 spinlock_t *ptl;
1593 diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
1594 index 8acaf4e384c0..a86d7cc2c2d8 100644
1595 --- a/arch/um/os-Linux/signal.c
1596 +++ b/arch/um/os-Linux/signal.c
1597 @@ -15,6 +15,7 @@
1598 #include <kern_util.h>
1599 #include <os.h>
1600 #include <sysdep/mcontext.h>
1601 +#include <um_malloc.h>
1602
1603 void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
1604 [SIGTRAP] = relay_signal,
1605 @@ -32,7 +33,7 @@ static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
1606 struct uml_pt_regs *r;
1607 int save_errno = errno;
1608
1609 - r = malloc(sizeof(struct uml_pt_regs));
1610 + r = uml_kmalloc(sizeof(struct uml_pt_regs), UM_GFP_ATOMIC);
1611 if (!r)
1612 panic("out of memory");
1613
1614 @@ -91,7 +92,7 @@ static void timer_real_alarm_handler(mcontext_t *mc)
1615 {
1616 struct uml_pt_regs *regs;
1617
1618 - regs = malloc(sizeof(struct uml_pt_regs));
1619 + regs = uml_kmalloc(sizeof(struct uml_pt_regs), UM_GFP_ATOMIC);
1620 if (!regs)
1621 panic("out of memory");
1622
1623 diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h
1624 index e35632ef23c7..62dfc644c908 100644
1625 --- a/arch/unicore32/include/asm/mmu_context.h
1626 +++ b/arch/unicore32/include/asm/mmu_context.h
1627 @@ -98,7 +98,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
1628 }
1629
1630 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
1631 - bool write, bool foreign)
1632 + bool write, bool execute, bool foreign)
1633 {
1634 /* by default, allow everything */
1635 return true;
1636 diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
1637 index 874e8bd64d1d..bd136ac140be 100644
1638 --- a/arch/x86/events/intel/uncore_snbep.c
1639 +++ b/arch/x86/events/intel/uncore_snbep.c
1640 @@ -2546,7 +2546,7 @@ void hswep_uncore_cpu_init(void)
1641
1642 static struct intel_uncore_type hswep_uncore_ha = {
1643 .name = "ha",
1644 - .num_counters = 5,
1645 + .num_counters = 4,
1646 .num_boxes = 2,
1647 .perf_ctr_bits = 48,
1648 SNBEP_UNCORE_PCI_COMMON_INIT(),
1649 @@ -2565,7 +2565,7 @@ static struct uncore_event_desc hswep_uncore_imc_events[] = {
1650
1651 static struct intel_uncore_type hswep_uncore_imc = {
1652 .name = "imc",
1653 - .num_counters = 5,
1654 + .num_counters = 4,
1655 .num_boxes = 8,
1656 .perf_ctr_bits = 48,
1657 .fixed_ctr_bits = 48,
1658 @@ -2611,7 +2611,7 @@ static struct intel_uncore_type hswep_uncore_irp = {
1659
1660 static struct intel_uncore_type hswep_uncore_qpi = {
1661 .name = "qpi",
1662 - .num_counters = 5,
1663 + .num_counters = 4,
1664 .num_boxes = 3,
1665 .perf_ctr_bits = 48,
1666 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1667 @@ -2693,7 +2693,7 @@ static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
1668
1669 static struct intel_uncore_type hswep_uncore_r3qpi = {
1670 .name = "r3qpi",
1671 - .num_counters = 4,
1672 + .num_counters = 3,
1673 .num_boxes = 3,
1674 .perf_ctr_bits = 44,
1675 .constraints = hswep_uncore_r3qpi_constraints,
1676 @@ -2892,7 +2892,7 @@ static struct intel_uncore_type bdx_uncore_ha = {
1677
1678 static struct intel_uncore_type bdx_uncore_imc = {
1679 .name = "imc",
1680 - .num_counters = 5,
1681 + .num_counters = 4,
1682 .num_boxes = 8,
1683 .perf_ctr_bits = 48,
1684 .fixed_ctr_bits = 48,
1685 diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
1686 index c146f3c262c3..0149ac59c273 100644
1687 --- a/arch/x86/kvm/mtrr.c
1688 +++ b/arch/x86/kvm/mtrr.c
1689 @@ -539,6 +539,7 @@ static void mtrr_lookup_var_start(struct mtrr_iter *iter)
1690
1691 iter->fixed = false;
1692 iter->start_max = iter->start;
1693 + iter->range = NULL;
1694 iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);
1695
1696 __mtrr_lookup_var_next(iter);
1697 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1698 index 64a79f271276..8326d6891395 100644
1699 --- a/arch/x86/kvm/vmx.c
1700 +++ b/arch/x86/kvm/vmx.c
1701 @@ -8224,6 +8224,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
1702 if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
1703 (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
1704 exit_reason != EXIT_REASON_EPT_VIOLATION &&
1705 + exit_reason != EXIT_REASON_PML_FULL &&
1706 exit_reason != EXIT_REASON_TASK_SWITCH)) {
1707 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1708 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
1709 @@ -8854,6 +8855,22 @@ static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
1710 put_cpu();
1711 }
1712
1713 +/*
1714 + * Ensure that the current vmcs of the logical processor is the
1715 + * vmcs01 of the vcpu before calling free_nested().
1716 + */
1717 +static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
1718 +{
1719 + struct vcpu_vmx *vmx = to_vmx(vcpu);
1720 + int r;
1721 +
1722 + r = vcpu_load(vcpu);
1723 + BUG_ON(r);
1724 + vmx_load_vmcs01(vcpu);
1725 + free_nested(vmx);
1726 + vcpu_put(vcpu);
1727 +}
1728 +
1729 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
1730 {
1731 struct vcpu_vmx *vmx = to_vmx(vcpu);
1732 @@ -8862,8 +8879,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
1733 vmx_destroy_pml_buffer(vmx);
1734 free_vpid(vmx->vpid);
1735 leave_guest_mode(vcpu);
1736 - vmx_load_vmcs01(vcpu);
1737 - free_nested(vmx);
1738 + vmx_free_vcpu_nested(vcpu);
1739 free_loaded_vmcs(vmx->loaded_vmcs);
1740 kfree(vmx->guest_msrs);
1741 kvm_vcpu_uninit(vcpu);
1742 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1743 index 7da5dd2057a9..fea2c5717ec1 100644
1744 --- a/arch/x86/kvm/x86.c
1745 +++ b/arch/x86/kvm/x86.c
1746 @@ -91,6 +91,7 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
1747
1748 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
1749 static void process_nmi(struct kvm_vcpu *vcpu);
1750 +static void process_smi(struct kvm_vcpu *vcpu);
1751 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
1752
1753 struct kvm_x86_ops *kvm_x86_ops __read_mostly;
1754 @@ -5296,13 +5297,8 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu)
1755 /* This is a good place to trace that we are exiting SMM. */
1756 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);
1757
1758 - if (unlikely(vcpu->arch.smi_pending)) {
1759 - kvm_make_request(KVM_REQ_SMI, vcpu);
1760 - vcpu->arch.smi_pending = 0;
1761 - } else {
1762 - /* Process a latched INIT, if any. */
1763 - kvm_make_request(KVM_REQ_EVENT, vcpu);
1764 - }
1765 + /* Process a latched INIT or SMI, if any. */
1766 + kvm_make_request(KVM_REQ_EVENT, vcpu);
1767 }
1768
1769 kvm_mmu_reset_context(vcpu);
1770 @@ -6102,7 +6098,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
1771 }
1772
1773 /* try to inject new event if pending */
1774 - if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
1775 + if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
1776 + vcpu->arch.smi_pending = false;
1777 + process_smi(vcpu);
1778 + } else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
1779 --vcpu->arch.nmi_pending;
1780 vcpu->arch.nmi_injected = true;
1781 kvm_x86_ops->set_nmi(vcpu);
1782 @@ -6312,11 +6311,6 @@ static void process_smi(struct kvm_vcpu *vcpu)
1783 char buf[512];
1784 u32 cr0;
1785
1786 - if (is_smm(vcpu)) {
1787 - vcpu->arch.smi_pending = true;
1788 - return;
1789 - }
1790 -
1791 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
1792 vcpu->arch.hflags |= HF_SMM_MASK;
1793 memset(buf, 0, 512);
1794 @@ -6379,6 +6373,12 @@ static void process_smi(struct kvm_vcpu *vcpu)
1795 kvm_mmu_reset_context(vcpu);
1796 }
1797
1798 +static void process_smi_request(struct kvm_vcpu *vcpu)
1799 +{
1800 + vcpu->arch.smi_pending = true;
1801 + kvm_make_request(KVM_REQ_EVENT, vcpu);
1802 +}
1803 +
1804 void kvm_make_scan_ioapic_request(struct kvm *kvm)
1805 {
1806 kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
1807 @@ -6500,7 +6500,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
1808 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
1809 record_steal_time(vcpu);
1810 if (kvm_check_request(KVM_REQ_SMI, vcpu))
1811 - process_smi(vcpu);
1812 + process_smi_request(vcpu);
1813 if (kvm_check_request(KVM_REQ_NMI, vcpu))
1814 process_nmi(vcpu);
1815 if (kvm_check_request(KVM_REQ_PMU, vcpu))
1816 @@ -6573,8 +6573,18 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
1817
1818 if (inject_pending_event(vcpu, req_int_win) != 0)
1819 req_immediate_exit = true;
1820 - /* enable NMI/IRQ window open exits if needed */
1821 else {
1822 + /* Enable NMI/IRQ window open exits if needed.
1823 + *
1824 + * SMIs have two cases: 1) they can be nested, and
1825 + * then there is nothing to do here because RSM will
1826 + * cause a vmexit anyway; 2) or the SMI can be pending
1827 + * because inject_pending_event has completed the
1828 + * injection of an IRQ or NMI from the previous vmexit,
1829 + * and then we request an immediate exit to inject the SMI.
1830 + */
1831 + if (vcpu->arch.smi_pending && !is_smm(vcpu))
1832 + req_immediate_exit = true;
1833 if (vcpu->arch.nmi_pending)
1834 kvm_x86_ops->enable_nmi_window(vcpu);
1835 if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
1836 @@ -6625,8 +6635,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
1837
1838 kvm_load_guest_xcr0(vcpu);
1839
1840 - if (req_immediate_exit)
1841 + if (req_immediate_exit) {
1842 + kvm_make_request(KVM_REQ_EVENT, vcpu);
1843 smp_send_reschedule(vcpu->cpu);
1844 + }
1845
1846 trace_kvm_entry(vcpu->vcpu_id);
1847 wait_lapic_expire(vcpu);
1848 @@ -7427,6 +7439,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
1849 {
1850 vcpu->arch.hflags = 0;
1851
1852 + vcpu->arch.smi_pending = 0;
1853 atomic_set(&vcpu->arch.nmi_queued, 0);
1854 vcpu->arch.nmi_pending = 0;
1855 vcpu->arch.nmi_injected = false;
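
Read together, the x86.c hunks stop process_smi() from latching inside itself: KVM_REQ_SMI now only sets smi_pending, and the actual mode switch happens in inject_pending_event(), where a pending SMI is taken first whenever the vCPU is outside SMM, ahead of NMI and IRQ. A stand-alone sketch of that priority order (hypothetical toy types, not KVM's):

	#include <stdbool.h>
	#include <stdio.h>

	struct vcpu {
		bool smi_pending, nmi_pending, irq_pending, in_smm;
	};

	static const char *next_event(struct vcpu *v)
	{
		if (v->smi_pending && !v->in_smm) {
			v->smi_pending = false;	/* consume the latched SMI */
			return "SMI";
		}
		if (v->nmi_pending) {
			v->nmi_pending = false;
			return "NMI";
		}
		if (v->irq_pending) {
			v->irq_pending = false;
			return "IRQ";
		}
		return "none";
	}

	int main(void)
	{
		struct vcpu v = { .smi_pending = true, .nmi_pending = true };
		const char *first = next_event(&v);
		const char *second = next_event(&v);

		printf("%s then %s\n", first, second);	/* "SMI then NMI" */
		return 0;
	}
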
1856 diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
1857 index 8b93e634af84..ae97f24a4371 100644
1858 --- a/arch/x86/pci/intel_mid_pci.c
1859 +++ b/arch/x86/pci/intel_mid_pci.c
1860 @@ -37,6 +37,7 @@
1861
1862 /* Quirks for the listed devices */
1863 #define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190
1864 +#define PCI_DEVICE_ID_INTEL_MRFL_HSU 0x1191
1865
1866 /* Fixed BAR fields */
1867 #define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00 /* Fixed BAR (TBD) */
1868 @@ -225,13 +226,20 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
1869 /* Special treatment for IRQ0 */
1870 if (dev->irq == 0) {
1871 /*
1872 + * Skip the HS UART common registers device since it has
1873 + * IRQ0 assigned but is not used by the kernel.
1874 + */
1875 + if (dev->device == PCI_DEVICE_ID_INTEL_MRFL_HSU)
1876 + return -EBUSY;
1877 + /*
1878 * TNG has IRQ0 assigned to eMMC controller. But there
1879 * are also other devices with bogus PCI configuration
1880 * that have IRQ0 assigned. This check ensures that
1881 - * eMMC gets it.
1882 + * eMMC gets it. The rest of devices still could be
1883 + * enabled without interrupt line being allocated.
1884 */
1885 if (dev->device != PCI_DEVICE_ID_INTEL_MRFL_MMC)
1886 - return -EBUSY;
1887 + return 0;
1888 }
1889 break;
1890 default:
1891 diff --git a/block/bio.c b/block/bio.c
1892 index 0e4aa42bc30d..462386908835 100644
1893 --- a/block/bio.c
1894 +++ b/block/bio.c
1895 @@ -579,6 +579,8 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
1896 bio->bi_rw = bio_src->bi_rw;
1897 bio->bi_iter = bio_src->bi_iter;
1898 bio->bi_io_vec = bio_src->bi_io_vec;
1899 +
1900 + bio_clone_blkcg_association(bio, bio_src);
1901 }
1902 EXPORT_SYMBOL(__bio_clone_fast);
1903
1904 @@ -684,6 +686,8 @@ integrity_clone:
1905 }
1906 }
1907
1908 + bio_clone_blkcg_association(bio, bio_src);
1909 +
1910 return bio;
1911 }
1912 EXPORT_SYMBOL(bio_clone_bioset);
1913 @@ -2005,6 +2009,17 @@ void bio_disassociate_task(struct bio *bio)
1914 }
1915 }
1916
1917 +/**
1918 + * bio_clone_blkcg_association - clone blkcg association from src to dst bio
1919 + * @dst: destination bio
1920 + * @src: source bio
1921 + */
1922 +void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
1923 +{
1924 + if (src->bi_css)
1925 + WARN_ON(bio_associate_blkcg(dst, src->bi_css));
1926 +}
1927 +
1928 #endif /* CONFIG_BLK_CGROUP */
1929
1930 static void __init biovec_init_slabs(void)
1931 diff --git a/block/genhd.c b/block/genhd.c
1932 index 3eebd256b765..086f1a357734 100644
1933 --- a/block/genhd.c
1934 +++ b/block/genhd.c
1935 @@ -613,7 +613,7 @@ void add_disk(struct gendisk *disk)
1936
1937 /* Register BDI before referencing it from bdev */
1938 bdi = &disk->queue->backing_dev_info;
1939 - bdi_register_dev(bdi, disk_devt(disk));
1940 + bdi_register_owner(bdi, disk_to_dev(disk));
1941
1942 blk_register_region(disk_devt(disk), disk->minors, NULL,
1943 exact_match, exact_lock, disk);
1944 diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
1945 index 290d6f5be44b..f4218df00883 100644
1946 --- a/drivers/acpi/ec.c
1947 +++ b/drivers/acpi/ec.c
1948 @@ -101,6 +101,7 @@ enum ec_command {
1949 #define ACPI_EC_UDELAY_POLL 550 /* Wait 1ms for EC transaction polling */
1950 #define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
1951 * when trying to clear the EC */
1952 +#define ACPI_EC_MAX_QUERIES 16 /* Maximum number of parallel queries */
1953
1954 enum {
1955 EC_FLAGS_QUERY_PENDING, /* Query is pending */
1956 @@ -121,6 +122,10 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
1957 module_param(ec_delay, uint, 0644);
1958 MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
1959
1960 +static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES;
1961 +module_param(ec_max_queries, uint, 0644);
1962 +MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations");
1963 +
1964 static bool ec_busy_polling __read_mostly;
1965 module_param(ec_busy_polling, bool, 0644);
1966 MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");
1967 @@ -174,6 +179,7 @@ static void acpi_ec_event_processor(struct work_struct *work);
1968
1969 struct acpi_ec *boot_ec, *first_ec;
1970 EXPORT_SYMBOL(first_ec);
1971 +static struct workqueue_struct *ec_query_wq;
1972
1973 static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
1974 static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
1975 @@ -1098,7 +1104,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
1976 * work queue execution.
1977 */
1978 ec_dbg_evt("Query(0x%02x) scheduled", value);
1979 - if (!schedule_work(&q->work)) {
1980 + if (!queue_work(ec_query_wq, &q->work)) {
1981 ec_dbg_evt("Query(0x%02x) overlapped", value);
1982 result = -EBUSY;
1983 }
1984 @@ -1660,15 +1666,41 @@ static struct acpi_driver acpi_ec_driver = {
1985 },
1986 };
1987
1988 +static inline int acpi_ec_query_init(void)
1989 +{
1990 + if (!ec_query_wq) {
1991 + ec_query_wq = alloc_workqueue("kec_query", 0,
1992 + ec_max_queries);
1993 + if (!ec_query_wq)
1994 + return -ENODEV;
1995 + }
1996 + return 0;
1997 +}
1998 +
1999 +static inline void acpi_ec_query_exit(void)
2000 +{
2001 + if (ec_query_wq) {
2002 + destroy_workqueue(ec_query_wq);
2003 + ec_query_wq = NULL;
2004 + }
2005 +}
2006 +
2007 int __init acpi_ec_init(void)
2008 {
2009 - int result = 0;
2010 + int result;
2011
2012 + /* register workqueue for _Qxx evaluations */
2013 + result = acpi_ec_query_init();
2014 + if (result)
2015 + goto err_exit;
2016 /* Now register the driver for the EC */
2017 result = acpi_bus_register_driver(&acpi_ec_driver);
2018 - if (result < 0)
2019 - return -ENODEV;
2020 + if (result)
2021 + goto err_exit;
2022
2023 +err_exit:
2024 + if (result)
2025 + acpi_ec_query_exit();
2026 return result;
2027 }
2028
2029 @@ -1678,5 +1710,6 @@ static void __exit acpi_ec_exit(void)
2030 {
2031
2032 acpi_bus_unregister_driver(&acpi_ec_driver);
2033 + acpi_ec_query_exit();
2034 }
2035 #endif /* 0 */
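
With a dedicated workqueue, the max_active argument of alloc_workqueue() becomes the knob behind ec_max_queries, bounding how many _Qxx handlers run concurrently instead of contending on the shared system workqueue. A minimal sketch of the same pattern (kernel-module style; all names here are hypothetical):

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *demo_wq;

	static void demo_fn(struct work_struct *work)
	{
		/* at most 4 instances of this run concurrently */
	}
	static DECLARE_WORK(demo_work, demo_fn);

	static int __init demo_init(void)
	{
		demo_wq = alloc_workqueue("demo_wq", 0, 4);	/* max_active = 4 */
		if (!demo_wq)
			return -ENOMEM;
		queue_work(demo_wq, &demo_work);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		destroy_workqueue(demo_wq);	/* drains pending work first */
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
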
2036 diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
2037 index cae5385cf499..bd46569e0e52 100644
2038 --- a/drivers/bcma/host_pci.c
2039 +++ b/drivers/bcma/host_pci.c
2040 @@ -295,6 +295,7 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
2041 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
2042 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) },
2043 { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0016) },
2044 + { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_FOXCONN, 0xe092) },
2045 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) },
2046 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
2047 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
2048 diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
2049 index 84708a5f8c52..a1dcf12d3dad 100644
2050 --- a/drivers/block/floppy.c
2051 +++ b/drivers/block/floppy.c
2052 @@ -3663,11 +3663,6 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
2053
2054 opened_bdev[drive] = bdev;
2055
2056 - if (!(mode & (FMODE_READ|FMODE_WRITE))) {
2057 - res = -EINVAL;
2058 - goto out;
2059 - }
2060 -
2061 res = -ENXIO;
2062
2063 if (!floppy_track_buffer) {
2064 @@ -3711,13 +3706,15 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
2065 if (UFDCS->rawcmd == 1)
2066 UFDCS->rawcmd = 2;
2067
2068 - UDRS->last_checked = 0;
2069 - clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
2070 - check_disk_change(bdev);
2071 - if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
2072 - goto out;
2073 - if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
2074 - goto out;
2075 + if (mode & (FMODE_READ|FMODE_WRITE)) {
2076 + UDRS->last_checked = 0;
2077 + clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
2078 + check_disk_change(bdev);
2079 + if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
2080 + goto out;
2081 + if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
2082 + goto out;
2083 + }
2084
2085 res = -EROFS;
2086
2087 diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
2088 index 25894687c168..fadba88745dc 100644
2089 --- a/drivers/bluetooth/ath3k.c
2090 +++ b/drivers/bluetooth/ath3k.c
2091 @@ -123,6 +123,7 @@ static const struct usb_device_id ath3k_table[] = {
2092 { USB_DEVICE(0x13d3, 0x3472) },
2093 { USB_DEVICE(0x13d3, 0x3474) },
2094 { USB_DEVICE(0x13d3, 0x3487) },
2095 + { USB_DEVICE(0x13d3, 0x3490) },
2096
2097 /* Atheros AR5BBU12 with sflash firmware */
2098 { USB_DEVICE(0x0489, 0xE02C) },
2099 @@ -190,6 +191,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
2100 { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
2101 { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
2102 { USB_DEVICE(0x13d3, 0x3487), .driver_info = BTUSB_ATH3012 },
2103 + { USB_DEVICE(0x13d3, 0x3490), .driver_info = BTUSB_ATH3012 },
2104
2105 /* Atheros AR5BBU22 with sflash firmware */
2106 { USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
2107 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
2108 index a3be65e6231a..9f40c3426f0c 100644
2109 --- a/drivers/bluetooth/btusb.c
2110 +++ b/drivers/bluetooth/btusb.c
2111 @@ -237,6 +237,7 @@ static const struct usb_device_id blacklist_table[] = {
2112 { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
2113 { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
2114 { USB_DEVICE(0x13d3, 0x3487), .driver_info = BTUSB_ATH3012 },
2115 + { USB_DEVICE(0x13d3, 0x3490), .driver_info = BTUSB_ATH3012 },
2116
2117 /* Atheros AR5BBU12 with sflash firmware */
2118 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
2119 diff --git a/drivers/char/random.c b/drivers/char/random.c
2120 index 87ab9f6b4112..d72c6d14a1c9 100644
2121 --- a/drivers/char/random.c
2122 +++ b/drivers/char/random.c
2123 @@ -949,6 +949,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
2124 /* award one bit for the contents of the fast pool */
2125 credit_entropy_bits(r, credit + 1);
2126 }
2127 +EXPORT_SYMBOL_GPL(add_interrupt_randomness);
2128
2129 #ifdef CONFIG_BLOCK
2130 void add_disk_randomness(struct gendisk *disk)
2131 @@ -1461,12 +1462,16 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
2132 static ssize_t
2133 urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
2134 {
2135 + static int maxwarn = 10;
2136 int ret;
2137
2138 - if (unlikely(nonblocking_pool.initialized == 0))
2139 - printk_once(KERN_NOTICE "random: %s urandom read "
2140 - "with %d bits of entropy available\n",
2141 - current->comm, nonblocking_pool.entropy_total);
2142 + if (unlikely(nonblocking_pool.initialized == 0) &&
2143 + maxwarn > 0) {
2144 + maxwarn--;
2145 + printk(KERN_NOTICE "random: %s: uninitialized urandom read "
2146 + "(%zd bytes read, %d bits of entropy available)\n",
2147 + current->comm, nbytes, nonblocking_pool.entropy_total);
2148 + }
2149
2150 nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
2151 ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
2152 @@ -1774,13 +1779,15 @@ int random_int_secret_init(void)
2153 return 0;
2154 }
2155
2156 +static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash)
2157 + __aligned(sizeof(unsigned long));
2158 +
2159 /*
2160 * Get a random word for internal kernel use only. Similar to urandom but
2161 * with the goal of minimal entropy pool depletion. As a result, the random
2162 * value is not cryptographically secure but for several uses the cost of
2163 * depleting entropy is too high
2164 */
2165 -static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
2166 unsigned int get_random_int(void)
2167 {
2168 __u32 *hash;
2169 @@ -1850,12 +1857,18 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
2170 {
2171 struct entropy_store *poolp = &input_pool;
2172
2173 - /* Suspend writing if we're above the trickle threshold.
2174 - * We'll be woken up again once below random_write_wakeup_thresh,
2175 - * or when the calling thread is about to terminate.
2176 - */
2177 - wait_event_interruptible(random_write_wait, kthread_should_stop() ||
2178 + if (unlikely(nonblocking_pool.initialized == 0))
2179 + poolp = &nonblocking_pool;
2180 + else {
2181 + /* Suspend writing if we're above the trickle
2182 + * threshold. We'll be woken up again once below
2183 + * random_write_wakeup_thresh, or when the calling
2184 + * thread is about to terminate.
2185 + */
2186 + wait_event_interruptible(random_write_wait,
2187 + kthread_should_stop() ||
2188 ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
2189 + }
2190 mix_pool_bytes(poolp, buffer, count);
2191 credit_entropy_bits(poolp, entropy);
2192 }
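
The urandom hunk swaps printk_once() for a decrementing counter, so early readers generate up to ten distinct warnings, one per offending reader with the byte count, instead of a single line. The counter pattern in plain C, illustrative only (the kernel version additionally checks that the pool is uninitialized):

	#include <stdio.h>

	static int maxwarn = 10;	/* total warnings allowed */

	static void warn_early_read(const char *comm, long nbytes, int entropy)
	{
		if (maxwarn > 0) {
			maxwarn--;
			fprintf(stderr,
				"random: %s: uninitialized urandom read "
				"(%ld bytes read, %d bits of entropy available)\n",
				comm, nbytes, entropy);
		}
	}

	int main(void)
	{
		for (int i = 0; i < 15; i++)
			warn_early_read("demo", 64, 0);	/* prints only 10 times */
		return 0;
	}
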
2193 diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
2194 index a12b31940344..e9fd1d83f9f1 100644
2195 --- a/drivers/char/tpm/tpm_crb.c
2196 +++ b/drivers/char/tpm/tpm_crb.c
2197 @@ -246,7 +246,7 @@ static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
2198
2199 /* Detect a 64 bit address on a 32 bit system */
2200 if (start != new_res.start)
2201 - return ERR_PTR(-EINVAL);
2202 + return (void __iomem *) ERR_PTR(-EINVAL);
2203
2204 if (!resource_contains(&priv->res, &new_res))
2205 return devm_ioremap_resource(dev, &new_res);
2206 diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
2207 index 8059a8d3ea36..31b77f71313f 100644
2208 --- a/drivers/clk/rockchip/clk-rk3399.c
2209 +++ b/drivers/clk/rockchip/clk-rk3399.c
2210 @@ -586,7 +586,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
2211 RK3399_CLKGATE_CON(8), 15, GFLAGS),
2212
2213 COMPOSITE(SCLK_SPDIF_REC_DPTX, "clk_spdif_rec_dptx", mux_pll_src_cpll_gpll_p, 0,
2214 - RK3399_CLKSEL_CON(32), 15, 1, MFLAGS, 0, 5, DFLAGS,
2215 + RK3399_CLKSEL_CON(32), 15, 1, MFLAGS, 8, 5, DFLAGS,
2216 RK3399_CLKGATE_CON(10), 6, GFLAGS),
2217 /* i2s */
2218 COMPOSITE(0, "clk_i2s0_div", mux_pll_src_cpll_gpll_p, 0,
2219 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
2220 index 1fa1a32928d7..1b159171f1f9 100644
2221 --- a/drivers/cpufreq/intel_pstate.c
2222 +++ b/drivers/cpufreq/intel_pstate.c
2223 @@ -944,7 +944,7 @@ static int core_get_max_pstate(void)
2224 if (err)
2225 goto skip_tar;
2226
2227 - tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
2228 + tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
2229 err = rdmsrl_safe(tdp_msr, &tdp_ratio);
2230 if (err)
2231 goto skip_tar;
2232 diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
2233 index 10c305b4a2e1..4e0f8e720ad9 100644
2234 --- a/drivers/edac/edac_mc_sysfs.c
2235 +++ b/drivers/edac/edac_mc_sysfs.c
2236 @@ -313,7 +313,6 @@ static struct device_type csrow_attr_type = {
2237 * possible dynamic channel DIMM Label attribute files
2238 *
2239 */
2240 -
2241 DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
2242 channel_dimm_label_show, channel_dimm_label_store, 0);
2243 DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
2244 @@ -326,6 +325,10 @@ DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR,
2245 channel_dimm_label_show, channel_dimm_label_store, 4);
2246 DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR,
2247 channel_dimm_label_show, channel_dimm_label_store, 5);
2248 +DEVICE_CHANNEL(ch6_dimm_label, S_IRUGO | S_IWUSR,
2249 + channel_dimm_label_show, channel_dimm_label_store, 6);
2250 +DEVICE_CHANNEL(ch7_dimm_label, S_IRUGO | S_IWUSR,
2251 + channel_dimm_label_show, channel_dimm_label_store, 7);
2252
2253 /* Total possible dynamic DIMM Label attribute file table */
2254 static struct attribute *dynamic_csrow_dimm_attr[] = {
2255 @@ -335,6 +338,8 @@ static struct attribute *dynamic_csrow_dimm_attr[] = {
2256 &dev_attr_legacy_ch3_dimm_label.attr.attr,
2257 &dev_attr_legacy_ch4_dimm_label.attr.attr,
2258 &dev_attr_legacy_ch5_dimm_label.attr.attr,
2259 + &dev_attr_legacy_ch6_dimm_label.attr.attr,
2260 + &dev_attr_legacy_ch7_dimm_label.attr.attr,
2261 NULL
2262 };
2263
2264 @@ -351,6 +356,10 @@ DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
2265 channel_ce_count_show, NULL, 4);
2266 DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
2267 channel_ce_count_show, NULL, 5);
2268 +DEVICE_CHANNEL(ch6_ce_count, S_IRUGO,
2269 + channel_ce_count_show, NULL, 6);
2270 +DEVICE_CHANNEL(ch7_ce_count, S_IRUGO,
2271 + channel_ce_count_show, NULL, 7);
2272
2273 /* Total possible dynamic ce_count attribute file table */
2274 static struct attribute *dynamic_csrow_ce_count_attr[] = {
2275 @@ -360,6 +369,8 @@ static struct attribute *dynamic_csrow_ce_count_attr[] = {
2276 &dev_attr_legacy_ch3_ce_count.attr.attr,
2277 &dev_attr_legacy_ch4_ce_count.attr.attr,
2278 &dev_attr_legacy_ch5_ce_count.attr.attr,
2279 + &dev_attr_legacy_ch6_ce_count.attr.attr,
2280 + &dev_attr_legacy_ch7_ce_count.attr.attr,
2281 NULL
2282 };
2283
2284 @@ -371,9 +382,16 @@ static umode_t csrow_dev_is_visible(struct kobject *kobj,
2285
2286 if (idx >= csrow->nr_channels)
2287 return 0;
2288 +
2289 + if (idx >= ARRAY_SIZE(dynamic_csrow_ce_count_attr) - 1) {
2290 + WARN_ONCE(1, "idx: %d\n", idx);
2291 + return 0;
2292 + }
2293 +
2294 /* Only expose populated DIMMs */
2295 if (!csrow->channels[idx]->dimm->nr_pages)
2296 return 0;
2297 +
2298 return attr->mode;
2299 }
2300
2301 diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
2302 index cdaba13cb8e8..c0f7cce23f62 100644
2303 --- a/drivers/gpio/gpio-intel-mid.c
2304 +++ b/drivers/gpio/gpio-intel-mid.c
2305 @@ -17,7 +17,6 @@
2306 * Moorestown platform Langwell chip.
2307 * Medfield platform Penwell chip.
2308 * Clovertrail platform Cloverview chip.
2309 - * Merrifield platform Tangier chip.
2310 */
2311
2312 #include <linux/module.h>
2313 @@ -64,10 +63,6 @@ enum GPIO_REG {
2314 /* intel_mid gpio driver data */
2315 struct intel_mid_gpio_ddata {
2316 u16 ngpio; /* number of gpio pins */
2317 - u32 gplr_offset; /* offset of first GPLR register from base */
2318 - u32 flis_base; /* base address of FLIS registers */
2319 - u32 flis_len; /* length of FLIS registers */
2320 - u32 (*get_flis_offset)(int gpio);
2321 u32 chip_irq_type; /* chip interrupt type */
2322 };
2323
2324 @@ -252,15 +247,6 @@ static const struct intel_mid_gpio_ddata gpio_cloverview_core = {
2325 .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
2326 };
2327
2328 -static const struct intel_mid_gpio_ddata gpio_tangier = {
2329 - .ngpio = 192,
2330 - .gplr_offset = 4,
2331 - .flis_base = 0xff0c0000,
2332 - .flis_len = 0x8000,
2333 - .get_flis_offset = NULL,
2334 - .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
2335 -};
2336 -
2337 static const struct pci_device_id intel_gpio_ids[] = {
2338 {
2339 /* Lincroft */
2340 @@ -287,11 +273,6 @@ static const struct pci_device_id intel_gpio_ids[] = {
2341 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7),
2342 .driver_data = (kernel_ulong_t)&gpio_cloverview_core,
2343 },
2344 - {
2345 - /* Tangier */
2346 - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1199),
2347 - .driver_data = (kernel_ulong_t)&gpio_tangier,
2348 - },
2349 { 0 }
2350 };
2351 MODULE_DEVICE_TABLE(pci, intel_gpio_ids);
2352 @@ -401,7 +382,7 @@ static int intel_gpio_probe(struct pci_dev *pdev,
2353 spin_lock_init(&priv->lock);
2354
2355 pci_set_drvdata(pdev, priv);
2356 - retval = gpiochip_add_data(&priv->chip, priv);
2357 + retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
2358 if (retval) {
2359 dev_err(&pdev->dev, "gpiochip_add error %d\n", retval);
2360 return retval;
2361 diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
2362 index 5e3be32ebb8d..3745de659594 100644
2363 --- a/drivers/gpio/gpio-pca953x.c
2364 +++ b/drivers/gpio/gpio-pca953x.c
2365 @@ -90,7 +90,7 @@ MODULE_DEVICE_TABLE(acpi, pca953x_acpi_ids);
2366 #define MAX_BANK 5
2367 #define BANK_SZ 8
2368
2369 -#define NBANK(chip) (chip->gpio_chip.ngpio / BANK_SZ)
2370 +#define NBANK(chip) DIV_ROUND_UP(chip->gpio_chip.ngpio, BANK_SZ)
2371
2372 struct pca953x_chip {
2373 unsigned gpio_start;
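
NBANK() previously truncated, so a chip whose ngpio is not a multiple of BANK_SZ lost its top bank. A quick stand-alone check of the before/after arithmetic (the 18-line expander below is hypothetical):

	#include <stdio.h>

	#define BANK_SZ 8
	#define NBANK_OLD(ngpio)	((ngpio) / BANK_SZ)			/* truncates */
	#define NBANK_NEW(ngpio)	(((ngpio) + BANK_SZ - 1) / BANK_SZ)	/* rounds up */

	int main(void)
	{
		/* A hypothetical 18-line expander needs 3 banks, not 2. */
		printf("old=%d new=%d\n", NBANK_OLD(18), NBANK_NEW(18));
		return 0;
	}
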
2374 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
2375 index 9df1bcb35bf0..983175363b06 100644
2376 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
2377 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
2378 @@ -551,28 +551,19 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
2379 le16_to_cpu(firmware_info->info.usReferenceClock);
2380 ppll->reference_div = 0;
2381
2382 - if (crev < 2)
2383 - ppll->pll_out_min =
2384 - le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
2385 - else
2386 - ppll->pll_out_min =
2387 - le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
2388 + ppll->pll_out_min =
2389 + le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
2390 ppll->pll_out_max =
2391 le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
2392
2393 - if (crev >= 4) {
2394 - ppll->lcd_pll_out_min =
2395 - le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
2396 - if (ppll->lcd_pll_out_min == 0)
2397 - ppll->lcd_pll_out_min = ppll->pll_out_min;
2398 - ppll->lcd_pll_out_max =
2399 - le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
2400 - if (ppll->lcd_pll_out_max == 0)
2401 - ppll->lcd_pll_out_max = ppll->pll_out_max;
2402 - } else {
2403 + ppll->lcd_pll_out_min =
2404 + le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
2405 + if (ppll->lcd_pll_out_min == 0)
2406 ppll->lcd_pll_out_min = ppll->pll_out_min;
2407 + ppll->lcd_pll_out_max =
2408 + le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
2409 + if (ppll->lcd_pll_out_max == 0)
2410 ppll->lcd_pll_out_max = ppll->pll_out_max;
2411 - }
2412
2413 if (ppll->pll_out_min == 0)
2414 ppll->pll_out_min = 64800;
2415 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
2416 index 35a1248aaa77..1b4c069f7765 100644
2417 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
2418 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
2419 @@ -10,6 +10,7 @@
2420 #include <linux/slab.h>
2421 #include <linux/acpi.h>
2422 #include <linux/pci.h>
2423 +#include <linux/delay.h>
2424
2425 #include "amd_acpi.h"
2426
2427 @@ -259,6 +260,10 @@ static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state)
2428 if (!info)
2429 return -EIO;
2430 kfree(info);
2431 +
2432 + /* 200ms delay is required after off */
2433 + if (state == 0)
2434 + msleep(200);
2435 }
2436 return 0;
2437 }
2438 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
2439 index cb07da41152b..ff0b55a65ca3 100644
2440 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
2441 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
2442 @@ -1690,7 +1690,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
2443 DRM_MODE_SCALE_NONE);
2444 /* no HPD on analog connectors */
2445 amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE;
2446 - connector->polled = DRM_CONNECTOR_POLL_CONNECT;
2447 connector->interlace_allowed = true;
2448 connector->doublescan_allowed = true;
2449 break;
2450 @@ -1893,8 +1892,10 @@ amdgpu_connector_add(struct amdgpu_device *adev,
2451 }
2452
2453 if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) {
2454 - if (i2c_bus->valid)
2455 - connector->polled = DRM_CONNECTOR_POLL_CONNECT;
2456 + if (i2c_bus->valid) {
2457 + connector->polled = DRM_CONNECTOR_POLL_CONNECT |
2458 + DRM_CONNECTOR_POLL_DISCONNECT;
2459 + }
2460 } else
2461 connector->polled = DRM_CONNECTOR_POLL_HPD;
2462
2463 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
2464 index 6e920086af46..b7f5650d8218 100644
2465 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
2466 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
2467 @@ -1841,7 +1841,23 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
2468 }
2469
2470 drm_kms_helper_poll_enable(dev);
2471 +
2472 + /*
2473 + * Most of the connector probing functions try to acquire runtime pm
2474 + * refs to ensure that the GPU is powered on when connector polling is
2475 + * performed. Since we're calling this from a runtime PM callback,
2476 + * trying to acquire rpm refs will cause us to deadlock.
2477 + *
2478 + * Since we're guaranteed to be holding the rpm lock, it's safe to
2479 + * temporarily disable the rpm helpers so this doesn't deadlock us.
2480 + */
2481 +#ifdef CONFIG_PM
2482 + dev->dev->power.disable_depth++;
2483 +#endif
2484 drm_helper_hpd_irq_event(dev);
2485 +#ifdef CONFIG_PM
2486 + dev->dev->power.disable_depth--;
2487 +#endif
2488
2489 if (fbcon) {
2490 amdgpu_fbdev_set_suspend(adev, 0);
2491 diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
2492 index 48b6bd671cda..c32eca26155c 100644
2493 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
2494 +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
2495 @@ -98,6 +98,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode
2496 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
2497 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2498 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2499 + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2500 if (dig->backlight_level == 0)
2501 amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
2502 ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
2503 diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
2504 index 1feb6439cb0b..92695481093e 100644
2505 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
2506 +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
2507 @@ -167,6 +167,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
2508 break;
2509 case CHIP_KAVERI:
2510 case CHIP_KABINI:
2511 + case CHIP_MULLINS:
2512 return 0;
2513 default: BUG();
2514 }
2515 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
2516 index 90b35c5c10a4..ffc7c0dd3f14 100644
2517 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
2518 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
2519 @@ -592,12 +592,12 @@ bool atomctrl_get_pp_assign_pin(
2520 const uint32_t pinId,
2521 pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
2522 {
2523 - bool bRet = 0;
2524 + bool bRet = false;
2525 ATOM_GPIO_PIN_LUT *gpio_lookup_table =
2526 get_gpio_lookup_table(hwmgr->device);
2527
2528 PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table),
2529 - "Could not find GPIO lookup Table in BIOS.", return -1);
2530 + "Could not find GPIO lookup Table in BIOS.", return false);
2531
2532 bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId,
2533 gpio_pin_assignment);
2534 diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
2535 index 059f7c39c582..a7916e5f8864 100644
2536 --- a/drivers/gpu/drm/drm_cache.c
2537 +++ b/drivers/gpu/drm/drm_cache.c
2538 @@ -136,6 +136,7 @@ drm_clflush_virt_range(void *addr, unsigned long length)
2539 mb();
2540 for (; addr < end; addr += size)
2541 clflushopt(addr);
2542 + clflushopt(end - 1); /* force serialisation */
2543 mb();
2544 return;
2545 }
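
The drm_clflush_virt_range() loop flushes cache-line-aligned addresses up to end; the added clflushopt(end - 1) re-touches the final line before the trailing mb(), forcing the weakly ordered clflushopt sequence to complete. The loop's shape, sketched with a stand-in flush_line() (both the helper and the explicit line-size parameter are assumptions, not the kernel's API):

	/* Sketch only: flush_line() stands in for one clflushopt instruction. */
	static void flush_line(void *p)
	{
		(void)p;	/* e.g. asm volatile("clflushopt %0" : "+m"(*(char *)p)); */
	}

	static void flush_range(char *addr, unsigned long length, unsigned long line)
	{
		char *end = addr + length;

		addr = (char *)((unsigned long)addr & ~(line - 1));	/* align down */
		for (; addr < end; addr += line)
			flush_line(addr);
		flush_line(end - 1);	/* re-flush the last line, as in the fix above */
	}
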
2546 diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
2547 index eeaf5a7c3aa7..67b28f8018d2 100644
2548 --- a/drivers/gpu/drm/drm_dp_helper.c
2549 +++ b/drivers/gpu/drm/drm_dp_helper.c
2550 @@ -203,7 +203,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
2551
2552 ret = aux->transfer(aux, &msg);
2553
2554 - if (ret > 0) {
2555 + if (ret >= 0) {
2556 native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK;
2557 if (native_reply == DP_AUX_NATIVE_REPLY_ACK) {
2558 if (ret == size)
2559 diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
2560 index 7df26d4b7ad8..2cb472b9976a 100644
2561 --- a/drivers/gpu/drm/drm_edid.c
2562 +++ b/drivers/gpu/drm/drm_edid.c
2563 @@ -74,6 +74,8 @@
2564 #define EDID_QUIRK_FORCE_8BPC (1 << 8)
2565 /* Force 12bpc */
2566 #define EDID_QUIRK_FORCE_12BPC (1 << 9)
2567 +/* Force 6bpc */
2568 +#define EDID_QUIRK_FORCE_6BPC (1 << 10)
2569
2570 struct detailed_mode_closure {
2571 struct drm_connector *connector;
2572 @@ -100,6 +102,9 @@ static struct edid_quirk {
2573 /* Unknown Acer */
2574 { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
2575
2576 + /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
2577 + { "AEO", 0, EDID_QUIRK_FORCE_6BPC },
2578 +
2579 /* Belinea 10 15 55 */
2580 { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
2581 { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
2582 @@ -4082,6 +4087,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
2583
2584 drm_add_display_info(edid, &connector->display_info, connector);
2585
2586 + if (quirks & EDID_QUIRK_FORCE_6BPC)
2587 + connector->display_info.bpc = 6;
2588 +
2589 if (quirks & EDID_QUIRK_FORCE_8BPC)
2590 connector->display_info.bpc = 8;
2591
2592 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
2593 index bc3f2e6842e7..227a63ee0067 100644
2594 --- a/drivers/gpu/drm/i915/i915_drv.h
2595 +++ b/drivers/gpu/drm/i915/i915_drv.h
2596 @@ -2591,6 +2591,8 @@ struct drm_i915_cmd_table {
2597 #define SKL_REVID_D0 0x3
2598 #define SKL_REVID_E0 0x4
2599 #define SKL_REVID_F0 0x5
2600 +#define SKL_REVID_G0 0x6
2601 +#define SKL_REVID_H0 0x7
2602
2603 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
2604
2605 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
2606 index 3fcf7dd5b6ca..bc3b6dde7b4b 100644
2607 --- a/drivers/gpu/drm/i915/i915_reg.h
2608 +++ b/drivers/gpu/drm/i915/i915_reg.h
2609 @@ -1672,6 +1672,9 @@ enum skl_disp_power_wells {
2610
2611 #define GEN7_TLB_RD_ADDR _MMIO(0x4700)
2612
2613 +#define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0)
2614 +#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1<<18)
2615 +
2616 #define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
2617 #define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
2618
2619 @@ -7538,6 +7541,8 @@ enum skl_disp_power_wells {
2620
2621 #define CDCLK_FREQ _MMIO(0x46200)
2622
2623 +#define CDCLK_FREQ _MMIO(0x46200)
2624 +
2625 #define _TRANSA_MSA_MISC 0x60410
2626 #define _TRANSB_MSA_MISC 0x61410
2627 #define _TRANSC_MSA_MISC 0x62410
2628 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
2629 index 3074c56a643d..32893195d7c4 100644
2630 --- a/drivers/gpu/drm/i915/intel_display.c
2631 +++ b/drivers/gpu/drm/i915/intel_display.c
2632 @@ -9700,6 +9700,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
2633
2634 I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
2635
2636 + I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
2637 +
2638 intel_update_cdclk(dev);
2639
2640 WARN(cdclk != dev_priv->cdclk_freq,
2641 @@ -12095,21 +12097,11 @@ connected_sink_compute_bpp(struct intel_connector *connector,
2642 pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
2643 }
2644
2645 - /* Clamp bpp to default limit on screens without EDID 1.4 */
2646 - if (connector->base.display_info.bpc == 0) {
2647 - int type = connector->base.connector_type;
2648 - int clamp_bpp = 24;
2649 -
2650 - /* Fall back to 18 bpp when DP sink capability is unknown. */
2651 - if (type == DRM_MODE_CONNECTOR_DisplayPort ||
2652 - type == DRM_MODE_CONNECTOR_eDP)
2653 - clamp_bpp = 18;
2654 -
2655 - if (bpp > clamp_bpp) {
2656 - DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
2657 - bpp, clamp_bpp);
2658 - pipe_config->pipe_bpp = clamp_bpp;
2659 - }
2660 + /* Clamp bpp to 8 on screens without EDID 1.4 */
2661 + if (connector->base.display_info.bpc == 0 && bpp > 24) {
2662 + DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
2663 + bpp);
2664 + pipe_config->pipe_bpp = 24;
2665 }
2666 }
2667
2668 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
2669 index 2863b92c9da6..c1ca5a7ba86f 100644
2670 --- a/drivers/gpu/drm/i915/intel_pm.c
2671 +++ b/drivers/gpu/drm/i915/intel_pm.c
2672 @@ -4563,7 +4563,8 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
2673 else
2674 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
2675 dev_priv->rps.last_adj = 0;
2676 - I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
2677 + I915_WRITE(GEN6_PMINTRMSK,
2678 + gen6_sanitize_rps_pm_mask(dev_priv, ~0));
2679 }
2680 mutex_unlock(&dev_priv->rps.hw_lock);
2681
2682 diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
2683 index 68c5af079ef8..9d778f3ab27d 100644
2684 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
2685 +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
2686 @@ -1135,6 +1135,11 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
2687 /* WaDisableGafsUnitClkGating:skl */
2688 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
2689
2690 + /* WaInPlaceDecompressionHang:skl */
2691 + if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
2692 + WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
2693 + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
2694 +
2695 /* WaDisableLSQCROPERFforOCL:skl */
2696 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
2697 if (ret)
2698 @@ -1194,6 +1199,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
2699 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
2700 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
2701
2702 + /* WaInPlaceDecompressionHang:bxt */
2703 + if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
2704 + WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
2705 + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
2706 +
2707 return 0;
2708 }
2709
2710 @@ -1241,6 +1251,10 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
2711 GEN7_HALF_SLICE_CHICKEN1,
2712 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
2713
2714 + /* WaInPlaceDecompressionHang:kbl */
2715 + WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
2716 + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
2717 +
2718 /* WaDisableLSQCROPERFforOCL:kbl */
2719 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
2720 if (ret)
2721 diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
2722 index 11f8dd9c0edb..d6c134b01833 100644
2723 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c
2724 +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
2725 @@ -324,7 +324,16 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
2726 !vga_switcheroo_handler_flags())
2727 return -EPROBE_DEFER;
2728
2729 - /* remove conflicting drivers (vesafb, efifb etc) */
2730 + /* We need to check that the chipset is supported before booting
2731 + * fbdev off the hardware, as there's no way to put it back.
2732 + */
2733 + ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
2734 + if (ret)
2735 + return ret;
2736 +
2737 + nvkm_device_del(&device);
2738 +
2739 + /* Remove conflicting drivers (vesafb, efifb etc). */
2740 aper = alloc_apertures(3);
2741 if (!aper)
2742 return -ENOMEM;
2743 diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
2744 index 7d9248b8c664..da8fd5ff9d0f 100644
2745 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
2746 +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
2747 @@ -107,11 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
2748 ((image->dx + image->width) & 0xffff));
2749 OUT_RING(chan, bg);
2750 OUT_RING(chan, fg);
2751 - OUT_RING(chan, (image->height << 16) | image->width);
2752 + OUT_RING(chan, (image->height << 16) | ALIGN(image->width, 8));
2753 OUT_RING(chan, (image->height << 16) | image->width);
2754 OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
2755
2756 - dsize = ALIGN(image->width * image->height, 32) >> 5;
2757 + dsize = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
2758 while (dsize) {
2759 int iter_len = dsize > 128 ? 128 : dsize;
2760
2761 diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
2762 index 1aeb698e9707..af3d3c49411a 100644
2763 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
2764 +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
2765 @@ -125,7 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
2766 OUT_RING(chan, 0);
2767 OUT_RING(chan, image->dy);
2768
2769 - dwords = ALIGN(image->width * image->height, 32) >> 5;
2770 + dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
2771 while (dwords) {
2772 int push = dwords > 2047 ? 2047 : dwords;
2773
2774 diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
2775 index 839f4c8c1805..054b6a056d99 100644
2776 --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
2777 +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
2778 @@ -125,7 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
2779 OUT_RING (chan, 0);
2780 OUT_RING (chan, image->dy);
2781
2782 - dwords = ALIGN(image->width * image->height, 32) >> 5;
2783 + dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
2784 while (dwords) {
2785 int push = dwords > 2047 ? 2047 : dwords;
2786
2787 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
2788 index 69de8c6259fe..f1e15a4d4f64 100644
2789 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
2790 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
2791 @@ -76,8 +76,8 @@ nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
2792 nvkm_wo32(chan->inst, i, 0x00040004);
2793 for (i = 0x1f18; i <= 0x3088 ; i += 16) {
2794 nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
2795 - nvkm_wo32(chan->inst, i + 1, 0x0436086c);
2796 - nvkm_wo32(chan->inst, i + 2, 0x000c001b);
2797 + nvkm_wo32(chan->inst, i + 4, 0x0436086c);
2798 + nvkm_wo32(chan->inst, i + 8, 0x000c001b);
2799 }
2800 for (i = 0x30b8; i < 0x30c8; i += 4)
2801 nvkm_wo32(chan->inst, i, 0x0000ffff);
2802 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
2803 index 2207dac23981..300f5ed5de0b 100644
2804 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
2805 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
2806 @@ -75,8 +75,8 @@ nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
2807 nvkm_wo32(chan->inst, i, 0x00040004);
2808 for (i = 0x15ac; i <= 0x271c ; i += 16) {
2809 nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
2810 - nvkm_wo32(chan->inst, i + 1, 0x0436086c);
2811 - nvkm_wo32(chan->inst, i + 2, 0x000c001b);
2812 + nvkm_wo32(chan->inst, i + 4, 0x0436086c);
2813 + nvkm_wo32(chan->inst, i + 8, 0x000c001b);
2814 }
2815 for (i = 0x274c; i < 0x275c; i += 4)
2816 nvkm_wo32(chan->inst, i, 0x0000ffff);
2817 diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
2818 index 587cae4e73c9..56bb758f4e33 100644
2819 --- a/drivers/gpu/drm/radeon/atombios_encoders.c
2820 +++ b/drivers/gpu/drm/radeon/atombios_encoders.c
2821 @@ -120,6 +120,7 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
2822 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
2823 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2824 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2825 + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2826 if (dig->backlight_level == 0)
2827 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
2828 else {
2829 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
2830 index f8097a0e7a79..5df3ec73021b 100644
2831 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
2832 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
2833 @@ -1155,7 +1155,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
2834 le16_to_cpu(firmware_info->info.usReferenceClock);
2835 p1pll->reference_div = 0;
2836
2837 - if (crev < 2)
2838 + if ((frev < 2) && (crev < 2))
2839 p1pll->pll_out_min =
2840 le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
2841 else
2842 @@ -1164,7 +1164,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
2843 p1pll->pll_out_max =
2844 le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
2845
2846 - if (crev >= 4) {
2847 + if (((frev < 2) && (crev >= 4)) || (frev >= 2)) {
2848 p1pll->lcd_pll_out_min =
2849 le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
2850 if (p1pll->lcd_pll_out_min == 0)
2851 diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
2852 index 95f4fea89302..1b3f4e51f5dc 100644
2853 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
2854 +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
2855 @@ -10,6 +10,7 @@
2856 #include <linux/slab.h>
2857 #include <linux/acpi.h>
2858 #include <linux/pci.h>
2859 +#include <linux/delay.h>
2860
2861 #include "radeon_acpi.h"
2862
2863 @@ -258,6 +259,10 @@ static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
2864 if (!info)
2865 return -EIO;
2866 kfree(info);
2867 +
2868 + /* 200ms delay is required after off */
2869 + if (state == 0)
2870 + msleep(200);
2871 }
2872 return 0;
2873 }
2874 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
2875 index 81a63d7f5cd9..b79f3b002471 100644
2876 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
2877 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
2878 @@ -2064,7 +2064,6 @@ radeon_add_atom_connector(struct drm_device *dev,
2879 RADEON_OUTPUT_CSC_BYPASS);
2880 /* no HPD on analog connectors */
2881 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
2882 - connector->polled = DRM_CONNECTOR_POLL_CONNECT;
2883 connector->interlace_allowed = true;
2884 connector->doublescan_allowed = true;
2885 break;
2886 @@ -2314,8 +2313,10 @@ radeon_add_atom_connector(struct drm_device *dev,
2887 }
2888
2889 if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
2890 - if (i2c_bus->valid)
2891 - connector->polled = DRM_CONNECTOR_POLL_CONNECT;
2892 + if (i2c_bus->valid) {
2893 + connector->polled = DRM_CONNECTOR_POLL_CONNECT |
2894 + DRM_CONNECTOR_POLL_DISCONNECT;
2895 + }
2896 } else
2897 connector->polled = DRM_CONNECTOR_POLL_HPD;
2898
2899 @@ -2391,7 +2392,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
2900 1);
2901 /* no HPD on analog connectors */
2902 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
2903 - connector->polled = DRM_CONNECTOR_POLL_CONNECT;
2904 connector->interlace_allowed = true;
2905 connector->doublescan_allowed = true;
2906 break;
2907 @@ -2476,10 +2476,13 @@ radeon_add_legacy_connector(struct drm_device *dev,
2908 }
2909
2910 if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
2911 - if (i2c_bus->valid)
2912 - connector->polled = DRM_CONNECTOR_POLL_CONNECT;
2913 + if (i2c_bus->valid) {
2914 + connector->polled = DRM_CONNECTOR_POLL_CONNECT |
2915 + DRM_CONNECTOR_POLL_DISCONNECT;
2916 + }
2917 } else
2918 connector->polled = DRM_CONNECTOR_POLL_HPD;
2919 +
2920 connector->display_info.subpixel_order = subpixel_order;
2921 drm_connector_register(connector);
2922 }
2923 diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
2924 index 1c4d5b5a70a2..b1673236c356 100644
2925 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
2926 +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
2927 @@ -1048,6 +1048,17 @@ static void vop_crtc_destroy(struct drm_crtc *crtc)
2928 drm_crtc_cleanup(crtc);
2929 }
2930
2931 +static void vop_crtc_reset(struct drm_crtc *crtc)
2932 +{
2933 + if (crtc->state)
2934 + __drm_atomic_helper_crtc_destroy_state(crtc->state);
2935 + kfree(crtc->state);
2936 +
2937 + crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL);
2938 + if (crtc->state)
2939 + crtc->state->crtc = crtc;
2940 +}
2941 +
2942 static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
2943 {
2944 struct rockchip_crtc_state *rockchip_state;
2945 @@ -1073,7 +1084,7 @@ static const struct drm_crtc_funcs vop_crtc_funcs = {
2946 .set_config = drm_atomic_helper_set_config,
2947 .page_flip = drm_atomic_helper_page_flip,
2948 .destroy = vop_crtc_destroy,
2949 - .reset = drm_atomic_helper_crtc_reset,
2950 + .reset = vop_crtc_reset,
2951 .atomic_duplicate_state = vop_crtc_duplicate_state,
2952 .atomic_destroy_state = vop_crtc_destroy_state,
2953 };
2954 diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
2955 index 16b6f11a0700..99ec3ff7563b 100644
2956 --- a/drivers/hid/uhid.c
2957 +++ b/drivers/hid/uhid.c
2958 @@ -51,10 +51,26 @@ struct uhid_device {
2959 u32 report_id;
2960 u32 report_type;
2961 struct uhid_event report_buf;
2962 + struct work_struct worker;
2963 };
2964
2965 static struct miscdevice uhid_misc;
2966
2967 +static void uhid_device_add_worker(struct work_struct *work)
2968 +{
2969 + struct uhid_device *uhid = container_of(work, struct uhid_device, worker);
2970 + int ret;
2971 +
2972 + ret = hid_add_device(uhid->hid);
2973 + if (ret) {
2974 + hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret);
2975 +
2976 + hid_destroy_device(uhid->hid);
2977 + uhid->hid = NULL;
2978 + uhid->running = false;
2979 + }
2980 +}
2981 +
2982 static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
2983 {
2984 __u8 newhead;
2985 @@ -498,18 +514,14 @@ static int uhid_dev_create2(struct uhid_device *uhid,
2986 uhid->hid = hid;
2987 uhid->running = true;
2988
2989 - ret = hid_add_device(hid);
2990 - if (ret) {
2991 - hid_err(hid, "Cannot register HID device\n");
2992 - goto err_hid;
2993 - }
2994 + /* Adding a HID device is done through a worker, to allow HID drivers
2995 + * which use feature requests during .probe to work; otherwise they would
2996 + * be blocked on devlock, which is held by uhid_char_write.
2997 + */
2998 + schedule_work(&uhid->worker);
2999
3000 return 0;
3001
3002 -err_hid:
3003 - hid_destroy_device(hid);
3004 - uhid->hid = NULL;
3005 - uhid->running = false;
3006 err_free:
3007 kfree(uhid->rd_data);
3008 uhid->rd_data = NULL;
3009 @@ -550,6 +562,8 @@ static int uhid_dev_destroy(struct uhid_device *uhid)
3010 uhid->running = false;
3011 wake_up_interruptible(&uhid->report_wait);
3012
3013 + cancel_work_sync(&uhid->worker);
3014 +
3015 hid_destroy_device(uhid->hid);
3016 kfree(uhid->rd_data);
3017
3018 @@ -612,6 +626,7 @@ static int uhid_char_open(struct inode *inode, struct file *file)
3019 init_waitqueue_head(&uhid->waitq);
3020 init_waitqueue_head(&uhid->report_wait);
3021 uhid->running = false;
3022 + INIT_WORK(&uhid->worker, uhid_device_add_worker);
3023
3024 file->private_data = uhid;
3025 nonseekable_open(inode, file);
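
Registration is thus decoupled from the ioctl path: uhid_dev_create2() only schedules the worker, and uhid_dev_destroy() must cancel_work_sync() before tearing the device down. The generic defer-to-worker shape, sketched with hypothetical my_* names (not uhid's):

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct my_dev {
		struct work_struct worker;
		bool running;
	};

	/* The worker does the slow registration that may call back into the
	 * driver, so the caller of my_open() is never blocked on it.
	 */
	static void my_add_worker(struct work_struct *work)
	{
		struct my_dev *d = container_of(work, struct my_dev, worker);

		d->running = true;	/* stands in for hid_add_device() */
	}

	static void my_open(struct my_dev *d)
	{
		INIT_WORK(&d->worker, my_add_worker);
		schedule_work(&d->worker);	/* defer instead of registering inline */
	}

	static void my_destroy(struct my_dev *d)
	{
		cancel_work_sync(&d->worker);	/* worker must finish before freeing */
	}
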
3026 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
3027 index 952f20fdc7e3..e82f7e1c217c 100644
3028 --- a/drivers/hv/vmbus_drv.c
3029 +++ b/drivers/hv/vmbus_drv.c
3030 @@ -42,6 +42,7 @@
3031 #include <linux/screen_info.h>
3032 #include <linux/kdebug.h>
3033 #include <linux/efi.h>
3034 +#include <linux/random.h>
3035 #include "hyperv_vmbus.h"
3036
3037 static struct acpi_device *hv_acpi_dev;
3038 @@ -806,6 +807,8 @@ static void vmbus_isr(void)
3039 else
3040 tasklet_schedule(hv_context.msg_dpc[cpu]);
3041 }
3042 +
3043 + add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
3044 }
3045
3046
3047 diff --git a/drivers/i2c/busses/i2c-efm32.c b/drivers/i2c/busses/i2c-efm32.c
3048 index 8eff62738877..e253598d764c 100644
3049 --- a/drivers/i2c/busses/i2c-efm32.c
3050 +++ b/drivers/i2c/busses/i2c-efm32.c
3051 @@ -433,7 +433,7 @@ static int efm32_i2c_probe(struct platform_device *pdev)
3052 ret = request_irq(ddata->irq, efm32_i2c_irq, 0, DRIVER_NAME, ddata);
3053 if (ret < 0) {
3054 dev_err(&pdev->dev, "failed to request irq (%d)\n", ret);
3055 - return ret;
3056 + goto err_disable_clk;
3057 }
3058
3059 ret = i2c_add_adapter(&ddata->adapter);
3060 diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
3061 index 1eb9b1294a63..dbfd854c32c9 100644
3062 --- a/drivers/infiniband/core/rw.c
3063 +++ b/drivers/infiniband/core/rw.c
3064 @@ -58,19 +58,13 @@ static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
3065 return false;
3066 }
3067
3068 -static inline u32 rdma_rw_max_sge(struct ib_device *dev,
3069 - enum dma_data_direction dir)
3070 -{
3071 - return dir == DMA_TO_DEVICE ?
3072 - dev->attrs.max_sge : dev->attrs.max_sge_rd;
3073 -}
3074 -
3075 static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev)
3076 {
3077 /* arbitrary limit to avoid allocating gigantic resources */
3078 return min_t(u32, dev->attrs.max_fast_reg_page_list_len, 256);
3079 }
3080
3081 +/* Caller must have zero-initialized *reg. */
3082 static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
3083 struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
3084 u32 sg_cnt, u32 offset)
3085 @@ -114,6 +108,7 @@ static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
3086 u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
3087 u64 remote_addr, u32 rkey, enum dma_data_direction dir)
3088 {
3089 + struct rdma_rw_reg_ctx *prev = NULL;
3090 u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
3091 int i, j, ret = 0, count = 0;
3092
3093 @@ -125,7 +120,6 @@ static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
3094 }
3095
3096 for (i = 0; i < ctx->nr_ops; i++) {
3097 - struct rdma_rw_reg_ctx *prev = i ? &ctx->reg[i - 1] : NULL;
3098 struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
3099 u32 nents = min(sg_cnt, pages_per_mr);
3100
3101 @@ -162,9 +156,13 @@ static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
3102 sg_cnt -= nents;
3103 for (j = 0; j < nents; j++)
3104 sg = sg_next(sg);
3105 + prev = reg;
3106 offset = 0;
3107 }
3108
3109 + if (prev)
3110 + prev->wr.wr.next = NULL;
3111 +
3112 ctx->type = RDMA_RW_MR;
3113 return count;
3114
3115 @@ -181,7 +179,8 @@ static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
3116 u64 remote_addr, u32 rkey, enum dma_data_direction dir)
3117 {
3118 struct ib_device *dev = qp->pd->device;
3119 - u32 max_sge = rdma_rw_max_sge(dev, dir);
3120 + u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
3121 + qp->max_read_sge;
3122 struct ib_sge *sge;
3123 u32 total_len = 0, i, j;
3124
3125 @@ -205,11 +204,10 @@ static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
3126 rdma_wr->wr.opcode = IB_WR_RDMA_READ;
3127 rdma_wr->remote_addr = remote_addr + total_len;
3128 rdma_wr->rkey = rkey;
3129 + rdma_wr->wr.num_sge = nr_sge;
3130 rdma_wr->wr.sg_list = sge;
3131
3132 for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
3133 - rdma_wr->wr.num_sge++;
3134 -
3135 sge->addr = ib_sg_dma_address(dev, sg) + offset;
3136 sge->length = ib_sg_dma_len(dev, sg) - offset;
3137 sge->lkey = qp->pd->local_dma_lkey;
3138 @@ -220,8 +218,8 @@ static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
3139 offset = 0;
3140 }
3141
3142 - if (i + 1 < ctx->nr_ops)
3143 - rdma_wr->wr.next = &ctx->map.wrs[i + 1].wr;
3144 + rdma_wr->wr.next = i + 1 < ctx->nr_ops ?
3145 + &ctx->map.wrs[i + 1].wr : NULL;
3146 }
3147
3148 ctx->type = RDMA_RW_MULTI_WR;
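Both rw.c fixes enforce one invariant: when a chain of work requests is rebuilt across loop iterations, the final next pointer must be set to NULL explicitly, otherwise the tail can keep pointing at stale entries from a previous, longer use of the same array. Reduced to plain C (the node type is illustrative):

	#include <stddef.h>

	struct wr { struct wr *next; };

	/* link wrs[0..n-1] into a chain and terminate it explicitly */
	static void link_chain(struct wr *wrs, size_t n)
	{
		struct wr *prev = NULL;

		for (size_t i = 0; i < n; i++) {
			if (prev)
				prev->next = &wrs[i];
			prev = &wrs[i];
		}
		if (prev)
			prev->next = NULL;	/* the fix: never leave the tail dangling */
	}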
3149 diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
3150 index 6298f54b4137..e39a0b597234 100644
3151 --- a/drivers/infiniband/core/verbs.c
3152 +++ b/drivers/infiniband/core/verbs.c
3153 @@ -814,6 +814,15 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
3154 }
3155 }
3156
3157 + /*
3158 + * Note: all hw drivers guarantee that max_send_sge is lower than
3159 + * the device RDMA WRITE SGE limit but not all hw drivers ensure that
3160 + * max_send_sge <= max_sge_rd.
3161 + */
3162 + qp->max_write_sge = qp_init_attr->cap.max_send_sge;
3163 + qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
3164 + device->attrs.max_sge_rd);
3165 +
3166 return qp;
3167 }
3168 EXPORT_SYMBOL(ib_create_qp);
3169 diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
3170 index 53e03c8ede79..79e6309460dc 100644
3171 --- a/drivers/infiniband/hw/mlx5/gsi.c
3172 +++ b/drivers/infiniband/hw/mlx5/gsi.c
3173 @@ -69,15 +69,6 @@ static bool mlx5_ib_deth_sqpn_cap(struct mlx5_ib_dev *dev)
3174 return MLX5_CAP_GEN(dev->mdev, set_deth_sqpn);
3175 }
3176
3177 -static u32 next_outstanding(struct mlx5_ib_gsi_qp *gsi, u32 index)
3178 -{
3179 - return ++index % gsi->cap.max_send_wr;
3180 -}
3181 -
3182 -#define for_each_outstanding_wr(gsi, index) \
3183 - for (index = gsi->outstanding_ci; index != gsi->outstanding_pi; \
3184 - index = next_outstanding(gsi, index))
3185 -
3186 /* Call with gsi->lock locked */
3187 static void generate_completions(struct mlx5_ib_gsi_qp *gsi)
3188 {
3189 @@ -85,8 +76,9 @@ static void generate_completions(struct mlx5_ib_gsi_qp *gsi)
3190 struct mlx5_ib_gsi_wr *wr;
3191 u32 index;
3192
3193 - for_each_outstanding_wr(gsi, index) {
3194 - wr = &gsi->outstanding_wrs[index];
3195 + for (index = gsi->outstanding_ci; index != gsi->outstanding_pi;
3196 + index++) {
3197 + wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr];
3198
3199 if (!wr->completed)
3200 break;
3201 @@ -430,8 +422,9 @@ static int mlx5_ib_add_outstanding_wr(struct mlx5_ib_gsi_qp *gsi,
3202 return -ENOMEM;
3203 }
3204
3205 - gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi];
3206 - gsi->outstanding_pi = next_outstanding(gsi, gsi->outstanding_pi);
3207 + gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi %
3208 + gsi->cap.max_send_wr];
3209 + gsi->outstanding_pi++;
3210
3211 if (!wc) {
3212 memset(&gsi_wr->wc, 0, sizeof(gsi_wr->wc));
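The gsi.c rework replaces indices that were wrapped on every increment with free-running producer/consumer counters that are reduced modulo the ring size only when indexing the array; with pre-wrapped indices, pi == ci cannot distinguish a full ring from an empty one. A standalone C sketch of the fixed scheme (CAP and the types are illustrative; with free-running 32-bit counters the capacity must divide 2^32, i.e. be a power of two, for the modulo to stay consistent across counter wraparound):

	#include <stdint.h>
	#include <stdbool.h>

	#define CAP 8	/* ring capacity; a power of two, see note above */

	struct ring {
		int slots[CAP];
		uint32_t pi, ci;	/* free-running producer/consumer counters */
	};

	static bool ring_full(const struct ring *r)  { return r->pi - r->ci == CAP; }
	static bool ring_empty(const struct ring *r) { return r->pi == r->ci; }

	static void ring_push(struct ring *r, int v)
	{
		r->slots[r->pi % CAP] = v;	/* wrap only at the access site */
		r->pi++;
	}

	static int ring_pop(struct ring *r)
	{
		int v = r->slots[r->ci % CAP];

		r->ci++;
		return v;
	}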
3213 diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig
3214 index 11aa6a34bd71..1da8d01a6855 100644
3215 --- a/drivers/infiniband/sw/rdmavt/Kconfig
3216 +++ b/drivers/infiniband/sw/rdmavt/Kconfig
3217 @@ -1,6 +1,5 @@
3218 config INFINIBAND_RDMAVT
3219 tristate "RDMA verbs transport library"
3220 depends on 64BIT
3221 - default m
3222 ---help---
3223 This is a common software verbs provider for RDMA networks.
3224 diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
3225 index 6ca6fa80dd6e..f2f229efbe64 100644
3226 --- a/drivers/infiniband/sw/rdmavt/cq.c
3227 +++ b/drivers/infiniband/sw/rdmavt/cq.c
3228 @@ -510,6 +510,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
3229
3230 if (rdi->worker)
3231 return 0;
3232 + spin_lock_init(&rdi->n_cqs_lock);
3233 rdi->worker = kzalloc(sizeof(*rdi->worker), GFP_KERNEL);
3234 if (!rdi->worker)
3235 return -ENOMEM;
3236 diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
3237 index 4a4155640d51..9a3b954e862d 100644
3238 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c
3239 +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
3240 @@ -1601,6 +1601,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
3241 struct ib_qp_init_attr *qp_init;
3242 struct srpt_port *sport = ch->sport;
3243 struct srpt_device *sdev = sport->sdev;
3244 + const struct ib_device_attr *attrs = &sdev->device->attrs;
3245 u32 srp_sq_size = sport->port_attrib.srp_sq_size;
3246 int ret;
3247
3248 @@ -1638,7 +1639,7 @@ retry:
3249 */
3250 qp_init->cap.max_send_wr = srp_sq_size / 2;
3251 qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
3252 - qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
3253 + qp_init->cap.max_send_sge = min(attrs->max_sge, SRPT_MAX_SG_PER_WQE);
3254 qp_init->port_num = ch->sport->port;
3255
3256 ch->qp = ib_create_qp(sdev->pd, qp_init);
3257 diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
3258 index 389030487da7..581878782854 100644
3259 --- a/drivers/infiniband/ulp/srpt/ib_srpt.h
3260 +++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
3261 @@ -106,7 +106,11 @@ enum {
3262 SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
3263
3264 SRPT_DEF_SG_TABLESIZE = 128,
3265 - SRPT_DEF_SG_PER_WQE = 16,
3266 + /*
3267 + * An experimentally determined value that avoids QP creation
3268 + * failing with "swiotlb buffer is full" on systems using the swiotlb.
3269 + */
3270 + SRPT_MAX_SG_PER_WQE = 16,
3271
3272 MIN_SRPT_SQ_SIZE = 16,
3273 DEF_SRPT_SQ_SIZE = 4096,
3274 diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
3275 index 2f589857a039..d15b33813021 100644
3276 --- a/drivers/input/mouse/elan_i2c_core.c
3277 +++ b/drivers/input/mouse/elan_i2c_core.c
3278 @@ -4,7 +4,8 @@
3279 * Copyright (c) 2013 ELAN Microelectronics Corp.
3280 *
3281 * Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
3282 - * Version: 1.6.0
3283 + * Author: KT Liao <kt.liao@emc.com.tw>
3284 + * Version: 1.6.2
3285 *
3286 * Based on cyapa driver:
3287 * copyright (c) 2011-2012 Cypress Semiconductor, Inc.
3288 @@ -40,7 +41,7 @@
3289 #include "elan_i2c.h"
3290
3291 #define DRIVER_NAME "elan_i2c"
3292 -#define ELAN_DRIVER_VERSION "1.6.1"
3293 +#define ELAN_DRIVER_VERSION "1.6.2"
3294 #define ELAN_VENDOR_ID 0x04f3
3295 #define ETP_MAX_PRESSURE 255
3296 #define ETP_FWIDTH_REDUCE 90
3297 @@ -199,9 +200,41 @@ static int elan_sleep(struct elan_tp_data *data)
3298 return error;
3299 }
3300
3301 +static int elan_query_product(struct elan_tp_data *data)
3302 +{
3303 + int error;
3304 +
3305 + error = data->ops->get_product_id(data->client, &data->product_id);
3306 + if (error)
3307 + return error;
3308 +
3309 + error = data->ops->get_sm_version(data->client, &data->ic_type,
3310 + &data->sm_version);
3311 + if (error)
3312 + return error;
3313 +
3314 + return 0;
3315 +}
3316 +
3317 +static int elan_check_ASUS_special_fw(struct elan_tp_data *data)
3318 +{
3319 + if (data->ic_type != 0x0E)
3320 + return false;
3321 +
3322 + switch (data->product_id) {
3323 + case 0x05 ... 0x07:
3324 + case 0x09:
3325 + case 0x13:
3326 + return true;
3327 + default:
3328 + return false;
3329 + }
3330 +}
3331 +
3332 static int __elan_initialize(struct elan_tp_data *data)
3333 {
3334 struct i2c_client *client = data->client;
3335 + bool woken_up = false;
3336 int error;
3337
3338 error = data->ops->initialize(client);
3339 @@ -210,6 +243,27 @@ static int __elan_initialize(struct elan_tp_data *data)
3340 return error;
3341 }
3342
3343 + error = elan_query_product(data);
3344 + if (error)
3345 + return error;
3346 +
3347 + /*
3348 + * Some ASUS devices were shipped with firmware that requires
3349 + * touchpads to be woken up first, before attempting to switch
3350 + * them into absolute reporting mode.
3351 + */
3352 + if (elan_check_ASUS_special_fw(data)) {
3353 + error = data->ops->sleep_control(client, false);
3354 + if (error) {
3355 + dev_err(&client->dev,
3356 + "failed to wake device up: %d\n", error);
3357 + return error;
3358 + }
3359 +
3360 + msleep(200);
3361 + woken_up = true;
3362 + }
3363 +
3364 data->mode |= ETP_ENABLE_ABS;
3365 error = data->ops->set_mode(client, data->mode);
3366 if (error) {
3367 @@ -218,11 +272,13 @@ static int __elan_initialize(struct elan_tp_data *data)
3368 return error;
3369 }
3370
3371 - error = data->ops->sleep_control(client, false);
3372 - if (error) {
3373 - dev_err(&client->dev,
3374 - "failed to wake device up: %d\n", error);
3375 - return error;
3376 + if (!woken_up) {
3377 + error = data->ops->sleep_control(client, false);
3378 + if (error) {
3379 + dev_err(&client->dev,
3380 + "failed to wake device up: %d\n", error);
3381 + return error;
3382 + }
3383 }
3384
3385 return 0;
3386 @@ -248,10 +304,6 @@ static int elan_query_device_info(struct elan_tp_data *data)
3387 {
3388 int error;
3389
3390 - error = data->ops->get_product_id(data->client, &data->product_id);
3391 - if (error)
3392 - return error;
3393 -
3394 error = data->ops->get_version(data->client, false, &data->fw_version);
3395 if (error)
3396 return error;
3397 @@ -261,11 +313,6 @@ static int elan_query_device_info(struct elan_tp_data *data)
3398 if (error)
3399 return error;
3400
3401 - error = data->ops->get_sm_version(data->client, &data->ic_type,
3402 - &data->sm_version);
3403 - if (error)
3404 - return error;
3405 -
3406 error = data->ops->get_version(data->client, true, &data->iap_version);
3407 if (error)
3408 return error;
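The elan_i2c change queries the IC type and product ID first so that a firmware quirk can be applied before switching the device into absolute mode: affected ASUS units must be woken up, and given ~200 ms, ahead of the mode write. The quirk predicate boils down to a table check; a kernel-style sketch of the same shape (the helper name is illustrative):

	/* hypothetical predicate mirroring elan_check_ASUS_special_fw() */
	static bool needs_wake_before_mode_switch(u8 ic_type, u8 product_id)
	{
		if (ic_type != 0x0E)
			return false;

		switch (product_id) {
		case 0x05 ... 0x07:	/* gcc case-range extension, as used in-tree */
		case 0x09:
		case 0x13:
			return true;
		default:
			return false;
		}
	}

Ordering is the point: the IDs must be known during __elan_initialize(), which is why the two queries moved out of elan_query_device_info().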
3409 diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
3410 index 880c40b23f66..b7e8c11a6fc2 100644
3411 --- a/drivers/input/touchscreen/sur40.c
3412 +++ b/drivers/input/touchscreen/sur40.c
3413 @@ -126,7 +126,7 @@ struct sur40_image_header {
3414 #define VIDEO_PACKET_SIZE 16384
3415
3416 /* polling interval (ms) */
3417 -#define POLL_INTERVAL 4
3418 +#define POLL_INTERVAL 1
3419
3420 /* maximum number of contacts FIXME: this is a guess? */
3421 #define MAX_CONTACTS 64
3422 @@ -448,7 +448,7 @@ static void sur40_process_video(struct sur40_state *sur40)
3423
3424 /* return error if streaming was stopped in the meantime */
3425 if (sur40->sequence == -1)
3426 - goto err_poll;
3427 + return;
3428
3429 /* mark as finished */
3430 new_buf->vb.vb2_buf.timestamp = ktime_get_ns();
3431 @@ -736,6 +736,7 @@ static int sur40_start_streaming(struct vb2_queue *vq, unsigned int count)
3432 static void sur40_stop_streaming(struct vb2_queue *vq)
3433 {
3434 struct sur40_state *sur40 = vb2_get_drv_priv(vq);
3435 + vb2_wait_for_all_buffers(vq);
3436 sur40->sequence = -1;
3437
3438 /* Release all active buffers */
3439 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
3440 index 634f636393d5..2511c8b6aada 100644
3441 --- a/drivers/iommu/amd_iommu.c
3442 +++ b/drivers/iommu/amd_iommu.c
3443 @@ -466,9 +466,11 @@ static void init_iommu_group(struct device *dev)
3444 if (!domain)
3445 goto out;
3446
3447 - dma_domain = to_pdomain(domain)->priv;
3448 + if (to_pdomain(domain)->flags == PD_DMA_OPS_MASK) {
3449 + dma_domain = to_pdomain(domain)->priv;
3450 + init_unity_mappings_for_device(dev, dma_domain);
3451 + }
3452
3453 - init_unity_mappings_for_device(dev, dma_domain);
3454 out:
3455 iommu_group_put(group);
3456 }
3457 @@ -2512,8 +2514,15 @@ static void update_device_table(struct protection_domain *domain)
3458 {
3459 struct iommu_dev_data *dev_data;
3460
3461 - list_for_each_entry(dev_data, &domain->dev_list, list)
3462 + list_for_each_entry(dev_data, &domain->dev_list, list) {
3463 set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
3464 +
3465 + if (dev_data->devid == dev_data->alias)
3466 + continue;
3467 +
3468 + /* There is an alias, update device table entry for it */
3469 + set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled);
3470 + }
3471 }
3472
3473 static void update_domain(struct protection_domain *domain)
3474 @@ -3103,9 +3112,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
3475 static void amd_iommu_domain_free(struct iommu_domain *dom)
3476 {
3477 struct protection_domain *domain;
3478 -
3479 - if (!dom)
3480 - return;
3481 + struct dma_ops_domain *dma_dom;
3482
3483 domain = to_pdomain(dom);
3484
3485 @@ -3114,13 +3121,24 @@ static void amd_iommu_domain_free(struct iommu_domain *dom)
3486
3487 BUG_ON(domain->dev_cnt != 0);
3488
3489 - if (domain->mode != PAGE_MODE_NONE)
3490 - free_pagetable(domain);
3491 + if (!dom)
3492 + return;
3493 +
3494 + switch (dom->type) {
3495 + case IOMMU_DOMAIN_DMA:
3496 + dma_dom = domain->priv;
3497 + dma_ops_domain_free(dma_dom);
3498 + break;
3499 + default:
3500 + if (domain->mode != PAGE_MODE_NONE)
3501 + free_pagetable(domain);
3502
3503 - if (domain->flags & PD_IOMMUV2_MASK)
3504 - free_gcr3_table(domain);
3505 + if (domain->flags & PD_IOMMUV2_MASK)
3506 + free_gcr3_table(domain);
3507
3508 - protection_domain_free(domain);
3509 + protection_domain_free(domain);
3510 + break;
3511 + }
3512 }
3513
3514 static void amd_iommu_detach_device(struct iommu_domain *dom,
3515 diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
3516 index 5ecc86cb74c8..e27e3b7df4e7 100644
3517 --- a/drivers/iommu/exynos-iommu.c
3518 +++ b/drivers/iommu/exynos-iommu.c
3519 @@ -709,6 +709,7 @@ static struct platform_driver exynos_sysmmu_driver __refdata = {
3520 .name = "exynos-sysmmu",
3521 .of_match_table = sysmmu_of_match,
3522 .pm = &sysmmu_pm_ops,
3523 + .suppress_bind_attrs = true,
3524 }
3525 };
3526
3527 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
3528 index 323dac9900ba..d416242c4ab8 100644
3529 --- a/drivers/iommu/intel-iommu.c
3530 +++ b/drivers/iommu/intel-iommu.c
3531 @@ -2076,7 +2076,7 @@ out_unlock:
3532 spin_unlock(&iommu->lock);
3533 spin_unlock_irqrestore(&device_domain_lock, flags);
3534
3535 - return 0;
3536 + return ret;
3537 }
3538
3539 struct domain_context_mapping_data {
3540 diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
3541 index a1ed1b73fed4..f5c90e1366ce 100644
3542 --- a/drivers/iommu/io-pgtable-arm.c
3543 +++ b/drivers/iommu/io-pgtable-arm.c
3544 @@ -576,7 +576,7 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
3545 return 0;
3546
3547 found_translation:
3548 - iova &= (ARM_LPAE_GRANULE(data) - 1);
3549 + iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
3550 return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
3551 }
3552
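The io-pgtable-arm fix matters when the table walk terminates early at a block (huge) mapping: the low bits to preserve are the offset within the block at the level where the walk stopped, not within one granule-sized page. A worked example, assuming a 4 KiB granule and a 2 MiB level-2 block:

	unsigned long iova    = 0x2345678;
	unsigned long granule = 0x1000;		/* 4 KiB */
	unsigned long block   = 0x200000;	/* 2 MiB block at this level */

	unsigned long before = iova & (granule - 1);	/* 0x678 - drops 0x145000 */
	unsigned long after  = iova & (block - 1);	/* 0x145678 - full in-block offset */

Only the masked offset changed; the physical frame number still comes from the PTE as before.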
3553 diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
3554 index b7341de87015..4bb49cd602e9 100644
3555 --- a/drivers/md/dm-flakey.c
3556 +++ b/drivers/md/dm-flakey.c
3557 @@ -289,10 +289,16 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
3558 pb->bio_submitted = true;
3559
3560 /*
3561 - * Map reads as normal.
3562 + * Map reads as normal only if corrupt_bio_byte is set.
3563 */
3564 - if (bio_data_dir(bio) == READ)
3565 - goto map_bio;
3566 + if (bio_data_dir(bio) == READ) {
3567 + /* If flags were specified, only corrupt those that match. */
3568 + if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
3569 + all_corrupt_bio_flags_match(bio, fc))
3570 + goto map_bio;
3571 + else
3572 + return -EIO;
3573 + }
3574
3575 /*
3576 * Drop writes?
3577 @@ -330,12 +336,13 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
3578
3579 /*
3580 * Corrupt successful READs while in down state.
3581 - * If flags were specified, only corrupt those that match.
3582 */
3583 - if (fc->corrupt_bio_byte && !error && pb->bio_submitted &&
3584 - (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
3585 - all_corrupt_bio_flags_match(bio, fc))
3586 - corrupt_bio_data(bio, fc);
3587 + if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
3588 + if (fc->corrupt_bio_byte)
3589 + corrupt_bio_data(bio, fc);
3590 + else
3591 + return -EIO;
3592 + }
3593
3594 return error;
3595 }
3596 diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
3597 index 459a9f8905ed..0f0eb8a3d922 100644
3598 --- a/drivers/md/dm-verity-fec.c
3599 +++ b/drivers/md/dm-verity-fec.c
3600 @@ -453,9 +453,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
3601 */
3602
3603 offset = block << v->data_dev_block_bits;
3604 -
3605 - res = offset;
3606 - div64_u64(res, v->fec->rounds << v->data_dev_block_bits);
3607 + res = div64_u64(offset, v->fec->rounds << v->data_dev_block_bits);
3608
3609 /*
3610 * The base RS block we can feed to the interleaver to find out all
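The dm-verity-fec fix works because div64_u64() returns the quotient rather than modifying its first argument in place; the old code discarded the return value, leaving res equal to the undivided offset. A minimal userspace illustration (the local div64_u64() is a stand-in for the kernel helper, which has the same dividend/divisor signature):

	#include <stdint.h>
	#include <stdio.h>

	/* userspace stand-in for the kernel's div64_u64(): returns the quotient */
	static uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
	{
		return dividend / divisor;
	}

	int main(void)
	{
		uint64_t offset = 1 << 20, rounds = 8, block_bits = 12;
		uint64_t res = offset;

		div64_u64(res, rounds << block_bits);		/* buggy: quotient discarded */
		printf("buggy res = %llu\n", (unsigned long long)res);

		res = div64_u64(offset, rounds << block_bits);	/* fixed */
		printf("fixed res = %llu\n", (unsigned long long)res);
		return 0;
	}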
3611 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
3612 index 1b2f96205361..fd40bcb3937d 100644
3613 --- a/drivers/md/dm.c
3614 +++ b/drivers/md/dm.c
3615 @@ -2175,7 +2175,7 @@ static void dm_request_fn(struct request_queue *q)
3616 md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
3617 md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
3618 (ti->type->busy && ti->type->busy(ti))) {
3619 - blk_delay_queue(q, HZ / 100);
3620 + blk_delay_queue(q, 10);
3621 return;
3622 }
3623
3624 @@ -3128,7 +3128,8 @@ static void unlock_fs(struct mapped_device *md)
3625 * Caller must hold md->suspend_lock
3626 */
3627 static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
3628 - unsigned suspend_flags, int interruptible)
3629 + unsigned suspend_flags, int interruptible,
3630 + int dmf_suspended_flag)
3631 {
3632 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
3633 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
3634 @@ -3195,6 +3196,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
3635 * to finish.
3636 */
3637 r = dm_wait_for_completion(md, interruptible);
3638 + if (!r)
3639 + set_bit(dmf_suspended_flag, &md->flags);
3640
3641 if (noflush)
3642 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
3643 @@ -3256,12 +3259,10 @@ retry:
3644
3645 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3646
3647 - r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE);
3648 + r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
3649 if (r)
3650 goto out_unlock;
3651
3652 - set_bit(DMF_SUSPENDED, &md->flags);
3653 -
3654 dm_table_postsuspend_targets(map);
3655
3656 out_unlock:
3657 @@ -3355,9 +3356,8 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
3658 * would require changing .presuspend to return an error -- avoid this
3659 * until there is a need for more elaborate variants of internal suspend.
3660 */
3661 - (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE);
3662 -
3663 - set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3664 + (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
3665 + DMF_SUSPENDED_INTERNALLY);
3666
3667 dm_table_postsuspend_targets(map);
3668 }
3669 diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c
3670 index 1100e98a7b1d..7df7fb3738a0 100644
3671 --- a/drivers/media/dvb-core/dvb_ringbuffer.c
3672 +++ b/drivers/media/dvb-core/dvb_ringbuffer.c
3673 @@ -55,7 +55,13 @@ void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len)
3674
3675 int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf)
3676 {
3677 - return (rbuf->pread==rbuf->pwrite);
3678 + /* smp_load_acquire() to load write pointer on reader side
3679 + * this pairs with smp_store_release() in dvb_ringbuffer_write(),
3680 + * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset()
3681 + *
3682 + * for memory barriers also see Documentation/circular-buffers.txt
3683 + */
3684 + return (rbuf->pread == smp_load_acquire(&rbuf->pwrite));
3685 }
3686
3687
3688 @@ -64,7 +70,12 @@ ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf)
3689 {
3690 ssize_t free;
3691
3692 - free = rbuf->pread - rbuf->pwrite;
3693 + /* ACCESS_ONCE() to load read pointer on writer side
3694 + * this pairs with smp_store_release() in dvb_ringbuffer_read(),
3695 + * dvb_ringbuffer_read_user(), dvb_ringbuffer_flush(),
3696 + * or dvb_ringbuffer_reset()
3697 + */
3698 + free = ACCESS_ONCE(rbuf->pread) - rbuf->pwrite;
3699 if (free <= 0)
3700 free += rbuf->size;
3701 return free-1;
3702 @@ -76,7 +87,11 @@ ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf)
3703 {
3704 ssize_t avail;
3705
3706 - avail = rbuf->pwrite - rbuf->pread;
3707 + /* smp_load_acquire() to load write pointer on reader side
3708 + * this pairs with smp_store_release() in dvb_ringbuffer_write(),
3709 + * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset()
3710 + */
3711 + avail = smp_load_acquire(&rbuf->pwrite) - rbuf->pread;
3712 if (avail < 0)
3713 avail += rbuf->size;
3714 return avail;
3715 @@ -86,14 +101,25 @@ ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf)
3716
3717 void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf)
3718 {
3719 - rbuf->pread = rbuf->pwrite;
3720 + /* dvb_ringbuffer_flush() counts as read operation
3721 + * smp_load_acquire() to load write pointer
3722 + * smp_store_release() to update read pointer, this ensures that the
3723 + * correct pointer is visible for subsequent dvb_ringbuffer_free()
3724 + * calls on other cpu cores
3725 + */
3726 + smp_store_release(&rbuf->pread, smp_load_acquire(&rbuf->pwrite));
3727 rbuf->error = 0;
3728 }
3729 EXPORT_SYMBOL(dvb_ringbuffer_flush);
3730
3731 void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf)
3732 {
3733 - rbuf->pread = rbuf->pwrite = 0;
3734 + /* dvb_ringbuffer_reset() counts as read and write operation
3735 + * smp_store_release() to update read pointer
3736 + */
3737 + smp_store_release(&rbuf->pread, 0);
3738 + /* smp_store_release() to update write pointer */
3739 + smp_store_release(&rbuf->pwrite, 0);
3740 rbuf->error = 0;
3741 }
3742
3743 @@ -119,12 +145,17 @@ ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, u8 __user *buf, si
3744 return -EFAULT;
3745 buf += split;
3746 todo -= split;
3747 - rbuf->pread = 0;
3748 + /* smp_store_release() for read pointer update to ensure
3749 + * that buf is not overwritten until read is complete,
3750 + * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
3751 + */
3752 + smp_store_release(&rbuf->pread, 0);
3753 }
3754 if (copy_to_user(buf, rbuf->data+rbuf->pread, todo))
3755 return -EFAULT;
3756
3757 - rbuf->pread = (rbuf->pread + todo) % rbuf->size;
3758 + /* smp_store_release() to update read pointer, see above */
3759 + smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size);
3760
3761 return len;
3762 }
3763 @@ -139,11 +170,16 @@ void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len)
3764 memcpy(buf, rbuf->data+rbuf->pread, split);
3765 buf += split;
3766 todo -= split;
3767 - rbuf->pread = 0;
3768 + /* smp_store_release() for read pointer update to ensure
3769 + * that buf is not overwritten until read is complete,
3770 + * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
3771 + */
3772 + smp_store_release(&rbuf->pread, 0);
3773 }
3774 memcpy(buf, rbuf->data+rbuf->pread, todo);
3775
3776 - rbuf->pread = (rbuf->pread + todo) % rbuf->size;
3777 + /* smp_store_release() to update read pointer, see above */
3778 + smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size);
3779 }
3780
3781
3782 @@ -158,10 +194,16 @@ ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf, size_t
3783 memcpy(rbuf->data+rbuf->pwrite, buf, split);
3784 buf += split;
3785 todo -= split;
3786 - rbuf->pwrite = 0;
3787 + /* smp_store_release() for write pointer update to ensure that
3788 + * written data is visible on other cpu cores before the pointer
3789 + * update, this pairs with smp_load_acquire() in
3790 + * dvb_ringbuffer_empty() or dvb_ringbuffer_avail()
3791 + */
3792 + smp_store_release(&rbuf->pwrite, 0);
3793 }
3794 memcpy(rbuf->data+rbuf->pwrite, buf, todo);
3795 - rbuf->pwrite = (rbuf->pwrite + todo) % rbuf->size;
3796 + /* smp_store_release() for write pointer update, see above */
3797 + smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size);
3798
3799 return len;
3800 }
3801 @@ -181,12 +223,18 @@ ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
3802 return len - todo;
3803 buf += split;
3804 todo -= split;
3805 - rbuf->pwrite = 0;
3806 + /* smp_store_release() for write pointer update to ensure that
3807 + * written data is visible on other cpu cores before the pointer
3808 + * update, this pairs with smp_load_acquire() in
3809 + * dvb_ringbuffer_empty() or dvb_ringbuffer_avail()
3810 + */
3811 + smp_store_release(&rbuf->pwrite, 0);
3812 }
3813 status = copy_from_user(rbuf->data+rbuf->pwrite, buf, todo);
3814 if (status)
3815 return len - todo;
3816 - rbuf->pwrite = (rbuf->pwrite + todo) % rbuf->size;
3817 + /* smp_store_release() for write pointer update, see above */
3818 + smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size);
3819
3820 return len;
3821 }
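The dvb_ringbuffer conversion is the classic single-producer/single-consumer discipline from Documentation/circular-buffers.txt: the writer publishes data with a release store of pwrite, the reader observes it with an acquire load, and symmetrically for pread. A compact C11 analogue (release/acquire atomics play the role of smp_store_release()/smp_load_acquire(); this is an illustration, not the kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stddef.h>

	#define RB_SIZE 256

	struct rb {
		unsigned char data[RB_SIZE];
		_Atomic size_t pread, pwrite;
	};

	static bool rb_put(struct rb *b, unsigned char c)	/* writer side */
	{
		size_t w = atomic_load_explicit(&b->pwrite, memory_order_relaxed);
		size_t r = atomic_load_explicit(&b->pread, memory_order_acquire);

		if ((w + 1) % RB_SIZE == r)
			return false;				/* full */
		b->data[w] = c;
		/* release: data[] is visible before the new write pointer */
		atomic_store_explicit(&b->pwrite, (w + 1) % RB_SIZE,
				      memory_order_release);
		return true;
	}

	static bool rb_get(struct rb *b, unsigned char *c)	/* reader side */
	{
		size_t r = atomic_load_explicit(&b->pread, memory_order_relaxed);
		size_t w = atomic_load_explicit(&b->pwrite, memory_order_acquire);

		if (r == w)
			return false;				/* empty */
		*c = b->data[r];
		/* release: the slot may be reused only after *c was copied out */
		atomic_store_explicit(&b->pread, (r + 1) % RB_SIZE,
				      memory_order_release);
		return true;
	}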
3822 diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
3823 index b16466fe35ee..beb4fd5bd326 100644
3824 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
3825 +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
3826 @@ -1050,6 +1050,11 @@ static int match_child(struct device *dev, void *data)
3827 return !strcmp(dev_name(dev), (char *)data);
3828 }
3829
3830 +static void s5p_mfc_memdev_release(struct device *dev)
3831 +{
3832 + dma_release_declared_memory(dev);
3833 +}
3834 +
3835 static void *mfc_get_drv_data(struct platform_device *pdev);
3836
3837 static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
3838 @@ -1062,6 +1067,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
3839 mfc_err("Not enough memory\n");
3840 return -ENOMEM;
3841 }
3842 +
3843 + dev_set_name(dev->mem_dev_l, "%s", "s5p-mfc-l");
3844 + dev->mem_dev_l->release = s5p_mfc_memdev_release;
3845 device_initialize(dev->mem_dev_l);
3846 of_property_read_u32_array(dev->plat_dev->dev.of_node,
3847 "samsung,mfc-l", mem_info, 2);
3848 @@ -1079,6 +1087,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
3849 mfc_err("Not enough memory\n");
3850 return -ENOMEM;
3851 }
3852 +
3853 + dev_set_name(dev->mem_dev_r, "%s", "s5p-mfc-r");
3854 + dev->mem_dev_r->release = s5p_mfc_memdev_release;
3855 device_initialize(dev->mem_dev_r);
3856 of_property_read_u32_array(dev->plat_dev->dev.of_node,
3857 "samsung,mfc-r", mem_info, 2);
3858 diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
3859 index 6ffe776abf6b..a0fd4e6b2155 100644
3860 --- a/drivers/media/rc/ir-rc5-decoder.c
3861 +++ b/drivers/media/rc/ir-rc5-decoder.c
3862 @@ -29,7 +29,7 @@
3863 #define RC5_BIT_START (1 * RC5_UNIT)
3864 #define RC5_BIT_END (1 * RC5_UNIT)
3865 #define RC5X_SPACE (4 * RC5_UNIT)
3866 -#define RC5_TRAILER (10 * RC5_UNIT) /* In reality, approx 100 */
3867 +#define RC5_TRAILER (6 * RC5_UNIT) /* In reality, approx 100 */
3868
3869 enum rc5_state {
3870 STATE_INACTIVE,
3871 diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
3872 index 99b303b702ac..e8ceb0e2f6d7 100644
3873 --- a/drivers/media/rc/nuvoton-cir.c
3874 +++ b/drivers/media/rc/nuvoton-cir.c
3875 @@ -401,6 +401,7 @@ static int nvt_hw_detect(struct nvt_dev *nvt)
3876 /* Check if we're wired for the alternate EFER setup */
3877 nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
3878 if (nvt->chip_major == 0xff) {
3879 + nvt_efm_disable(nvt);
3880 nvt->cr_efir = CR_EFIR2;
3881 nvt->cr_efdr = CR_EFDR2;
3882 nvt_efm_enable(nvt);
3883 diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c
3884 index 78c12d22dfbb..5dab02432e82 100644
3885 --- a/drivers/media/usb/usbtv/usbtv-audio.c
3886 +++ b/drivers/media/usb/usbtv/usbtv-audio.c
3887 @@ -278,6 +278,9 @@ static void snd_usbtv_trigger(struct work_struct *work)
3888 {
3889 struct usbtv *chip = container_of(work, struct usbtv, snd_trigger);
3890
3891 + if (!chip->snd)
3892 + return;
3893 +
3894 if (atomic_read(&chip->snd_stream))
3895 usbtv_audio_start(chip);
3896 else
3897 @@ -378,6 +381,8 @@ err:
3898
3899 void usbtv_audio_free(struct usbtv *usbtv)
3900 {
3901 + cancel_work_sync(&usbtv->snd_trigger);
3902 +
3903 if (usbtv->snd && usbtv->udev) {
3904 snd_card_free(usbtv->snd);
3905 usbtv->snd = NULL;
3906 diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
3907 index 9fbcb67a9ee6..633fc1ab1d7a 100644
3908 --- a/drivers/media/v4l2-core/videobuf2-core.c
3909 +++ b/drivers/media/v4l2-core/videobuf2-core.c
3910 @@ -1648,7 +1648,7 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
3911 void *pb, int nonblocking)
3912 {
3913 unsigned long flags;
3914 - int ret;
3915 + int ret = 0;
3916
3917 /*
3918 * Wait for at least one buffer to become available on the done_list.
3919 @@ -1664,10 +1664,12 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
3920 spin_lock_irqsave(&q->done_lock, flags);
3921 *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
3922 /*
3923 - * Only remove the buffer from done_list if v4l2_buffer can handle all
3924 - * the planes.
3925 + * Only remove the buffer from done_list if all planes can be
3926 + * handled. Some cases such as V4L2 file I/O and DVB have pb
3927 + * == NULL; skip the check then as there's nothing to verify.
3928 */
3929 - ret = call_bufop(q, verify_planes_array, *vb, pb);
3930 + if (pb)
3931 + ret = call_bufop(q, verify_planes_array, *vb, pb);
3932 if (!ret)
3933 list_del(&(*vb)->done_entry);
3934 spin_unlock_irqrestore(&q->done_lock, flags);
3935 diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
3936 index 0b1b8c7b6ce5..7f366f1b0377 100644
3937 --- a/drivers/media/v4l2-core/videobuf2-v4l2.c
3938 +++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
3939 @@ -74,6 +74,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
3940 return 0;
3941 }
3942
3943 +static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
3944 +{
3945 + return __verify_planes_array(vb, pb);
3946 +}
3947 +
3948 /**
3949 * __verify_length() - Verify that the bytesused value for each plane fits in
3950 * the plane length and that the data offset doesn't exceed the bytesused value.
3951 @@ -437,6 +442,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
3952 }
3953
3954 static const struct vb2_buf_ops v4l2_buf_ops = {
3955 + .verify_planes_array = __verify_planes_array_core,
3956 .fill_user_buffer = __fill_v4l2_buffer,
3957 .fill_vb2_buffer = __fill_vb2_buffer,
3958 .copy_timestamp = __copy_timestamp,
3959 diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
3960 index 1be47ad6441b..880d4699bcb0 100644
3961 --- a/drivers/mfd/qcom_rpm.c
3962 +++ b/drivers/mfd/qcom_rpm.c
3963 @@ -34,7 +34,13 @@ struct qcom_rpm_resource {
3964 struct qcom_rpm_data {
3965 u32 version;
3966 const struct qcom_rpm_resource *resource_table;
3967 - unsigned n_resources;
3968 + unsigned int n_resources;
3969 + unsigned int req_ctx_off;
3970 + unsigned int req_sel_off;
3971 + unsigned int ack_ctx_off;
3972 + unsigned int ack_sel_off;
3973 + unsigned int req_sel_size;
3974 + unsigned int ack_sel_size;
3975 };
3976
3977 struct qcom_rpm {
3978 @@ -61,11 +67,7 @@ struct qcom_rpm {
3979
3980 #define RPM_REQUEST_TIMEOUT (5 * HZ)
3981
3982 -#define RPM_REQUEST_CONTEXT 3
3983 -#define RPM_REQ_SELECT 11
3984 -#define RPM_ACK_CONTEXT 15
3985 -#define RPM_ACK_SELECTOR 23
3986 -#define RPM_SELECT_SIZE 7
3987 +#define RPM_MAX_SEL_SIZE 7
3988
3989 #define RPM_NOTIFICATION BIT(30)
3990 #define RPM_REJECTED BIT(31)
3991 @@ -157,6 +159,12 @@ static const struct qcom_rpm_data apq8064_template = {
3992 .version = 3,
3993 .resource_table = apq8064_rpm_resource_table,
3994 .n_resources = ARRAY_SIZE(apq8064_rpm_resource_table),
3995 + .req_ctx_off = 3,
3996 + .req_sel_off = 11,
3997 + .ack_ctx_off = 15,
3998 + .ack_sel_off = 23,
3999 + .req_sel_size = 4,
4000 + .ack_sel_size = 7,
4001 };
4002
4003 static const struct qcom_rpm_resource msm8660_rpm_resource_table[] = {
4004 @@ -240,6 +248,12 @@ static const struct qcom_rpm_data msm8660_template = {
4005 .version = 2,
4006 .resource_table = msm8660_rpm_resource_table,
4007 .n_resources = ARRAY_SIZE(msm8660_rpm_resource_table),
4008 + .req_ctx_off = 3,
4009 + .req_sel_off = 11,
4010 + .ack_ctx_off = 19,
4011 + .ack_sel_off = 27,
4012 + .req_sel_size = 7,
4013 + .ack_sel_size = 7,
4014 };
4015
4016 static const struct qcom_rpm_resource msm8960_rpm_resource_table[] = {
4017 @@ -322,6 +336,12 @@ static const struct qcom_rpm_data msm8960_template = {
4018 .version = 3,
4019 .resource_table = msm8960_rpm_resource_table,
4020 .n_resources = ARRAY_SIZE(msm8960_rpm_resource_table),
4021 + .req_ctx_off = 3,
4022 + .req_sel_off = 11,
4023 + .ack_ctx_off = 15,
4024 + .ack_sel_off = 23,
4025 + .req_sel_size = 4,
4026 + .ack_sel_size = 7,
4027 };
4028
4029 static const struct qcom_rpm_resource ipq806x_rpm_resource_table[] = {
4030 @@ -362,6 +382,12 @@ static const struct qcom_rpm_data ipq806x_template = {
4031 .version = 3,
4032 .resource_table = ipq806x_rpm_resource_table,
4033 .n_resources = ARRAY_SIZE(ipq806x_rpm_resource_table),
4034 + .req_ctx_off = 3,
4035 + .req_sel_off = 11,
4036 + .ack_ctx_off = 15,
4037 + .ack_sel_off = 23,
4038 + .req_sel_size = 4,
4039 + .ack_sel_size = 7,
4040 };
4041
4042 static const struct of_device_id qcom_rpm_of_match[] = {
4043 @@ -380,7 +406,7 @@ int qcom_rpm_write(struct qcom_rpm *rpm,
4044 {
4045 const struct qcom_rpm_resource *res;
4046 const struct qcom_rpm_data *data = rpm->data;
4047 - u32 sel_mask[RPM_SELECT_SIZE] = { 0 };
4048 + u32 sel_mask[RPM_MAX_SEL_SIZE] = { 0 };
4049 int left;
4050 int ret = 0;
4051 int i;
4052 @@ -398,12 +424,12 @@ int qcom_rpm_write(struct qcom_rpm *rpm,
4053 writel_relaxed(buf[i], RPM_REQ_REG(rpm, res->target_id + i));
4054
4055 bitmap_set((unsigned long *)sel_mask, res->select_id, 1);
4056 - for (i = 0; i < ARRAY_SIZE(sel_mask); i++) {
4057 + for (i = 0; i < rpm->data->req_sel_size; i++) {
4058 writel_relaxed(sel_mask[i],
4059 - RPM_CTRL_REG(rpm, RPM_REQ_SELECT + i));
4060 + RPM_CTRL_REG(rpm, rpm->data->req_sel_off + i));
4061 }
4062
4063 - writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, RPM_REQUEST_CONTEXT));
4064 + writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, rpm->data->req_ctx_off));
4065
4066 reinit_completion(&rpm->ack);
4067 regmap_write(rpm->ipc_regmap, rpm->ipc_offset, BIT(rpm->ipc_bit));
4068 @@ -426,10 +452,11 @@ static irqreturn_t qcom_rpm_ack_interrupt(int irq, void *dev)
4069 u32 ack;
4070 int i;
4071
4072 - ack = readl_relaxed(RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
4073 - for (i = 0; i < RPM_SELECT_SIZE; i++)
4074 - writel_relaxed(0, RPM_CTRL_REG(rpm, RPM_ACK_SELECTOR + i));
4075 - writel(0, RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
4076 + ack = readl_relaxed(RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off));
4077 + for (i = 0; i < rpm->data->ack_sel_size; i++)
4078 + writel_relaxed(0,
4079 + RPM_CTRL_REG(rpm, rpm->data->ack_sel_off + i));
4080 + writel(0, RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off));
4081
4082 if (ack & RPM_NOTIFICATION) {
4083 dev_warn(rpm->dev, "ignoring notification!\n");
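The qcom_rpm rework is a data-driven cleanup: register offsets that used to be global #defines become fields of the per-SoC template, because msm8660 places its ack registers at different offsets than the other variants. The shape of the pattern, with offsets copied from the hunks above and a hypothetical write_reg() helper:

	struct variant_layout {
		unsigned int req_ctx_off, req_sel_off;
		unsigned int ack_ctx_off, ack_sel_off;
		unsigned int req_sel_size, ack_sel_size;
	};

	static const struct variant_layout msm8960_layout = {
		.req_ctx_off = 3,  .req_sel_off = 11,
		.ack_ctx_off = 15, .ack_sel_off = 23,
		.req_sel_size = 4, .ack_sel_size = 7,
	};

	static const struct variant_layout msm8660_layout = {
		.req_ctx_off = 3,  .req_sel_off = 11,
		.ack_ctx_off = 19, .ack_sel_off = 27,	/* the odd one out */
		.req_sel_size = 7, .ack_sel_size = 7,
	};

	static void ack_clear(void __iomem *base, const struct variant_layout *l)
	{
		unsigned int i;

		for (i = 0; i < l->ack_sel_size; i++)
			write_reg(base, l->ack_sel_off + i, 0);	/* write_reg() is hypothetical */
	}

Code paths then index through the layout instead of fixed macros, so one driver serves all four SoCs.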
4084 diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
4085 index 0b0dc29d2af7..77533f7f2429 100644
4086 --- a/drivers/mtd/nand/nand_base.c
4087 +++ b/drivers/mtd/nand/nand_base.c
4088 @@ -2610,7 +2610,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
4089 int cached = writelen > bytes && page != blockmask;
4090 uint8_t *wbuf = buf;
4091 int use_bufpoi;
4092 - int part_pagewr = (column || writelen < (mtd->writesize - 1));
4093 + int part_pagewr = (column || writelen < mtd->writesize);
4094
4095 if (part_pagewr)
4096 use_bufpoi = 1;
4097 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
4098 index ef3618299494..0680516bb472 100644
4099 --- a/drivers/mtd/ubi/build.c
4100 +++ b/drivers/mtd/ubi/build.c
4101 @@ -874,7 +874,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
4102 for (i = 0; i < UBI_MAX_DEVICES; i++) {
4103 ubi = ubi_devices[i];
4104 if (ubi && mtd->index == ubi->mtd->index) {
4105 - ubi_err(ubi, "mtd%d is already attached to ubi%d",
4106 + pr_err("ubi: mtd%d is already attached to ubi%d",
4107 mtd->index, i);
4108 return -EEXIST;
4109 }
4110 @@ -889,7 +889,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
4111 * no sense to attach emulated MTD devices, so we prohibit this.
4112 */
4113 if (mtd->type == MTD_UBIVOLUME) {
4114 - ubi_err(ubi, "refuse attaching mtd%d - it is already emulated on top of UBI",
4115 + pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI",
4116 mtd->index);
4117 return -EINVAL;
4118 }
4119 @@ -900,7 +900,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
4120 if (!ubi_devices[ubi_num])
4121 break;
4122 if (ubi_num == UBI_MAX_DEVICES) {
4123 - ubi_err(ubi, "only %d UBI devices may be created",
4124 + pr_err("ubi: only %d UBI devices may be created",
4125 UBI_MAX_DEVICES);
4126 return -ENFILE;
4127 }
4128 @@ -910,7 +910,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
4129
4130 /* Make sure ubi_num is not busy */
4131 if (ubi_devices[ubi_num]) {
4132 - ubi_err(ubi, "already exists");
4133 + pr_err("ubi: ubi%i already exists", ubi_num);
4134 return -EEXIST;
4135 }
4136 }
4137 @@ -992,6 +992,9 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
4138 goto out_detach;
4139 }
4140
4141 + /* Make device "available" before it becomes accessible via sysfs */
4142 + ubi_devices[ubi_num] = ubi;
4143 +
4144 err = uif_init(ubi, &ref);
4145 if (err)
4146 goto out_detach;
4147 @@ -1036,7 +1039,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
4148 wake_up_process(ubi->bgt_thread);
4149 spin_unlock(&ubi->wl_lock);
4150
4151 - ubi_devices[ubi_num] = ubi;
4152 ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
4153 return ubi_num;
4154
4155 @@ -1047,6 +1049,7 @@ out_uif:
4156 ubi_assert(ref);
4157 uif_close(ubi);
4158 out_detach:
4159 + ubi_devices[ubi_num] = NULL;
4160 ubi_wl_close(ubi);
4161 ubi_free_internal_volumes(ubi);
4162 vfree(ubi->vtbl);
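The build.c reordering is a publish/unpublish rule: the device must appear in the global ubi_devices[] table before uif_init() creates sysfs files that can trigger lookups against it, and every later failure must take it out of the table again. A skeleton of the ordering (names are hypothetical):

	devices[num] = dev;		/* publish before user interfaces exist */

	err = create_user_interfaces(dev);
	if (err)
		goto out_unpublish;
	/* ...remaining setup... */
	return num;

out_unpublish:
	devices[num] = NULL;		/* undo publication on any later failure */
	/* ...remaining teardown... */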
4163 diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
4164 index 10059dfdc1b6..0138f526474a 100644
4165 --- a/drivers/mtd/ubi/vmt.c
4166 +++ b/drivers/mtd/ubi/vmt.c
4167 @@ -488,13 +488,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
4168 spin_unlock(&ubi->volumes_lock);
4169 }
4170
4171 - /* Change volume table record */
4172 - vtbl_rec = ubi->vtbl[vol_id];
4173 - vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
4174 - err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
4175 - if (err)
4176 - goto out_acc;
4177 -
4178 if (pebs < 0) {
4179 for (i = 0; i < -pebs; i++) {
4180 err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
4181 @@ -512,6 +505,24 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
4182 spin_unlock(&ubi->volumes_lock);
4183 }
4184
4185 + /*
4186 + * When we shrink a volume we have to flush all pending (erase) work.
4187 + * Otherwise, upon the next attach, UBI can find a LEB with
4188 + * lnum > highest_lnum and refuse to attach.
4189 + */
4190 + if (pebs < 0) {
4191 + err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
4192 + if (err)
4193 + goto out_acc;
4194 + }
4195 +
4196 + /* Change volume table record */
4197 + vtbl_rec = ubi->vtbl[vol_id];
4198 + vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
4199 + err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
4200 + if (err)
4201 + goto out_acc;
4202 +
4203 vol->reserved_pebs = reserved_pebs;
4204 if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
4205 vol->used_ebs = reserved_pebs;
4206 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
4207 index 5b30922b67ec..2ce319903cfb 100644
4208 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
4209 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
4210 @@ -2469,10 +2469,22 @@ void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
4211 void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked)
4212 {
4213 struct brcmf_fws_info *fws = drvr->fws;
4214 + struct brcmf_if *ifp;
4215 + int i;
4216
4217 - fws->bus_flow_blocked = flow_blocked;
4218 - if (!flow_blocked)
4219 - brcmf_fws_schedule_deq(fws);
4220 - else
4221 - fws->stats.bus_flow_block++;
4222 + if (fws->avoid_queueing) {
4223 + for (i = 0; i < BRCMF_MAX_IFS; i++) {
4224 + ifp = drvr->iflist[i];
4225 + if (!ifp || !ifp->ndev)
4226 + continue;
4227 + brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FLOW,
4228 + flow_blocked);
4229 + }
4230 + } else {
4231 + fws->bus_flow_blocked = flow_blocked;
4232 + if (!flow_blocked)
4233 + brcmf_fws_schedule_deq(fws);
4234 + else
4235 + fws->stats.bus_flow_block++;
4236 + }
4237 }
4238 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
4239 index a588b05e38eb..6f020e4ec7dc 100644
4240 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
4241 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
4242 @@ -433,6 +433,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
4243 /* 8000 Series */
4244 {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
4245 {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
4246 + {IWL_PCI_DEVICE(0x24F3, 0x10B0, iwl8260_2ac_cfg)},
4247 {IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)},
4248 {IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)},
4249 {IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)},
4250 @@ -454,6 +455,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
4251 {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
4252 {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
4253 {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
4254 + {IWL_PCI_DEVICE(0x24F3, 0xD0B0, iwl8260_2ac_cfg)},
4255 + {IWL_PCI_DEVICE(0x24F3, 0xB0B0, iwl8260_2ac_cfg)},
4256 {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
4257 {IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)},
4258 {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
4259 @@ -481,6 +484,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
4260 {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
4261 {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
4262 {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
4263 + {IWL_PCI_DEVICE(0x24FD, 0x1130, iwl8265_2ac_cfg)},
4264 + {IWL_PCI_DEVICE(0x24FD, 0x0130, iwl8265_2ac_cfg)},
4265 {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
4266 {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
4267 {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
4268 @@ -491,6 +496,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
4269 {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
4270 {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)},
4271 {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)},
4272 + {IWL_PCI_DEVICE(0x24FD, 0x0910, iwl8265_2ac_cfg)},
4273 + {IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)},
4274 + {IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)},
4275 + {IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
4276
4277 /* 9000 Series */
4278 {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
4279 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
4280 index de6974f9c52f..2d8cce290a15 100644
4281 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
4282 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
4283 @@ -496,7 +496,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans);
4284 /*****************************************************
4285 * Helpers
4286 ******************************************************/
4287 -static inline void iwl_disable_interrupts(struct iwl_trans *trans)
4288 +static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
4289 {
4290 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
4291
4292 @@ -519,7 +519,16 @@ static inline void iwl_disable_interrupts(struct iwl_trans *trans)
4293 IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
4294 }
4295
4296 -static inline void iwl_enable_interrupts(struct iwl_trans *trans)
4297 +static inline void iwl_disable_interrupts(struct iwl_trans *trans)
4298 +{
4299 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
4300 +
4301 + spin_lock(&trans_pcie->irq_lock);
4302 + _iwl_disable_interrupts(trans);
4303 + spin_unlock(&trans_pcie->irq_lock);
4304 +}
4305 +
4306 +static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
4307 {
4308 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
4309
4310 @@ -542,6 +551,14 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)
4311 }
4312 }
4313
4314 +static inline void iwl_enable_interrupts(struct iwl_trans *trans)
4315 +{
4316 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
4317 +
4318 + spin_lock(&trans_pcie->irq_lock);
4319 + _iwl_enable_interrupts(trans);
4320 + spin_unlock(&trans_pcie->irq_lock);
4321 +}
4322 static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
4323 {
4324 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
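The internal.h hunk introduces the usual _foo()/foo() split: the underscore variant assumes the caller already holds irq_lock (as the ISR does), while the plain variant takes the lock itself, so ordinary call sites get correct locking by default. A minimal kernel-style sketch (dev_state and its fields are illustrative):

	/* caller must hold s->lock */
	static void _set_state(struct dev_state *s, int v)
	{
		s->value = v;
	}

	static void set_state(struct dev_state *s, int v)
	{
		spin_lock(&s->lock);
		_set_state(s, v);
		spin_unlock(&s->lock);
	}

This is also why the stop/leave paths in trans.c below drop their open-coded spin_lock/spin_unlock pairs: the locking moved inside iwl_disable_interrupts().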
4325 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
4326 index 0a4a3c502c3c..aaaf2ad6e4da 100644
4327 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
4328 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
4329 @@ -1507,7 +1507,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
4330 * have anything to service
4331 */
4332 if (test_bit(STATUS_INT_ENABLED, &trans->status))
4333 - iwl_enable_interrupts(trans);
4334 + _iwl_enable_interrupts(trans);
4335 spin_unlock(&trans_pcie->irq_lock);
4336 lock_map_release(&trans->sync_cmd_lockdep_map);
4337 return IRQ_NONE;
4338 @@ -1699,15 +1699,17 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
4339 inta & ~trans_pcie->inta_mask);
4340 }
4341
4342 + spin_lock(&trans_pcie->irq_lock);
4343 + /* only re-enable all interrupts if disabled by irq */
4344 + if (test_bit(STATUS_INT_ENABLED, &trans->status))
4345 + _iwl_enable_interrupts(trans);
4346 /* we are loading the firmware, enable FH_TX interrupt only */
4347 - if (handled & CSR_INT_BIT_FH_TX)
4348 + else if (handled & CSR_INT_BIT_FH_TX)
4349 iwl_enable_fw_load_int(trans);
4350 - /* only Re-enable all interrupt if disabled by irq */
4351 - else if (test_bit(STATUS_INT_ENABLED, &trans->status))
4352 - iwl_enable_interrupts(trans);
4353 /* Re-enable RF_KILL if it occurred */
4354 else if (handled & CSR_INT_BIT_RF_KILL)
4355 iwl_enable_rfkill_int(trans);
4356 + spin_unlock(&trans_pcie->irq_lock);
4357
4358 out:
4359 lock_map_release(&trans->sync_cmd_lockdep_map);
4360 @@ -1771,7 +1773,7 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
4361 return;
4362
4363 spin_lock(&trans_pcie->irq_lock);
4364 - iwl_disable_interrupts(trans);
4365 + _iwl_disable_interrupts(trans);
4366
4367 memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
4368
4369 @@ -1787,7 +1789,7 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
4370 trans_pcie->use_ict = true;
4371 trans_pcie->ict_index = 0;
4372 iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
4373 - iwl_enable_interrupts(trans);
4374 + _iwl_enable_interrupts(trans);
4375 spin_unlock(&trans_pcie->irq_lock);
4376 }
4377
4378 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
4379 index f603d7830a6b..d9f139462b31 100644
4380 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
4381 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
4382 @@ -801,6 +801,8 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
4383
4384 *first_ucode_section = last_read_idx;
4385
4386 + iwl_enable_interrupts(trans);
4387 +
4388 if (cpu == 1)
4389 iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
4390 else
4391 @@ -980,6 +982,8 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
4392 iwl_pcie_apply_destination(trans);
4393 }
4394
4395 + iwl_enable_interrupts(trans);
4396 +
4397 /* release CPU reset */
4398 iwl_write32(trans, CSR_RESET, 0);
4399
4400 @@ -1033,9 +1037,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
4401 was_hw_rfkill = iwl_is_rfkill_set(trans);
4402
4403 /* tell the device to stop sending interrupts */
4404 - spin_lock(&trans_pcie->irq_lock);
4405 iwl_disable_interrupts(trans);
4406 - spin_unlock(&trans_pcie->irq_lock);
4407
4408 /* device going down, Stop using ICT table */
4409 iwl_pcie_disable_ict(trans);
4410 @@ -1079,9 +1081,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
4411 * the time, unless the interrupt is ACKed even if the interrupt
4412 * should be masked. Re-ACK all the interrupts here.
4413 */
4414 - spin_lock(&trans_pcie->irq_lock);
4415 iwl_disable_interrupts(trans);
4416 - spin_unlock(&trans_pcie->irq_lock);
4417
4418 /* clear all status bits */
4419 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
4420 @@ -1215,7 +1215,6 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
4421 ret = iwl_pcie_load_given_ucode_8000(trans, fw);
4422 else
4423 ret = iwl_pcie_load_given_ucode(trans, fw);
4424 - iwl_enable_interrupts(trans);
4425
4426 /* re-check RF-Kill state since we may have missed the interrupt */
4427 hw_rfkill = iwl_is_rfkill_set(trans);
4428 @@ -1567,15 +1566,11 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
4429 mutex_lock(&trans_pcie->mutex);
4430
4431 /* disable interrupts - don't enable HW RF kill interrupt */
4432 - spin_lock(&trans_pcie->irq_lock);
4433 iwl_disable_interrupts(trans);
4434 - spin_unlock(&trans_pcie->irq_lock);
4435
4436 iwl_pcie_apm_stop(trans, true);
4437
4438 - spin_lock(&trans_pcie->irq_lock);
4439 iwl_disable_interrupts(trans);
4440 - spin_unlock(&trans_pcie->irq_lock);
4441
4442 iwl_pcie_disable_ict(trans);
4443
4444 diff --git a/drivers/of/base.c b/drivers/of/base.c
4445 index ebf84e3b56d5..8bb3d1adf1b0 100644
4446 --- a/drivers/of/base.c
4447 +++ b/drivers/of/base.c
4448 @@ -112,6 +112,7 @@ static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
4449 return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length);
4450 }
4451
4452 +/* always return newly allocated name, caller must free after use */
4453 static const char *safe_name(struct kobject *kobj, const char *orig_name)
4454 {
4455 const char *name = orig_name;
4456 @@ -126,9 +127,12 @@ static const char *safe_name(struct kobject *kobj, const char *orig_name)
4457 name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i);
4458 }
4459
4460 - if (name != orig_name)
4461 + if (name == orig_name) {
4462 + name = kstrdup(orig_name, GFP_KERNEL);
4463 + } else {
4464 pr_warn("device-tree: Duplicate name in %s, renamed to \"%s\"\n",
4465 kobject_name(kobj), name);
4466 + }
4467 return name;
4468 }
4469
4470 @@ -159,6 +163,7 @@ int __of_add_property_sysfs(struct device_node *np, struct property *pp)
4471 int __of_attach_node_sysfs(struct device_node *np)
4472 {
4473 const char *name;
4474 + struct kobject *parent;
4475 struct property *pp;
4476 int rc;
4477
4478 @@ -171,15 +176,16 @@ int __of_attach_node_sysfs(struct device_node *np)
4479 np->kobj.kset = of_kset;
4480 if (!np->parent) {
4481 /* Nodes without parents are new top level trees */
4482 - rc = kobject_add(&np->kobj, NULL, "%s",
4483 - safe_name(&of_kset->kobj, "base"));
4484 + name = safe_name(&of_kset->kobj, "base");
4485 + parent = NULL;
4486 } else {
4487 name = safe_name(&np->parent->kobj, kbasename(np->full_name));
4488 - if (!name || !name[0])
4489 - return -EINVAL;
4490 -
4491 - rc = kobject_add(&np->kobj, &np->parent->kobj, "%s", name);
4492 + parent = &np->parent->kobj;
4493 }
4494 + if (!name)
4495 + return -ENOMEM;
4496 + rc = kobject_add(&np->kobj, parent, "%s", name);
4497 + kfree(name);
4498 if (rc)
4499 return rc;
4500
4501 @@ -1815,6 +1821,12 @@ int __of_remove_property(struct device_node *np, struct property *prop)
4502 return 0;
4503 }
4504
4505 +void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop)
4506 +{
4507 + sysfs_remove_bin_file(&np->kobj, &prop->attr);
4508 + kfree(prop->attr.attr.name);
4509 +}
4510 +
4511 void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
4512 {
4513 if (!IS_ENABLED(CONFIG_SYSFS))
4514 @@ -1822,7 +1834,7 @@ void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
4515
4516 /* at early boot, bail here and defer setup to of_init() */
4517 if (of_kset && of_node_is_attached(np))
4518 - sysfs_remove_bin_file(&np->kobj, &prop->attr);
4519 + __of_sysfs_remove_bin_file(np, prop);
4520 }
4521
4522 /**
4523 @@ -1895,7 +1907,7 @@ void __of_update_property_sysfs(struct device_node *np, struct property *newprop
4524 return;
4525
4526 if (oldprop)
4527 - sysfs_remove_bin_file(&np->kobj, &oldprop->attr);
4528 + __of_sysfs_remove_bin_file(np, oldprop);
4529 __of_add_property_sysfs(np, newprop);
4530 }
4531
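The of/base changes make safe_name() return a freshly allocated string in every case, so callers can kfree() unconditionally; before, the name == orig_name path returned a borrowed pointer, and the new kfree(prop->attr.attr.name) in __of_sysfs_remove_bin_file() would have freed memory it did not own. A userspace analogue of the ownership rule (the exists() callback is a hypothetical collision check; asprintf() is a GNU/BSD extension):

	#define _GNU_SOURCE		/* for asprintf() */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* always returns an allocated string; the caller frees unconditionally */
	static char *safe_name(const char *orig, int (*exists)(const char *))
	{
		char *name = strdup(orig);
		int i = 0;

		while (name && exists(name)) {
			free(name);
			if (asprintf(&name, "%s#%d", orig, ++i) < 0)
				return NULL;
		}
		return name;
	}

Uniform ownership is what lets the sysfs removal path free the attribute name without tracking where it came from.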
4532 diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
4533 index 3033fa3250dc..a2015599ed7e 100644
4534 --- a/drivers/of/dynamic.c
4535 +++ b/drivers/of/dynamic.c
4536 @@ -55,7 +55,7 @@ void __of_detach_node_sysfs(struct device_node *np)
4537 /* only remove properties if on sysfs */
4538 if (of_node_is_attached(np)) {
4539 for_each_property_of_node(np, pp)
4540 - sysfs_remove_bin_file(&np->kobj, &pp->attr);
4541 + __of_sysfs_remove_bin_file(np, pp);
4542 kobject_del(&np->kobj);
4543 }
4544
4545 diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
4546 index 829469faeb23..18bbb4517e25 100644
4547 --- a/drivers/of/of_private.h
4548 +++ b/drivers/of/of_private.h
4549 @@ -83,6 +83,9 @@ extern int __of_attach_node_sysfs(struct device_node *np);
4550 extern void __of_detach_node(struct device_node *np);
4551 extern void __of_detach_node_sysfs(struct device_node *np);
4552
4553 +extern void __of_sysfs_remove_bin_file(struct device_node *np,
4554 + struct property *prop);
4555 +
4556 /* iterators for transactions, used for overlays */
4557 /* forward iterator */
4558 #define for_each_transaction_entry(_oft, _te) \
4559 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
4560 index ee72ebe18f4b..7902fbf47f89 100644
4561 --- a/drivers/pci/quirks.c
4562 +++ b/drivers/pci/quirks.c
4563 @@ -3189,13 +3189,15 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
4564 }
4565
4566 /*
4567 - * Atheros AR93xx chips do not behave after a bus reset. The device will
4568 - * throw a Link Down error on AER-capable systems and regardless of AER,
4569 - * config space of the device is never accessible again and typically
4570 - * causes the system to hang or reset when access is attempted.
4571 + * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
4572 + * The device will throw a Link Down error on AER-capable systems and
4573 + * regardless of AER, config space of the device is never accessible again
4574 + * and typically causes the system to hang or reset when access is attempted.
4575 * http://www.spinics.net/lists/linux-pci/msg34797.html
4576 */
4577 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
4578 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
4579 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
4580
4581 static void quirk_no_pm_reset(struct pci_dev *dev)
4582 {
4583 diff --git a/drivers/phy/phy-rcar-gen3-usb2.c b/drivers/phy/phy-rcar-gen3-usb2.c
4584 index 4be3f5dbbc9f..31156c9c4707 100644
4585 --- a/drivers/phy/phy-rcar-gen3-usb2.c
4586 +++ b/drivers/phy/phy-rcar-gen3-usb2.c
4587 @@ -21,6 +21,7 @@
4588 #include <linux/phy/phy.h>
4589 #include <linux/platform_device.h>
4590 #include <linux/regulator/consumer.h>
4591 +#include <linux/workqueue.h>
4592
4593 /******* USB2.0 Host registers (original offset is +0x200) *******/
4594 #define USB2_INT_ENABLE 0x000
4595 @@ -81,9 +82,25 @@ struct rcar_gen3_chan {
4596 struct extcon_dev *extcon;
4597 struct phy *phy;
4598 struct regulator *vbus;
4599 + struct work_struct work;
4600 + bool extcon_host;
4601 bool has_otg;
4602 };
4603
4604 +static void rcar_gen3_phy_usb2_work(struct work_struct *work)
4605 +{
4606 + struct rcar_gen3_chan *ch = container_of(work, struct rcar_gen3_chan,
4607 + work);
4608 +
4609 + if (ch->extcon_host) {
4610 + extcon_set_cable_state_(ch->extcon, EXTCON_USB_HOST, true);
4611 + extcon_set_cable_state_(ch->extcon, EXTCON_USB, false);
4612 + } else {
4613 + extcon_set_cable_state_(ch->extcon, EXTCON_USB_HOST, false);
4614 + extcon_set_cable_state_(ch->extcon, EXTCON_USB, true);
4615 + }
4616 +}
4617 +
4618 static void rcar_gen3_set_host_mode(struct rcar_gen3_chan *ch, int host)
4619 {
4620 void __iomem *usb2_base = ch->base;
4621 @@ -130,8 +147,8 @@ static void rcar_gen3_init_for_host(struct rcar_gen3_chan *ch)
4622 rcar_gen3_set_host_mode(ch, 1);
4623 rcar_gen3_enable_vbus_ctrl(ch, 1);
4624
4625 - extcon_set_cable_state_(ch->extcon, EXTCON_USB_HOST, true);
4626 - extcon_set_cable_state_(ch->extcon, EXTCON_USB, false);
4627 + ch->extcon_host = true;
4628 + schedule_work(&ch->work);
4629 }
4630
4631 static void rcar_gen3_init_for_peri(struct rcar_gen3_chan *ch)
4632 @@ -140,8 +157,8 @@ static void rcar_gen3_init_for_peri(struct rcar_gen3_chan *ch)
4633 rcar_gen3_set_host_mode(ch, 0);
4634 rcar_gen3_enable_vbus_ctrl(ch, 0);
4635
4636 - extcon_set_cable_state_(ch->extcon, EXTCON_USB_HOST, false);
4637 - extcon_set_cable_state_(ch->extcon, EXTCON_USB, true);
4638 + ch->extcon_host = false;
4639 + schedule_work(&ch->work);
4640 }
4641
4642 static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
4643 @@ -301,6 +318,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
4644 if (irq >= 0) {
4645 int ret;
4646
4647 + INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
4648 irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
4649 IRQF_SHARED, dev_name(dev), channel);
4650 if (irq < 0)
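
Aside: extcon_set_cable_state_() may sleep, while the role change here is detected in hard-IRQ context, so the hunks above record the desired role in the channel and defer the extcon update to a workqueue. The shape of the pattern, reduced to a hypothetical driver (update_cables() and read_id_pin() are stand-ins, not the driver's API):

	#include <linux/interrupt.h>
	#include <linux/workqueue.h>

	struct my_chan {
		struct work_struct work;
		bool host;			/* latest ID-pin state */
	};

	static void my_work_fn(struct work_struct *work)
	{
		struct my_chan *ch = container_of(work, struct my_chan, work);

		update_cables(ch->host);	/* hypothetical; may sleep */
	}

	static irqreturn_t my_irq(int irq, void *data)
	{
		struct my_chan *ch = data;

		ch->host = read_id_pin(ch);	/* hypothetical, non-sleeping */
		schedule_work(&ch->work);	/* my_work_fn() runs in process ctx */
		return IRQ_HANDLED;
	}
	/* INIT_WORK(&ch->work, my_work_fn) must precede the IRQ request,
	 * exactly as the hunk adds it before devm_request_irq(). */
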
4651 diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
4652 index ac4f564f1c3e..bf65c948b31d 100644
4653 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c
4654 +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
4655 @@ -160,7 +160,6 @@ struct chv_pin_context {
4656 * @pctldev: Pointer to the pin controller device
4657 * @chip: GPIO chip in this pin controller
4658 * @regs: MMIO registers
4659 - * @lock: Lock to serialize register accesses
4660 * @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO
4661 * offset (in GPIO number space)
4662 * @community: Community this pinctrl instance represents
4663 @@ -174,7 +173,6 @@ struct chv_pinctrl {
4664 struct pinctrl_dev *pctldev;
4665 struct gpio_chip chip;
4666 void __iomem *regs;
4667 - raw_spinlock_t lock;
4668 unsigned intr_lines[16];
4669 const struct chv_community *community;
4670 u32 saved_intmask;
4671 @@ -657,6 +655,17 @@ static const struct chv_community *chv_communities[] = {
4672 &southeast_community,
4673 };
4674
4675 +/*
4676 + * Lock to serialize register accesses
4677 + *
4678 + * Due to a silicon issue, a shared lock must be used to prevent
4679 + * concurrent accesses across the 4 GPIO controllers.
4680 + *
4681 + * See Intel Atom Z8000 Processor Series Specification Update (Rev. 005),
4682 + * errata #CHT34, for further information.
4683 + */
4684 +static DEFINE_RAW_SPINLOCK(chv_lock);
4685 +
4686 static void __iomem *chv_padreg(struct chv_pinctrl *pctrl, unsigned offset,
4687 unsigned reg)
4688 {
4689 @@ -718,13 +727,13 @@ static void chv_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
4690 u32 ctrl0, ctrl1;
4691 bool locked;
4692
4693 - raw_spin_lock_irqsave(&pctrl->lock, flags);
4694 + raw_spin_lock_irqsave(&chv_lock, flags);
4695
4696 ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
4697 ctrl1 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL1));
4698 locked = chv_pad_locked(pctrl, offset);
4699
4700 - raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4701 + raw_spin_unlock_irqrestore(&chv_lock, flags);
4702
4703 if (ctrl0 & CHV_PADCTRL0_GPIOEN) {
4704 seq_puts(s, "GPIO ");
4705 @@ -787,14 +796,14 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
4706
4707 grp = &pctrl->community->groups[group];
4708
4709 - raw_spin_lock_irqsave(&pctrl->lock, flags);
4710 + raw_spin_lock_irqsave(&chv_lock, flags);
4711
4712 /* Check first that the pad is not locked */
4713 for (i = 0; i < grp->npins; i++) {
4714 if (chv_pad_locked(pctrl, grp->pins[i])) {
4715 dev_warn(pctrl->dev, "unable to set mode for locked pin %u\n",
4716 grp->pins[i]);
4717 - raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4718 + raw_spin_unlock_irqrestore(&chv_lock, flags);
4719 return -EBUSY;
4720 }
4721 }
4722 @@ -837,7 +846,7 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
4723 pin, altfunc->mode, altfunc->invert_oe ? "" : "not ");
4724 }
4725
4726 - raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4727 + raw_spin_unlock_irqrestore(&chv_lock, flags);
4728
4729 return 0;
4730 }
4731 @@ -851,13 +860,13 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
4732 void __iomem *reg;
4733 u32 value;
4734
4735 - raw_spin_lock_irqsave(&pctrl->lock, flags);
4736 + raw_spin_lock_irqsave(&chv_lock, flags);
4737
4738 if (chv_pad_locked(pctrl, offset)) {
4739 value = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
4740 if (!(value & CHV_PADCTRL0_GPIOEN)) {
4741 /* Locked so cannot enable */
4742 - raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4743 + raw_spin_unlock_irqrestore(&chv_lock, flags);
4744 return -EBUSY;
4745 }
4746 } else {
4747 @@ -897,7 +906,7 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
4748 chv_writel(value, reg);
4749 }
4750
4751 - raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4752 + raw_spin_unlock_irqrestore(&chv_lock, flags);
4753
4754 return 0;
4755 }
4756 @@ -911,13 +920,13 @@ static void chv_gpio_disable_free(struct pinctrl_dev *pctldev,
4757 void __iomem *reg;
4758 u32 value;
4759
4760 - raw_spin_lock_irqsave(&pctrl->lock, flags);
4761 + raw_spin_lock_irqsave(&chv_lock, flags);
4762
4763 reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
4764 value = readl(reg) & ~CHV_PADCTRL0_GPIOEN;
4765 chv_writel(value, reg);
4766
4767 - raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4768 + raw_spin_unlock_irqrestore(&chv_lock, flags);
4769 }
4770
4771 static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
4772 @@ -929,7 +938,7 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
4773 unsigned long flags;
4774 u32 ctrl0;
4775
4776 - raw_spin_lock_irqsave(&pctrl->lock, flags);
4777 + raw_spin_lock_irqsave(&chv_lock, flags);
4778
4779 ctrl0 = readl(reg) & ~CHV_PADCTRL0_GPIOCFG_MASK;
4780 if (input)
4781 @@ -938,7 +947,7 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
4782 ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPO << CHV_PADCTRL0_GPIOCFG_SHIFT;
4783 chv_writel(ctrl0, reg);
4784
4785 - raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4786 + raw_spin_unlock_irqrestore(&chv_lock, flags);
4787
4788 return 0;
4789 }
4790 @@ -963,10 +972,10 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned pin,
4791 u16 arg = 0;
4792 u32 term;
4793
4794 - raw_spin_lock_irqsave(&pctrl->lock, flags);
4795 + raw_spin_lock_irqsave(&chv_lock, flags);
4796 ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
4797 ctrl1 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1));
4798 - raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4799 + raw_spin_unlock_irqrestore(&chv_lock, flags);
4800
4801 term = (ctrl0 & CHV_PADCTRL0_TERM_MASK) >> CHV_PADCTRL0_TERM_SHIFT;
4802
4803 @@ -1040,7 +1049,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
4804 unsigned long flags;
4805 u32 ctrl0, pull;
4806
4807 - raw_spin_lock_irqsave(&pctrl->lock, flags);
4808 + raw_spin_lock_irqsave(&chv_lock, flags);
4809 ctrl0 = readl(reg);
4810
4811 switch (param) {
4812 @@ -1063,7 +1072,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
4813 pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
4814 break;
4815 default:
4816 - raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4817 + raw_spin_unlock_irqrestore(&chv_lock, flags);
4818 return -EINVAL;
4819 }
4820
4821 @@ -1081,7 +1090,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
4822 pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
4823 break;
4824 default:
4825 - raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4826 + raw_spin_unlock_irqrestore(&chv_lock, flags);
4827 return -EINVAL;
4828 }
4829
4830 @@ -1089,12 +1098,12 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
4831 break;
4832
4833 default:
4834 - raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4835 + raw_spin_unlock_irqrestore(&chv_lock, flags);
4836 return -EINVAL;
4837 }
4838
4839 chv_writel(ctrl0, reg);
4840 - raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4841 + raw_spin_unlock_irqrestore(&chv_lock, flags);
4842
4843 return 0;
4844 }
4845 @@ -1160,9 +1169,9 @@ static int chv_gpio_get(struct gpio_chip *chip, unsigned offset)
4846 unsigned long flags;
4847 u32 ctrl0, cfg;
4848
4849 - raw_spin_lock_irqsave(&pctrl->lock, flags);
4850 + raw_spin_lock_irqsave(&chv_lock, flags);
4851 ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
4852 - raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4853 + raw_spin_unlock_irqrestore(&chv_lock, flags);
4854
4855 cfg = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
4856 cfg >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
4857 @@ -1180,7 +1189,7 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
4858 void __iomem *reg;
4859 u32 ctrl0;
4860
4861 - raw_spin_lock_irqsave(&pctrl->lock, flags);
4862 + raw_spin_lock_irqsave(&chv_lock, flags);
4863
4864 reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
4865 ctrl0 = readl(reg);
4866 @@ -1192,7 +1201,7 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
4867
4868 chv_writel(ctrl0, reg);
4869
4870 - raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4871 + raw_spin_unlock_irqrestore(&chv_lock, flags);
4872 }
4873
4874 static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
4875 @@ -1202,9 +1211,9 @@ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
4876 u32 ctrl0, direction;
4877 unsigned long flags;
4878
4879 - raw_spin_lock_irqsave(&pctrl->lock, flags);
4880 + raw_spin_lock_irqsave(&chv_lock, flags);
4881 ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
4882 - raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4883 + raw_spin_unlock_irqrestore(&chv_lock, flags);
4884
4885 direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
4886 direction >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
4887 @@ -1242,14 +1251,14 @@ static void chv_gpio_irq_ack(struct irq_data *d)
4888 int pin = chv_gpio_offset_to_pin(pctrl, irqd_to_hwirq(d));
4889 u32 intr_line;
4890
4891 - raw_spin_lock(&pctrl->lock);
4892 + raw_spin_lock(&chv_lock);
4893
4894 intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
4895 intr_line &= CHV_PADCTRL0_INTSEL_MASK;
4896 intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT;
4897 chv_writel(BIT(intr_line), pctrl->regs + CHV_INTSTAT);
4898
4899 - raw_spin_unlock(&pctrl->lock);
4900 + raw_spin_unlock(&chv_lock);
4901 }
4902
4903 static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
4904 @@ -1260,7 +1269,7 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
4905 u32 value, intr_line;
4906 unsigned long flags;
4907
4908 - raw_spin_lock_irqsave(&pctrl->lock, flags);
4909 + raw_spin_lock_irqsave(&chv_lock, flags);
4910
4911 intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
4912 intr_line &= CHV_PADCTRL0_INTSEL_MASK;
4913 @@ -1273,7 +1282,7 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
4914 value |= BIT(intr_line);
4915 chv_writel(value, pctrl->regs + CHV_INTMASK);
4916
4917 - raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4918 + raw_spin_unlock_irqrestore(&chv_lock, flags);
4919 }
4920
4921 static void chv_gpio_irq_mask(struct irq_data *d)
4922 @@ -1307,7 +1316,7 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
4923 unsigned long flags;
4924 u32 intsel, value;
4925
4926 - raw_spin_lock_irqsave(&pctrl->lock, flags);
4927 + raw_spin_lock_irqsave(&chv_lock, flags);
4928 intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
4929 intsel &= CHV_PADCTRL0_INTSEL_MASK;
4930 intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
4931 @@ -1322,7 +1331,7 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
4932 irq_set_handler_locked(d, handler);
4933 pctrl->intr_lines[intsel] = offset;
4934 }
4935 - raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4936 + raw_spin_unlock_irqrestore(&chv_lock, flags);
4937 }
4938
4939 chv_gpio_irq_unmask(d);
4940 @@ -1338,7 +1347,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
4941 unsigned long flags;
4942 u32 value;
4943
4944 - raw_spin_lock_irqsave(&pctrl->lock, flags);
4945 + raw_spin_lock_irqsave(&chv_lock, flags);
4946
4947 /*
4948 * Pins which can be used as shared interrupt are configured in
4949 @@ -1387,7 +1396,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
4950 else if (type & IRQ_TYPE_LEVEL_MASK)
4951 irq_set_handler_locked(d, handle_level_irq);
4952
4953 - raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4954 + raw_spin_unlock_irqrestore(&chv_lock, flags);
4955
4956 return 0;
4957 }
4958 @@ -1499,7 +1508,6 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
4959 if (i == ARRAY_SIZE(chv_communities))
4960 return -ENODEV;
4961
4962 - raw_spin_lock_init(&pctrl->lock);
4963 pctrl->dev = &pdev->dev;
4964
4965 #ifdef CONFIG_PM_SLEEP
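
Aside: the entire pinctrl-cherryview diff is one mechanical substitution: the per-instance pctrl->lock becomes a single file-scope chv_lock, because erratum CHT34 forbids concurrent register access across the four GPIO communities. The resulting locking pattern in isolation (chv_read_pad() is a hypothetical helper, not from the driver):

	#include <linux/spinlock.h>

	/* One raw spinlock serializes MMIO across every controller instance. */
	static DEFINE_RAW_SPINLOCK(chv_lock);

	static u32 chv_read_pad(struct chv_pinctrl *pctrl, unsigned int offset)
	{
		unsigned long flags;
		u32 val;

		raw_spin_lock_irqsave(&chv_lock, flags);
		val = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
		raw_spin_unlock_irqrestore(&chv_lock, flags);
		return val;
	}
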
4966 diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
4967 index 6f145f2d004d..96ffda493266 100644
4968 --- a/drivers/platform/x86/hp-wmi.c
4969 +++ b/drivers/platform/x86/hp-wmi.c
4970 @@ -718,6 +718,11 @@ static int __init hp_wmi_rfkill_setup(struct platform_device *device)
4971 if (err)
4972 return err;
4973
4974 + err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, &wireless,
4975 + sizeof(wireless), 0);
4976 + if (err)
4977 + return err;
4978 +
4979 if (wireless & 0x1) {
4980 wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
4981 RFKILL_TYPE_WLAN,
4982 @@ -882,7 +887,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
4983 wwan_rfkill = NULL;
4984 rfkill2_count = 0;
4985
4986 - if (hp_wmi_bios_2009_later() || hp_wmi_rfkill_setup(device))
4987 + if (hp_wmi_rfkill_setup(device))
4988 hp_wmi_rfkill2_setup(device);
4989
4990 err = device_create_file(&device->dev, &dev_attr_display);
4991 diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
4992 index 02fb6b4ea820..d838e77dd947 100644
4993 --- a/drivers/regulator/s2mps11.c
4994 +++ b/drivers/regulator/s2mps11.c
4995 @@ -750,7 +750,7 @@ static const struct regulator_linear_range s2mps15_ldo_voltage_ranges3[] = {
4996
4997 /* voltage range for s2mps15 LDO 7, 8, 9 and 10 */
4998 static const struct regulator_linear_range s2mps15_ldo_voltage_ranges4[] = {
4999 - REGULATOR_LINEAR_RANGE(700000, 0xc, 0x18, 25000),
5000 + REGULATOR_LINEAR_RANGE(700000, 0x10, 0x20, 25000),
5001 };
5002
5003 /* voltage range for s2mps15 LDO 1 */
5004 @@ -760,12 +760,12 @@ static const struct regulator_linear_range s2mps15_ldo_voltage_ranges5[] = {
5005
5006 /* voltage range for s2mps15 BUCK 1, 2, 3, 4, 5, 6 and 7 */
5007 static const struct regulator_linear_range s2mps15_buck_voltage_ranges1[] = {
5008 - REGULATOR_LINEAR_RANGE(500000, 0x20, 0xb0, 6250),
5009 + REGULATOR_LINEAR_RANGE(500000, 0x20, 0xc0, 6250),
5010 };
5011
5012 /* voltage range for s2mps15 BUCK 8, 9 and 10 */
5013 static const struct regulator_linear_range s2mps15_buck_voltage_ranges2[] = {
5014 - REGULATOR_LINEAR_RANGE(1000000, 0x20, 0xc0, 12500),
5015 + REGULATOR_LINEAR_RANGE(1000000, 0x20, 0x78, 12500),
5016 };
5017
5018 static const struct regulator_desc s2mps15_regulators[] = {
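
Aside: REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV) maps a selector to min_uV + (sel - min_sel) * step_uV, so the corrected selector bounds above directly change the advertised voltage window; the fixed LDO range tops out at 700000 + (0x20 - 0x10) * 25000 = 1100000 uV, i.e. 1.1 V. A standalone check of that arithmetic (illustrative only, not driver code):

	#include <stdio.h>

	/* Sketch of the selector -> microvolt mapping behind
	 * REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV). */
	static unsigned int sel_to_uv(unsigned int min_uv, unsigned int min_sel,
				      unsigned int step_uv, unsigned int sel)
	{
		return min_uv + (sel - min_sel) * step_uv;
	}

	int main(void)
	{
		/* fixed s2mps15 LDO range: 700 mV base, selectors 0x10..0x20,
		 * 25 mV per step */
		printf("%u uV\n", sel_to_uv(700000, 0x10, 25000, 0x20)); /* 1100000 */
		return 0;
	}
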
5019 diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
5020 index db3958b3f094..fe0539ed9cb5 100644
5021 --- a/drivers/remoteproc/remoteproc_core.c
5022 +++ b/drivers/remoteproc/remoteproc_core.c
5023 @@ -1264,11 +1264,6 @@ int rproc_add(struct rproc *rproc)
5024 if (ret < 0)
5025 return ret;
5026
5027 - /* expose to rproc_get_by_phandle users */
5028 - mutex_lock(&rproc_list_mutex);
5029 - list_add(&rproc->node, &rproc_list);
5030 - mutex_unlock(&rproc_list_mutex);
5031 -
5032 dev_info(dev, "%s is available\n", rproc->name);
5033
5034 dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n");
5035 @@ -1276,8 +1271,16 @@ int rproc_add(struct rproc *rproc)
5036
5037 /* create debugfs entries */
5038 rproc_create_debug_dir(rproc);
5039 + ret = rproc_add_virtio_devices(rproc);
5040 + if (ret < 0)
5041 + return ret;
5042
5043 - return rproc_add_virtio_devices(rproc);
5044 + /* expose to rproc_get_by_phandle users */
5045 + mutex_lock(&rproc_list_mutex);
5046 + list_add(&rproc->node, &rproc_list);
5047 + mutex_unlock(&rproc_list_mutex);
5048 +
5049 + return 0;
5050 }
5051 EXPORT_SYMBOL(rproc_add);
5052
5053 diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
5054 index d01ad7e8078e..4e823c4b7a4d 100644
5055 --- a/drivers/rtc/rtc-s3c.c
5056 +++ b/drivers/rtc/rtc-s3c.c
5057 @@ -149,12 +149,14 @@ static int s3c_rtc_setfreq(struct s3c_rtc *info, int freq)
5058 if (!is_power_of_2(freq))
5059 return -EINVAL;
5060
5061 + s3c_rtc_enable_clk(info);
5062 spin_lock_irq(&info->pie_lock);
5063
5064 if (info->data->set_freq)
5065 info->data->set_freq(info, freq);
5066
5067 spin_unlock_irq(&info->pie_lock);
5068 + s3c_rtc_disable_clk(info);
5069
5070 return 0;
5071 }
5072 diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
5073 index b2afad5a5682..2a34eb5f6161 100644
5074 --- a/drivers/s390/cio/cmf.c
5075 +++ b/drivers/s390/cio/cmf.c
5076 @@ -753,6 +753,17 @@ static void reset_cmb(struct ccw_device *cdev)
5077 cmf_generic_reset(cdev);
5078 }
5079
5080 +static int cmf_enabled(struct ccw_device *cdev)
5081 +{
5082 + int enabled;
5083 +
5084 + spin_lock_irq(cdev->ccwlock);
5085 + enabled = !!cdev->private->cmb;
5086 + spin_unlock_irq(cdev->ccwlock);
5087 +
5088 + return enabled;
5089 +}
5090 +
5091 static struct attribute_group cmf_attr_group;
5092
5093 static struct cmb_operations cmbops_basic = {
5094 @@ -1153,13 +1164,8 @@ static ssize_t cmb_enable_show(struct device *dev,
5095 char *buf)
5096 {
5097 struct ccw_device *cdev = to_ccwdev(dev);
5098 - int enabled;
5099
5100 - spin_lock_irq(cdev->ccwlock);
5101 - enabled = !!cdev->private->cmb;
5102 - spin_unlock_irq(cdev->ccwlock);
5103 -
5104 - return sprintf(buf, "%d\n", enabled);
5105 + return sprintf(buf, "%d\n", cmf_enabled(cdev));
5106 }
5107
5108 static ssize_t cmb_enable_store(struct device *dev,
5109 @@ -1199,15 +1205,20 @@ int ccw_set_cmf(struct ccw_device *cdev, int enable)
5110 * @cdev: The ccw device to be enabled
5111 *
5112 * Returns %0 for success or a negative error value.
5113 - *
5114 + * Note: If this is called on a device for which channel measurement is already
5115 + * enabled, a reset of the measurement data is triggered.
5116 * Context:
5117 * non-atomic
5118 */
5119 int enable_cmf(struct ccw_device *cdev)
5120 {
5121 - int ret;
5122 + int ret = 0;
5123
5124 device_lock(&cdev->dev);
5125 + if (cmf_enabled(cdev)) {
5126 + cmbops->reset(cdev);
5127 + goto out_unlock;
5128 + }
5129 get_device(&cdev->dev);
5130 ret = cmbops->alloc(cdev);
5131 if (ret)
5132 @@ -1226,7 +1237,7 @@ int enable_cmf(struct ccw_device *cdev)
5133 out:
5134 if (ret)
5135 put_device(&cdev->dev);
5136 -
5137 +out_unlock:
5138 device_unlock(&cdev->dev);
5139 return ret;
5140 }
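
Aside: with the locked test factored into cmf_enabled(), enable_cmf() becomes idempotent: enabling measurement on a device that already has it just resets the counters instead of allocating a second cmb. A sketch of the guard, with hypothetical names:

	/* Sketch: a second enable resets data rather than re-allocating. */
	int enable_measurement(struct my_dev *d)
	{
		int ret = 0;

		device_lock(&d->dev);
		if (measurement_enabled(d)) {	/* locked read, as cmf_enabled() */
			reset_measurement(d);
			goto out_unlock;
		}
		ret = alloc_and_start(d);
	out_unlock:
		device_unlock(&d->dev);
		return ret;
	}
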
5141 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
5142 index 3bd0be6277b3..c7e5695da4f5 100644
5143 --- a/drivers/scsi/lpfc/lpfc_scsi.c
5144 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
5145 @@ -3874,7 +3874,7 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
5146 uint32_t tag;
5147 uint16_t hwq;
5148
5149 - if (shost_use_blk_mq(cmnd->device->host)) {
5150 + if (cmnd && shost_use_blk_mq(cmnd->device->host)) {
5151 tag = blk_mq_unique_tag(cmnd->request);
5152 hwq = blk_mq_unique_tag_to_hwq(tag);
5153
5154 diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
5155 index f4b0690450d2..2dab3dc2aa69 100644
5156 --- a/drivers/scsi/megaraid/megaraid_sas_base.c
5157 +++ b/drivers/scsi/megaraid/megaraid_sas_base.c
5158 @@ -4079,6 +4079,12 @@ megasas_get_pd_list(struct megasas_instance *instance)
5159 struct MR_PD_ADDRESS *pd_addr;
5160 dma_addr_t ci_h = 0;
5161
5162 + if (instance->pd_list_not_supported) {
5163 + dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
5164 + "not supported by firmware\n");
5165 + return ret;
5166 + }
5167 +
5168 cmd = megasas_get_cmd(instance);
5169
5170 if (!cmd) {
5171 diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
5172 index f1eed7f9dd67..9c2788b8f2c3 100644
5173 --- a/drivers/soc/qcom/smp2p.c
5174 +++ b/drivers/soc/qcom/smp2p.c
5175 @@ -344,11 +344,12 @@ static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
5176 /* Allocate an entry from the smem item */
5177 strlcpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME);
5178 memcpy_toio(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);
5179 - out->valid_entries++;
5180
5181 /* Make the logical entry reference the physical value */
5182 entry->value = &out->entries[out->valid_entries].value;
5183
5184 + out->valid_entries++;
5185 +
5186 entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry);
5187 if (IS_ERR(entry->state)) {
5188 dev_err(smp2p->dev, "failed to register qcom_smem_state\n");
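
Note: the old order incremented valid_entries between writing the name into slot valid_entries and taking the value pointer, so entry->value ended up referencing the slot after the one just named; it also published the entry to the remote before it was complete. The corrected sequence, condensed from the hunk:

	/* Sketch: fill slot i completely, then publish by bumping the count. */
	unsigned int i = out->valid_entries;

	memcpy_toio(out->entries[i].name, buf, SMP2P_MAX_ENTRY_NAME);
	entry->value = &out->entries[i].value;	/* same slot the name went to */
	out->valid_entries = i + 1;		/* remote may now consume slot i */
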
5189 diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
5190 index fe07c0592b44..daf28443b7ad 100644
5191 --- a/drivers/spi/spi-pxa2xx.c
5192 +++ b/drivers/spi/spi-pxa2xx.c
5193 @@ -585,7 +585,14 @@ static void reset_sccr1(struct driver_data *drv_data)
5194 u32 sccr1_reg;
5195
5196 sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
5197 - sccr1_reg &= ~SSCR1_RFT;
5198 + switch (drv_data->ssp_type) {
5199 + case QUARK_X1000_SSP:
5200 + sccr1_reg &= ~QUARK_X1000_SSCR1_RFT;
5201 + break;
5202 + default:
5203 + sccr1_reg &= ~SSCR1_RFT;
5204 + break;
5205 + }
5206 sccr1_reg |= chip->threshold;
5207 pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
5208 }
5209 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
5210 index 50f3d3a0dd7b..39b928c2849d 100644
5211 --- a/drivers/target/iscsi/iscsi_target.c
5212 +++ b/drivers/target/iscsi/iscsi_target.c
5213 @@ -492,7 +492,8 @@ void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
5214 bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
5215
5216 spin_lock_bh(&conn->cmd_lock);
5217 - if (!list_empty(&cmd->i_conn_node))
5218 + if (!list_empty(&cmd->i_conn_node) &&
5219 + !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
5220 list_del_init(&cmd->i_conn_node);
5221 spin_unlock_bh(&conn->cmd_lock);
5222
5223 @@ -4034,6 +4035,7 @@ int iscsi_target_rx_thread(void *arg)
5224
5225 static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
5226 {
5227 + LIST_HEAD(tmp_list);
5228 struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
5229 struct iscsi_session *sess = conn->sess;
5230 /*
5231 @@ -4042,18 +4044,26 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
5232 * has been reset -> returned sleeping pre-handler state.
5233 */
5234 spin_lock_bh(&conn->cmd_lock);
5235 - list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
5236 + list_splice_init(&conn->conn_cmd_list, &tmp_list);
5237
5238 + list_for_each_entry(cmd, &tmp_list, i_conn_node) {
5239 + struct se_cmd *se_cmd = &cmd->se_cmd;
5240 +
5241 + if (se_cmd->se_tfo != NULL) {
5242 + spin_lock(&se_cmd->t_state_lock);
5243 + se_cmd->transport_state |= CMD_T_FABRIC_STOP;
5244 + spin_unlock(&se_cmd->t_state_lock);
5245 + }
5246 + }
5247 + spin_unlock_bh(&conn->cmd_lock);
5248 +
5249 + list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
5250 list_del_init(&cmd->i_conn_node);
5251 - spin_unlock_bh(&conn->cmd_lock);
5252
5253 iscsit_increment_maxcmdsn(cmd, sess);
5254 -
5255 iscsit_free_cmd(cmd, true);
5256
5257 - spin_lock_bh(&conn->cmd_lock);
5258 }
5259 - spin_unlock_bh(&conn->cmd_lock);
5260 }
5261
5262 static void iscsit_stop_timers_for_cmds(
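
Aside: the rework above replaces the drop-and-retake dance around iscsit_free_cmd() with the standard splice idiom: detach the whole list under conn->cmd_lock (also flagging each command CMD_T_FABRIC_STOP), then free the commands with no lock held. The idiom in isolation (conn_lock, conn_list and free_item() are hypothetical):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	/* Sketch: detach the list under the lock, process it lock-free. */
	static void release_all(void)
	{
		LIST_HEAD(tmp_list);
		struct item *it, *n;

		spin_lock_bh(&conn_lock);
		list_splice_init(&conn_list, &tmp_list);  /* conn_list now empty */
		spin_unlock_bh(&conn_lock);

		list_for_each_entry_safe(it, n, &tmp_list, node) {
			list_del_init(&it->node);
			free_item(it);		/* may sleep; no lock held */
		}
	}
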
5263 diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
5264 index b5212f0f9571..adf419fa4291 100644
5265 --- a/drivers/target/iscsi/iscsi_target_login.c
5266 +++ b/drivers/target/iscsi/iscsi_target_login.c
5267 @@ -1371,8 +1371,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
5268 }
5269 login->zero_tsih = zero_tsih;
5270
5271 - conn->sess->se_sess->sup_prot_ops =
5272 - conn->conn_transport->iscsit_get_sup_prot_ops(conn);
5273 + if (conn->sess)
5274 + conn->sess->se_sess->sup_prot_ops =
5275 + conn->conn_transport->iscsit_get_sup_prot_ops(conn);
5276
5277 tpg = conn->tpg;
5278 if (!tpg) {
5279 diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
5280 index a4046ca6e60d..6b423485c5d6 100644
5281 --- a/drivers/target/target_core_device.c
5282 +++ b/drivers/target/target_core_device.c
5283 @@ -821,13 +821,15 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
5284 * in ATA and we need to set TPE=1
5285 */
5286 bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
5287 - struct request_queue *q, int block_size)
5288 + struct request_queue *q)
5289 {
5290 + int block_size = queue_logical_block_size(q);
5291 +
5292 if (!blk_queue_discard(q))
5293 return false;
5294
5295 - attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
5296 - block_size;
5297 + attrib->max_unmap_lba_count =
5298 + q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
5299 /*
5300 * Currently hardcoded to 1 in Linux/SCSI code..
5301 */
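
Aside: max_discard_sectors counts 512-byte sectors, so the logical-block count is (sectors * 512) / block_size; the hunk computes that as sectors >> (ilog2(block_size) - 9), avoiding the intermediate multiply that the old "<< 9" form could overflow in 32 bits. With 4096-byte blocks the shift is 12 - 9 = 3, i.e. divide the sector count by 8. A standalone check (illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned int max_discard_sectors = 2097152; /* 1 GiB in 512 B sectors */
		int shift = 12 - 9;	/* ilog2(4096) - 9 for 4 KiB blocks */

		/* 262144 blocks * 4096 B = 1 GiB, no overflow on the way */
		printf("%u\n", max_discard_sectors >> shift);
		return 0;
	}
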
5302 diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
5303 index 75f0f08b2a34..79291869bce6 100644
5304 --- a/drivers/target/target_core_file.c
5305 +++ b/drivers/target/target_core_file.c
5306 @@ -161,8 +161,7 @@ static int fd_configure_device(struct se_device *dev)
5307 dev_size, div_u64(dev_size, fd_dev->fd_block_size),
5308 fd_dev->fd_block_size);
5309
5310 - if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
5311 - fd_dev->fd_block_size))
5312 + if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
5313 pr_debug("IFILE: BLOCK Discard support available,"
5314 " disabled by default\n");
5315 /*
5316 diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
5317 index 7c4efb4417b0..2077bc28640a 100644
5318 --- a/drivers/target/target_core_iblock.c
5319 +++ b/drivers/target/target_core_iblock.c
5320 @@ -121,8 +121,7 @@ static int iblock_configure_device(struct se_device *dev)
5321 dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
5322 dev->dev_attrib.hw_queue_depth = q->nr_requests;
5323
5324 - if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
5325 - dev->dev_attrib.hw_block_size))
5326 + if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
5327 pr_debug("IBLOCK: BLOCK Discard support available,"
5328 " disabled by default\n");
5329
5330 diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
5331 index fc91e85f54ba..e2c970a9d61c 100644
5332 --- a/drivers/target/target_core_internal.h
5333 +++ b/drivers/target/target_core_internal.h
5334 @@ -146,6 +146,7 @@ sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
5335 void target_qf_do_work(struct work_struct *work);
5336 bool target_check_wce(struct se_device *dev);
5337 bool target_check_fua(struct se_device *dev);
5338 +void __target_execute_cmd(struct se_cmd *, bool);
5339
5340 /* target_core_stat.c */
5341 void target_stat_setup_dev_default_groups(struct se_device *);
5342 diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
5343 index a9057aa07176..04f616b3ba0a 100644
5344 --- a/drivers/target/target_core_sbc.c
5345 +++ b/drivers/target/target_core_sbc.c
5346 @@ -602,7 +602,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
5347 cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
5348 spin_unlock_irq(&cmd->t_state_lock);
5349
5350 - __target_execute_cmd(cmd);
5351 + __target_execute_cmd(cmd, false);
5352
5353 kfree(buf);
5354 return ret;
5355 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
5356 index 5ab3967dda43..42c2a44b83dd 100644
5357 --- a/drivers/target/target_core_transport.c
5358 +++ b/drivers/target/target_core_transport.c
5359 @@ -1303,23 +1303,6 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
5360
5361 trace_target_sequencer_start(cmd);
5362
5363 - /*
5364 - * Check for an existing UNIT ATTENTION condition
5365 - */
5366 - ret = target_scsi3_ua_check(cmd);
5367 - if (ret)
5368 - return ret;
5369 -
5370 - ret = target_alua_state_check(cmd);
5371 - if (ret)
5372 - return ret;
5373 -
5374 - ret = target_check_reservation(cmd);
5375 - if (ret) {
5376 - cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
5377 - return ret;
5378 - }
5379 -
5380 ret = dev->transport->parse_cdb(cmd);
5381 if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
5382 pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
5383 @@ -1761,20 +1744,45 @@ queue_full:
5384 }
5385 EXPORT_SYMBOL(transport_generic_request_failure);
5386
5387 -void __target_execute_cmd(struct se_cmd *cmd)
5388 +void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
5389 {
5390 sense_reason_t ret;
5391
5392 - if (cmd->execute_cmd) {
5393 - ret = cmd->execute_cmd(cmd);
5394 - if (ret) {
5395 - spin_lock_irq(&cmd->t_state_lock);
5396 - cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
5397 - spin_unlock_irq(&cmd->t_state_lock);
5398 + if (!cmd->execute_cmd) {
5399 + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
5400 + goto err;
5401 + }
5402 + if (do_checks) {
5403 + /*
5404 + * Check for an existing UNIT ATTENTION condition after
5405 + * target_handle_task_attr() has done SAM task attr
5406 + * checking, and possibly have already deferred execution
5407 + * out to target_restart_delayed_cmds() context.
5408 + */
5409 + ret = target_scsi3_ua_check(cmd);
5410 + if (ret)
5411 + goto err;
5412 +
5413 + ret = target_alua_state_check(cmd);
5414 + if (ret)
5415 + goto err;
5416
5417 - transport_generic_request_failure(cmd, ret);
5418 + ret = target_check_reservation(cmd);
5419 + if (ret) {
5420 + cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
5421 + goto err;
5422 }
5423 }
5424 +
5425 + ret = cmd->execute_cmd(cmd);
5426 + if (!ret)
5427 + return;
5428 +err:
5429 + spin_lock_irq(&cmd->t_state_lock);
5430 + cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
5431 + spin_unlock_irq(&cmd->t_state_lock);
5432 +
5433 + transport_generic_request_failure(cmd, ret);
5434 }
5435
5436 static int target_write_prot_action(struct se_cmd *cmd)
5437 @@ -1819,6 +1827,8 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
5438 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
5439 return false;
5440
5441 + cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
5442 +
5443 /*
5444 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
5445 * to allow the passed struct se_cmd list of tasks to the front of the list.
5446 @@ -1899,7 +1909,7 @@ void target_execute_cmd(struct se_cmd *cmd)
5447 return;
5448 }
5449
5450 - __target_execute_cmd(cmd);
5451 + __target_execute_cmd(cmd, true);
5452 }
5453 EXPORT_SYMBOL(target_execute_cmd);
5454
5455 @@ -1923,7 +1933,7 @@ static void target_restart_delayed_cmds(struct se_device *dev)
5456 list_del(&cmd->se_delayed_node);
5457 spin_unlock(&dev->delayed_cmd_lock);
5458
5459 - __target_execute_cmd(cmd);
5460 + __target_execute_cmd(cmd, true);
5461
5462 if (cmd->sam_task_attr == TCM_ORDERED_TAG)
5463 break;
5464 @@ -1941,6 +1951,9 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
5465 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
5466 return;
5467
5468 + if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
5469 + goto restart;
5470 +
5471 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
5472 atomic_dec_mb(&dev->simple_cmds);
5473 dev->dev_cur_ordered_id++;
5474 @@ -1957,7 +1970,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
5475 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
5476 dev->dev_cur_ordered_id);
5477 }
5478 -
5479 +restart:
5480 target_restart_delayed_cmds(dev);
5481 }
5482
5483 @@ -2557,15 +2570,10 @@ static void target_release_cmd_kref(struct kref *kref)
5484 bool fabric_stop;
5485
5486 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
5487 - if (list_empty(&se_cmd->se_cmd_list)) {
5488 - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
5489 - target_free_cmd_mem(se_cmd);
5490 - se_cmd->se_tfo->release_cmd(se_cmd);
5491 - return;
5492 - }
5493
5494 spin_lock(&se_cmd->t_state_lock);
5495 - fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
5496 + fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
5497 + (se_cmd->transport_state & CMD_T_ABORTED);
5498 spin_unlock(&se_cmd->t_state_lock);
5499
5500 if (se_cmd->cmd_wait_set || fabric_stop) {
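
Note: the admission checks (unit attention, ALUA state, reservations) move from CDB setup into __target_execute_cmd(), gated by a do_checks flag: commands entering via target_execute_cmd() and delayed ordered commands are checked when they actually run, while compare_and_write's internal resubmission, already checked once, passes false. The control flow in miniature (names hypothetical):

	/* Sketch: run admission checks at execution time; internal
	 * resubmissions that were already checked pass do_checks=false. */
	static void execute(struct my_cmd *c, bool do_checks)
	{
		int ret = 0;

		if (do_checks)
			ret = admission_checks(c);	/* UA, ALUA, reservations */
		if (!ret)
			ret = c->execute(c);
		if (ret)
			fail_command(c, ret);		/* one shared error path */
	}
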
5501 diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
5502 index 954941dd8124..f9c798cba83f 100644
5503 --- a/drivers/tty/serial/atmel_serial.c
5504 +++ b/drivers/tty/serial/atmel_serial.c
5505 @@ -482,19 +482,21 @@ static void atmel_start_tx(struct uart_port *port)
5506 {
5507 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
5508
5509 - if (atmel_use_pdc_tx(port)) {
5510 - if (atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN)
5511 - /* The transmitter is already running. Yes, we
5512 - really need this.*/
5513 - return;
5514 + if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
5515 + & ATMEL_PDC_TXTEN))
5516 + /* The transmitter is already running. Yes, we
5517 + really need this.*/
5518 + return;
5519
5520 + if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
5521 if ((port->rs485.flags & SER_RS485_ENABLED) &&
5522 !(port->rs485.flags & SER_RS485_RX_DURING_TX))
5523 atmel_stop_rx(port);
5524
5525 + if (atmel_use_pdc_tx(port))
5526 /* re-enable PDC transmit */
5527 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
5528 - }
5529 +
5530 /* Enable interrupts */
5531 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
5532 }
5533 diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
5534 index b7d80bd57db9..7d62610d9de5 100644
5535 --- a/drivers/tty/serial/msm_serial.c
5536 +++ b/drivers/tty/serial/msm_serial.c
5537 @@ -726,7 +726,7 @@ static void msm_handle_tx(struct uart_port *port)
5538 return;
5539 }
5540
5541 - pio_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
5542 + pio_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
5543 dma_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
5544
5545 dma_min = 1; /* Always DMA */
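
Aside: CIRC_CNT() returns everything pending in the ring, but PIO copies a single contiguous span starting at the tail, so it must be bounded by CIRC_CNT_TO_END() or it reads past the buffer end when the data wraps. The macros below are the ones from include/linux/circ_buf.h; the example picks a wrapped state where the two counts differ:

	#include <stdio.h>

	#define CIRC_CNT(head, tail, size)  (((head) - (tail)) & ((size) - 1))
	#define CIRC_CNT_TO_END(head, tail, size) ({ \
		int end = (size) - (tail); \
		int n = ((head) + end) & ((size) - 1); \
		n < end ? n : end; })

	int main(void)
	{
		int head = 2, tail = 250, size = 256;

		/* 8 bytes pending in total, but only 6 are contiguous at the
		 * tail; PIO of 8 would run 2 bytes past the buffer end. */
		printf("%d %d\n", CIRC_CNT(head, tail, size),
		       CIRC_CNT_TO_END(head, tail, size));
		return 0;
	}
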
5546 diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
5547 index 99bb23161dd6..f0bd2ec0db59 100644
5548 --- a/drivers/tty/serial/samsung.c
5549 +++ b/drivers/tty/serial/samsung.c
5550 @@ -1684,7 +1684,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
5551 return -ENODEV;
5552
5553 if (port->mapbase != 0)
5554 - return 0;
5555 + return -EINVAL;
5556
5557 /* setup info for port */
5558 port->dev = &platdev->dev;
5559 @@ -1738,22 +1738,25 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
5560 ourport->dma = devm_kzalloc(port->dev,
5561 sizeof(*ourport->dma),
5562 GFP_KERNEL);
5563 - if (!ourport->dma)
5564 - return -ENOMEM;
5565 + if (!ourport->dma) {
5566 + ret = -ENOMEM;
5567 + goto err;
5568 + }
5569 }
5570
5571 ourport->clk = clk_get(&platdev->dev, "uart");
5572 if (IS_ERR(ourport->clk)) {
5573 pr_err("%s: Controller clock not found\n",
5574 dev_name(&platdev->dev));
5575 - return PTR_ERR(ourport->clk);
5576 + ret = PTR_ERR(ourport->clk);
5577 + goto err;
5578 }
5579
5580 ret = clk_prepare_enable(ourport->clk);
5581 if (ret) {
5582 pr_err("uart: clock failed to prepare+enable: %d\n", ret);
5583 clk_put(ourport->clk);
5584 - return ret;
5585 + goto err;
5586 }
5587
5588 /* Keep all interrupts masked and cleared */
5589 @@ -1769,7 +1772,12 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
5590
5591 /* reset the fifos (and setup the uart) */
5592 s3c24xx_serial_resetport(port, cfg);
5593 +
5594 return 0;
5595 +
5596 +err:
5597 + port->mapbase = 0;
5598 + return ret;
5599 }
5600
5601 /* Device driver serial port probe */
5602 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
5603 index 944a6dca0fcb..d2e50a27140c 100644
5604 --- a/drivers/usb/core/quirks.c
5605 +++ b/drivers/usb/core/quirks.c
5606 @@ -128,6 +128,9 @@ static const struct usb_device_id usb_quirk_list[] = {
5607 { USB_DEVICE(0x04f3, 0x016f), .driver_info =
5608 USB_QUIRK_DEVICE_QUALIFIER },
5609
5610 + { USB_DEVICE(0x04f3, 0x0381), .driver_info =
5611 + USB_QUIRK_NO_LPM },
5612 +
5613 { USB_DEVICE(0x04f3, 0x21b8), .driver_info =
5614 USB_QUIRK_DEVICE_QUALIFIER },
5615
5616 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
5617 index 07248ff1be5c..716f4f051a0a 100644
5618 --- a/drivers/usb/dwc3/gadget.c
5619 +++ b/drivers/usb/dwc3/gadget.c
5620 @@ -258,11 +258,13 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
5621 * We will also set SUSPHY bit to what it was before returning as stated
5622 * by the same section on Synopsys databook.
5623 */
5624 - reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
5625 - if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
5626 - susphy = true;
5627 - reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
5628 - dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
5629 + if (dwc->gadget.speed <= USB_SPEED_HIGH) {
5630 + reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
5631 + if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
5632 + susphy = true;
5633 + reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
5634 + dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
5635 + }
5636 }
5637
5638 if (cmd == DWC3_DEPCMD_STARTTRANSFER) {
5639 @@ -2023,6 +2025,10 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
5640 return 1;
5641 }
5642
5643 + if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
5644 + if ((event->status & DEPEVT_STATUS_IOC) &&
5645 + (trb->ctrl & DWC3_TRB_CTRL_IOC))
5646 + return 0;
5647 return 1;
5648 }
5649
5650 diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
5651 index 18569de06b04..bb1f6c8f0f01 100644
5652 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c
5653 +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
5654 @@ -1920,6 +1920,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
5655
5656 udc->errata = match->data;
5657 udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc");
5658 + if (IS_ERR(udc->pmc))
5659 + udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9x5-pmc");
5660 if (udc->errata && IS_ERR(udc->pmc))
5661 return ERR_CAST(udc->pmc);
5662
5663 diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
5664 index ebc51ec5790a..71751429814f 100644
5665 --- a/drivers/usb/gadget/udc/pch_udc.c
5666 +++ b/drivers/usb/gadget/udc/pch_udc.c
5667 @@ -1477,11 +1477,11 @@ static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
5668 req->dma_mapped = 0;
5669 }
5670 ep->halted = 1;
5671 - spin_lock(&dev->lock);
5672 + spin_unlock(&dev->lock);
5673 if (!ep->in)
5674 pch_udc_ep_clear_rrdy(ep);
5675 usb_gadget_giveback_request(&ep->ep, &req->req);
5676 - spin_unlock(&dev->lock);
5677 + spin_lock(&dev->lock);
5678 ep->halted = halted;
5679 }
5680
5681 @@ -2573,9 +2573,9 @@ static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
5682 empty_req_queue(ep);
5683 }
5684 if (dev->driver) {
5685 - spin_lock(&dev->lock);
5686 - usb_gadget_udc_reset(&dev->gadget, dev->driver);
5687 spin_unlock(&dev->lock);
5688 + usb_gadget_udc_reset(&dev->gadget, dev->driver);
5689 + spin_lock(&dev->lock);
5690 }
5691 }
5692
5693 @@ -2654,9 +2654,9 @@ static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
5694 dev->ep[i].halted = 0;
5695 }
5696 dev->stall = 0;
5697 - spin_lock(&dev->lock);
5698 - dev->driver->setup(&dev->gadget, &dev->setup_data);
5699 spin_unlock(&dev->lock);
5700 + dev->driver->setup(&dev->gadget, &dev->setup_data);
5701 + spin_lock(&dev->lock);
5702 }
5703
5704 /**
5705 @@ -2691,9 +2691,9 @@ static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
5706 dev->stall = 0;
5707
5708 /* call gadget zero with setup data received */
5709 - spin_lock(&dev->lock);
5710 - dev->driver->setup(&dev->gadget, &dev->setup_data);
5711 spin_unlock(&dev->lock);
5712 + dev->driver->setup(&dev->gadget, &dev->setup_data);
5713 + spin_lock(&dev->lock);
5714 }
5715
5716 /**
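
Note: the four pch_udc hunks all fix the same inversion: the code took dev->lock around callbacks into the gadget driver (usb_gadget_giveback_request(), driver->setup()) even though the caller already holds that lock, where the convention is the opposite: drop the lock across the callback so the driver may re-enter the UDC, e.g. to queue the next request. The corrected idiom, condensed from the hunks:

	/* The caller holds dev->lock; release it across the gadget
	 * callback, then retake it before continuing. */
	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
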
5717 diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
5718 index 7be4e7d57ace..280ed5ff021b 100644
5719 --- a/drivers/usb/renesas_usbhs/fifo.c
5720 +++ b/drivers/usb/renesas_usbhs/fifo.c
5721 @@ -810,20 +810,27 @@ static void xfer_work(struct work_struct *work)
5722 {
5723 struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
5724 struct usbhs_pipe *pipe = pkt->pipe;
5725 - struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
5726 + struct usbhs_fifo *fifo;
5727 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
5728 struct dma_async_tx_descriptor *desc;
5729 - struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
5730 + struct dma_chan *chan;
5731 struct device *dev = usbhs_priv_to_dev(priv);
5732 enum dma_transfer_direction dir;
5733 + unsigned long flags;
5734
5735 + usbhs_lock(priv, flags);
5736 + fifo = usbhs_pipe_to_fifo(pipe);
5737 + if (!fifo)
5738 + goto xfer_work_end;
5739 +
5740 + chan = usbhsf_dma_chan_get(fifo, pkt);
5741 dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
5742
5743 desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
5744 pkt->trans, dir,
5745 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
5746 if (!desc)
5747 - return;
5748 + goto xfer_work_end;
5749
5750 desc->callback = usbhsf_dma_complete;
5751 desc->callback_param = pipe;
5752 @@ -831,7 +838,7 @@ static void xfer_work(struct work_struct *work)
5753 pkt->cookie = dmaengine_submit(desc);
5754 if (pkt->cookie < 0) {
5755 dev_err(dev, "Failed to submit dma descriptor\n");
5756 - return;
5757 + goto xfer_work_end;
5758 }
5759
5760 dev_dbg(dev, " %s %d (%d/ %d)\n",
5761 @@ -842,6 +849,9 @@ static void xfer_work(struct work_struct *work)
5762 usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
5763 dma_async_issue_pending(chan);
5764 usbhs_pipe_enable(pipe);
5765 +
5766 +xfer_work_end:
5767 + usbhs_unlock(priv, flags);
5768 }
5769
5770 /*
5771 diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
5772 index 30345c2d01be..50f3363cc382 100644
5773 --- a/drivers/usb/renesas_usbhs/mod_gadget.c
5774 +++ b/drivers/usb/renesas_usbhs/mod_gadget.c
5775 @@ -585,6 +585,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
5776 struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
5777 struct usbhs_pipe *pipe;
5778 int ret = -EIO;
5779 + unsigned long flags;
5780 +
5781 + usbhs_lock(priv, flags);
5782
5783 /*
5784 * if it already have pipe,
5785 @@ -593,7 +596,8 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
5786 if (uep->pipe) {
5787 usbhs_pipe_clear(uep->pipe);
5788 usbhs_pipe_sequence_data0(uep->pipe);
5789 - return 0;
5790 + ret = 0;
5791 + goto usbhsg_ep_enable_end;
5792 }
5793
5794 pipe = usbhs_pipe_malloc(priv,
5795 @@ -621,6 +625,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
5796 ret = 0;
5797 }
5798
5799 +usbhsg_ep_enable_end:
5800 + usbhs_unlock(priv, flags);
5801 +
5802 return ret;
5803 }
5804
5805 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
5806 index d96d423d00e6..8e07536c233a 100644
5807 --- a/drivers/usb/serial/option.c
5808 +++ b/drivers/usb/serial/option.c
5809 @@ -273,6 +273,7 @@ static void option_instat_callback(struct urb *urb);
5810 #define TELIT_PRODUCT_LE922_USBCFG5 0x1045
5811 #define TELIT_PRODUCT_LE920 0x1200
5812 #define TELIT_PRODUCT_LE910 0x1201
5813 +#define TELIT_PRODUCT_LE910_USBCFG4 0x1206
5814
5815 /* ZTE PRODUCTS */
5816 #define ZTE_VENDOR_ID 0x19d2
5817 @@ -1198,6 +1199,8 @@ static const struct usb_device_id option_ids[] = {
5818 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
5819 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
5820 .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
5821 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
5822 + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
5823 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
5824 .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
5825 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
5826 diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
5827 index 476c0e3a7150..f6ea8f4ba7cf 100644
5828 --- a/drivers/virtio/virtio_balloon.c
5829 +++ b/drivers/virtio/virtio_balloon.c
5830 @@ -202,6 +202,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
5831 num = min(num, ARRAY_SIZE(vb->pfns));
5832
5833 mutex_lock(&vb->balloon_lock);
5834 + /* We can't release more pages than taken */
5835 + num = min(num, (size_t)vb->num_pages);
5836 for (vb->num_pfns = 0; vb->num_pfns < num;
5837 vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
5838 page = balloon_page_dequeue(vb_dev_info);
5839 diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
5840 index a2eec97d5064..bb09de633939 100644
5841 --- a/drivers/w1/masters/omap_hdq.c
5842 +++ b/drivers/w1/masters/omap_hdq.c
5843 @@ -390,8 +390,6 @@ static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
5844 goto out;
5845 }
5846
5847 - hdq_data->hdq_irqstatus = 0;
5848 -
5849 if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
5850 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
5851 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
5852 diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
5853 index 75533adef998..92fe3f8012db 100644
5854 --- a/fs/btrfs/extent_io.c
5855 +++ b/fs/btrfs/extent_io.c
5856 @@ -2696,12 +2696,6 @@ struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
5857 btrfs_bio->csum = NULL;
5858 btrfs_bio->csum_allocated = NULL;
5859 btrfs_bio->end_io = NULL;
5860 -
5861 -#ifdef CONFIG_BLK_CGROUP
5862 - /* FIXME, put this into bio_clone_bioset */
5863 - if (bio->bi_css)
5864 - bio_associate_blkcg(new, bio->bi_css);
5865 -#endif
5866 }
5867 return new;
5868 }
5869 diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
5870 index 2234e88cf674..b56887b35889 100644
5871 --- a/fs/btrfs/file.c
5872 +++ b/fs/btrfs/file.c
5873 @@ -1629,13 +1629,11 @@ again:
5874 * managed to copy.
5875 */
5876 if (num_sectors > dirty_sectors) {
5877 - /*
5878 - * we round down because we don't want to count
5879 - * any partial blocks actually sent through the
5880 - * IO machines
5881 - */
5882 - release_bytes = round_down(release_bytes - copied,
5883 - root->sectorsize);
5884 +
5885 + /* release everything except the sectors we dirtied */
5886 + release_bytes -= dirty_sectors <<
5887 + root->fs_info->sb->s_blocksize_bits;
5888 +
5889 if (copied > 0) {
5890 spin_lock(&BTRFS_I(inode)->lock);
5891 BTRFS_I(inode)->outstanding_extents++;
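
Aside: dirty_sectors counts fs-block-sized sectors, so dirty_sectors << s_blocksize_bits is exactly the dirtied byte count; subtracting it keeps the reservation for the dirtied range and releases the rest, where the old round_down() of the copied byte count could miscount across a partial block. A standalone check of the shift (illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned int s_blocksize_bits = 12;	/* 4 KiB fs blocks */
		unsigned int dirty_sectors = 3;		/* fs-block-sized sectors */

		/* sectors -> bytes: 3 * 4096, written as a shift */
		printf("%u\n", dirty_sectors << s_blocksize_bits); /* 12288 */
		return 0;
	}
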
5892 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
5893 index 4ae75006e73b..3f7c2cd41f8f 100644
5894 --- a/fs/cachefiles/namei.c
5895 +++ b/fs/cachefiles/namei.c
5896 @@ -263,6 +263,8 @@ requeue:
5897 void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
5898 struct cachefiles_object *object)
5899 {
5900 + blkcnt_t i_blocks = d_backing_inode(object->dentry)->i_blocks;
5901 +
5902 write_lock(&cache->active_lock);
5903 rb_erase(&object->active_node, &cache->active_nodes);
5904 clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
5905 @@ -273,8 +275,7 @@ void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
5906 /* This object can now be culled, so we need to let the daemon know
5907 * that there is something it can remove if it needs to.
5908 */
5909 - atomic_long_add(d_backing_inode(object->dentry)->i_blocks,
5910 - &cache->b_released);
5911 + atomic_long_add(i_blocks, &cache->b_released);
5912 if (atomic_inc_return(&cache->f_released))
5913 cachefiles_state_changed(cache);
5914 }
5915 diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
5916 index 3182273a3407..1418daa03d95 100644
5917 --- a/fs/cifs/cifs_fs_sb.h
5918 +++ b/fs/cifs/cifs_fs_sb.h
5919 @@ -46,6 +46,9 @@
5920 #define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */
5921 #define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */
5922 #define CIFS_MOUNT_MAP_SFM_CHR 0x800000 /* SFM/MAC mapping for illegal chars */
5923 +#define CIFS_MOUNT_USE_PREFIX_PATH 0x1000000 /* make subpath with inaccessible
5924 + * root mountable
5925 + */
5926
5927 struct cifs_sb_info {
5928 struct rb_root tlink_tree;
5929 @@ -67,5 +70,6 @@ struct cifs_sb_info {
5930 struct backing_dev_info bdi;
5931 struct delayed_work prune_tlinks;
5932 struct rcu_head rcu;
5933 + char *prepath;
5934 };
5935 #endif /* _CIFS_FS_SB_H */
5936 diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
5937 index 6aeb8d4616a4..8347c90cf483 100644
5938 --- a/fs/cifs/cifsencrypt.c
5939 +++ b/fs/cifs/cifsencrypt.c
5940 @@ -743,24 +743,26 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
5941
5942 memcpy(ses->auth_key.response + baselen, tiblob, tilen);
5943
5944 + mutex_lock(&ses->server->srv_mutex);
5945 +
5946 rc = crypto_hmacmd5_alloc(ses->server);
5947 if (rc) {
5948 cifs_dbg(VFS, "could not crypto alloc hmacmd5 rc %d\n", rc);
5949 - goto setup_ntlmv2_rsp_ret;
5950 + goto unlock;
5951 }
5952
5953 /* calculate ntlmv2_hash */
5954 rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp);
5955 if (rc) {
5956 cifs_dbg(VFS, "could not get v2 hash rc %d\n", rc);
5957 - goto setup_ntlmv2_rsp_ret;
5958 + goto unlock;
5959 }
5960
5961 /* calculate first part of the client response (CR1) */
5962 rc = CalcNTLMv2_response(ses, ntlmv2_hash);
5963 if (rc) {
5964 cifs_dbg(VFS, "Could not calculate CR1 rc: %d\n", rc);
5965 - goto setup_ntlmv2_rsp_ret;
5966 + goto unlock;
5967 }
5968
5969 /* now calculate the session key for NTLMv2 */
5970 @@ -769,13 +771,13 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
5971 if (rc) {
5972 cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n",
5973 __func__);
5974 - goto setup_ntlmv2_rsp_ret;
5975 + goto unlock;
5976 }
5977
5978 rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash);
5979 if (rc) {
5980 cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
5981 - goto setup_ntlmv2_rsp_ret;
5982 + goto unlock;
5983 }
5984
5985 rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
5986 @@ -783,7 +785,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
5987 CIFS_HMAC_MD5_HASH_SIZE);
5988 if (rc) {
5989 cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
5990 - goto setup_ntlmv2_rsp_ret;
5991 + goto unlock;
5992 }
5993
5994 rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash,
5995 @@ -791,6 +793,8 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
5996 if (rc)
5997 cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
5998
5999 +unlock:
6000 + mutex_unlock(&ses->server->srv_mutex);
6001 setup_ntlmv2_rsp_ret:
6002 kfree(tiblob);
6003
6004 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
6005 index 5d841f39c4b7..6bbec5e784cd 100644
6006 --- a/fs/cifs/cifsfs.c
6007 +++ b/fs/cifs/cifsfs.c
6008 @@ -689,6 +689,14 @@ cifs_do_mount(struct file_system_type *fs_type,
6009 goto out_cifs_sb;
6010 }
6011
6012 + if (volume_info->prepath) {
6013 + cifs_sb->prepath = kstrdup(volume_info->prepath, GFP_KERNEL);
6014 + if (cifs_sb->prepath == NULL) {
6015 + root = ERR_PTR(-ENOMEM);
6016 + goto out_cifs_sb;
6017 + }
6018 + }
6019 +
6020 cifs_setup_cifs_sb(volume_info, cifs_sb);
6021
6022 rc = cifs_mount(cifs_sb, volume_info);
6023 @@ -727,7 +735,11 @@ cifs_do_mount(struct file_system_type *fs_type,
6024 sb->s_flags |= MS_ACTIVE;
6025 }
6026
6027 - root = cifs_get_root(volume_info, sb);
6028 + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
6029 + root = dget(sb->s_root);
6030 + else
6031 + root = cifs_get_root(volume_info, sb);
6032 +
6033 if (IS_ERR(root))
6034 goto out_super;
6035
6036 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
6037 index 7d2b15c06090..7ae03283bd61 100644
6038 --- a/fs/cifs/connect.c
6039 +++ b/fs/cifs/connect.c
6040 @@ -1228,6 +1228,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
6041 vol->ops = &smb1_operations;
6042 vol->vals = &smb1_values;
6043
6044 + vol->echo_interval = SMB_ECHO_INTERVAL_DEFAULT;
6045 +
6046 if (!mountdata)
6047 goto cifs_parse_mount_err;
6048
6049 @@ -2049,7 +2051,7 @@ static int match_server(struct TCP_Server_Info *server, struct smb_vol *vol)
6050 if (!match_security(server, vol))
6051 return 0;
6052
6053 - if (server->echo_interval != vol->echo_interval)
6054 + if (server->echo_interval != vol->echo_interval * HZ)
6055 return 0;
6056
6057 return 1;
6058 @@ -3483,6 +3485,44 @@ cifs_get_volume_info(char *mount_data, const char *devname)
6059 return volume_info;
6060 }
6061
6062 +static int
6063 +cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
6064 + unsigned int xid,
6065 + struct cifs_tcon *tcon,
6066 + struct cifs_sb_info *cifs_sb,
6067 + char *full_path)
6068 +{
6069 + int rc;
6070 + char *s;
6071 + char sep, tmp;
6072 +
6073 + sep = CIFS_DIR_SEP(cifs_sb);
6074 + s = full_path;
6075 +
6076 + rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, "");
6077 + while (rc == 0) {
6078 + /* skip separators */
6079 + while (*s == sep)
6080 + s++;
6081 + if (!*s)
6082 + break;
6083 + /* next separator */
6084 + while (*s && *s != sep)
6085 + s++;
6086 +
6087 + /*
6088 + * temporarily null-terminate the path at the end of
6089 + * the current component
6090 + */
6091 + tmp = *s;
6092 + *s = 0;
6093 + rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
6094 + full_path);
6095 + *s = tmp;
6096 + }
6097 + return rc;
6098 +}
6099 +
6100 int
6101 cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
6102 {
6103 @@ -3620,6 +3660,16 @@ remote_path_check:
6104 kfree(full_path);
6105 goto mount_fail_check;
6106 }
6107 +
6108 + rc = cifs_are_all_path_components_accessible(server,
6109 + xid, tcon, cifs_sb,
6110 + full_path);
6111 + if (rc != 0) {
6112 + cifs_dbg(VFS, "cannot query dirs between root and final path, "
6113 + "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
6114 + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
6115 + rc = 0;
6116 + }
6117 kfree(full_path);
6118 }
6119
6120 @@ -3889,6 +3939,7 @@ cifs_umount(struct cifs_sb_info *cifs_sb)
6121
6122 bdi_destroy(&cifs_sb->bdi);
6123 kfree(cifs_sb->mountdata);
6124 + kfree(cifs_sb->prepath);
6125 call_rcu(&cifs_sb->rcu, delayed_free);
6126 }
6127
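
Aside: cifs_are_all_path_components_accessible() probes the empty path and then every prefix of full_path by temporarily writing a NUL at each separator and restoring it afterwards, so no copies of the path are needed. The same walk as a self-contained program; check_path() stands in for server->ops->is_path_accessible():

	#include <stdio.h>

	static int check_path(const char *p)
	{
		printf("probe <%s>\n", p);
		return 0;	/* pretend every prefix is accessible */
	}

	static int walk_components(char *full_path, char sep)
	{
		char *s = full_path;
		int rc = check_path("");

		while (rc == 0) {
			while (*s == sep)	/* skip separators */
				s++;
			if (!*s)
				break;
			while (*s && *s != sep)	/* advance past this component */
				s++;
			char tmp = *s;
			*s = '\0';		/* cut after the current component */
			rc = check_path(full_path);
			*s = tmp;		/* restore the separator */
		}
		return rc;
	}

	int main(void)
	{
		char path[] = "/a/b/c";	/* probes "", "/a", "/a/b", "/a/b/c" */
		return walk_components(path, '/');
	}
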
6128 diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
6129 index fb0903fffc22..6f7333d552a3 100644
6130 --- a/fs/cifs/dir.c
6131 +++ b/fs/cifs/dir.c
6132 @@ -84,6 +84,7 @@ build_path_from_dentry(struct dentry *direntry)
6133 struct dentry *temp;
6134 int namelen;
6135 int dfsplen;
6136 + int pplen = 0;
6137 char *full_path;
6138 char dirsep;
6139 struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
6140 @@ -95,8 +96,12 @@ build_path_from_dentry(struct dentry *direntry)
6141 dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
6142 else
6143 dfsplen = 0;
6144 +
6145 + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
6146 + pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0;
6147 +
6148 cifs_bp_rename_retry:
6149 - namelen = dfsplen;
6150 + namelen = dfsplen + pplen;
6151 seq = read_seqbegin(&rename_lock);
6152 rcu_read_lock();
6153 for (temp = direntry; !IS_ROOT(temp);) {
6154 @@ -137,7 +142,7 @@ cifs_bp_rename_retry:
6155 }
6156 }
6157 rcu_read_unlock();
6158 - if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) {
6159 + if (namelen != dfsplen + pplen || read_seqretry(&rename_lock, seq)) {
6160 cifs_dbg(FYI, "did not end path lookup where expected. namelen=%d dfsplen=%d\n",
6161 namelen, dfsplen);
6162 /* presumably this is only possible if racing with a rename
6163 @@ -153,6 +158,17 @@ cifs_bp_rename_retry:
6164 those safely to '/' if any are found in the middle of the prepath */
6165 /* BB test paths to Windows with '/' in the midst of prepath */
6166
6167 + if (pplen) {
6168 + int i;
6169 +
6170 + cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath);
6171 + memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1);
6172 + full_path[dfsplen] = '\\';
6173 + for (i = 0; i < pplen-1; i++)
6174 + if (full_path[dfsplen+1+i] == '/')
6175 + full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb);
6176 + }
6177 +
6178 if (dfsplen) {
6179 strncpy(full_path, tcon->treeName, dfsplen);
6180 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
6181 @@ -229,6 +245,13 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
6182 goto cifs_create_get_file_info;
6183 }
6184
6185 + if (S_ISDIR(newinode->i_mode)) {
6186 + CIFSSMBClose(xid, tcon, fid->netfid);
6187 + iput(newinode);
6188 + rc = -EISDIR;
6189 + goto out;
6190 + }
6191 +
6192 if (!S_ISREG(newinode->i_mode)) {
6193 /*
6194 * The server may allow us to open things like
6195 @@ -399,10 +422,14 @@ cifs_create_set_dentry:
6196 if (rc != 0) {
6197 cifs_dbg(FYI, "Create worked, get_inode_info failed rc = %d\n",
6198 rc);
6199 - if (server->ops->close)
6200 - server->ops->close(xid, tcon, fid);
6201 - goto out;
6202 + goto out_err;
6203 }
6204 +
6205 + if (S_ISDIR(newinode->i_mode)) {
6206 + rc = -EISDIR;
6207 + goto out_err;
6208 + }
6209 +
6210 d_drop(direntry);
6211 d_add(direntry, newinode);
6212
6213 @@ -410,6 +437,13 @@ out:
6214 kfree(buf);
6215 kfree(full_path);
6216 return rc;
6217 +
6218 +out_err:
6219 + if (server->ops->close)
6220 + server->ops->close(xid, tcon, fid);
6221 + if (newinode)
6222 + iput(newinode);
6223 + goto out;
6224 }
6225
6226 int
6227 diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
6228 index 514dadb0575d..b87efd0c92d6 100644
6229 --- a/fs/cifs/inode.c
6230 +++ b/fs/cifs/inode.c
6231 @@ -1002,10 +1002,26 @@ struct inode *cifs_root_iget(struct super_block *sb)
6232 struct inode *inode = NULL;
6233 long rc;
6234 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
6235 + char *path = NULL;
6236 + int len;
6237 +
6238 + if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
6239 + && cifs_sb->prepath) {
6240 + len = strlen(cifs_sb->prepath);
6241 + path = kzalloc(len + 2 /* leading sep + null */, GFP_KERNEL);
6242 + if (path == NULL)
6243 + return ERR_PTR(-ENOMEM);
6244 + path[0] = '/';
6245 + memcpy(path+1, cifs_sb->prepath, len);
6246 + } else {
6247 + path = kstrdup("", GFP_KERNEL);
6248 + if (path == NULL)
6249 + return ERR_PTR(-ENOMEM);
6250 + }
6251
6252 xid = get_xid();
6253 if (tcon->unix_ext) {
6254 - rc = cifs_get_inode_info_unix(&inode, "", sb, xid);
6255 + rc = cifs_get_inode_info_unix(&inode, path, sb, xid);
6256 /* some servers mistakenly claim POSIX support */
6257 if (rc != -EOPNOTSUPP)
6258 goto iget_no_retry;
6259 @@ -1013,7 +1029,8 @@ struct inode *cifs_root_iget(struct super_block *sb)
6260 tcon->unix_ext = false;
6261 }
6262
6263 - rc = cifs_get_inode_info(&inode, "", NULL, sb, xid, NULL);
6264 + convert_delimiter(path, CIFS_DIR_SEP(cifs_sb));
6265 + rc = cifs_get_inode_info(&inode, path, NULL, sb, xid, NULL);
6266
6267 iget_no_retry:
6268 if (!inode) {
6269 @@ -1042,6 +1059,7 @@ iget_no_retry:
6270 }
6271
6272 out:
6273 + kfree(path);
6274 /* can not call macro free_xid here since in a void func
6275 * TODO: This is no longer true
6276 */
6277 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
6278 index 3525ed756173..505e6d6406fd 100644
6279 --- a/fs/cifs/smb2ops.c
6280 +++ b/fs/cifs/smb2ops.c
6281 @@ -1044,6 +1044,9 @@ smb2_new_lease_key(struct cifs_fid *fid)
6282 get_random_bytes(fid->lease_key, SMB2_LEASE_KEY_SIZE);
6283 }
6284
6285 +#define SMB2_SYMLINK_STRUCT_SIZE \
6286 + (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
6287 +
6288 static int
6289 smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
6290 const char *full_path, char **target_path,
6291 @@ -1056,7 +1059,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
6292 struct cifs_fid fid;
6293 struct smb2_err_rsp *err_buf = NULL;
6294 struct smb2_symlink_err_rsp *symlink;
6295 - unsigned int sub_len, sub_offset;
6296 + unsigned int sub_len;
6297 + unsigned int sub_offset;
6298 + unsigned int print_len;
6299 + unsigned int print_offset;
6300
6301 cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
6302
6303 @@ -1077,11 +1083,33 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
6304 kfree(utf16_path);
6305 return -ENOENT;
6306 }
6307 +
6308 + if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
6309 + get_rfc1002_length(err_buf) + 4 < SMB2_SYMLINK_STRUCT_SIZE) {
6310 + kfree(utf16_path);
6311 + return -ENOENT;
6312 + }
6313 +
6314 /* open must fail on symlink - reset rc */
6315 rc = 0;
6316 symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
6317 sub_len = le16_to_cpu(symlink->SubstituteNameLength);
6318 sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
6319 + print_len = le16_to_cpu(symlink->PrintNameLength);
6320 + print_offset = le16_to_cpu(symlink->PrintNameOffset);
6321 +
6322 + if (get_rfc1002_length(err_buf) + 4 <
6323 + SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
6324 + kfree(utf16_path);
6325 + return -ENOENT;
6326 + }
6327 +
6328 + if (get_rfc1002_length(err_buf) + 4 <
6329 + SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
6330 + kfree(utf16_path);
6331 + return -ENOENT;
6332 + }
6333 +
6334 *target_path = cifs_strndup_from_utf16(
6335 (char *)symlink->PathBuffer + sub_offset,
6336 sub_len, true, cifs_sb->local_nls);
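
The added checks treat the symlink error response as untrusted input: the total length, the header size, and each (offset, length) pair are validated before PathBuffer is dereferenced. A generic sketch of the pattern, written with subtractions rather than additions so the bounds test itself cannot overflow; hdr_size stands in for SMB2_SYMLINK_STRUCT_SIZE:

    #include <stddef.h>

    /* Validate an (off, len) pair read from an untrusted reply before
     * touching payload bytes at base + hdr_size + off. */
    static int range_ok(size_t total_len, size_t hdr_size,
                        size_t off, size_t len)
    {
            if (total_len < hdr_size)
                    return 0;
            if (off > total_len - hdr_size)         /* offset in payload? */
                    return 0;
            if (len > total_len - hdr_size - off)   /* length in payload? */
                    return 0;
            return 1;
    }
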
6337 diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
6338 index 70078096117d..78313adb3c95 100644
6339 --- a/fs/jbd2/commit.c
6340 +++ b/fs/jbd2/commit.c
6341 @@ -124,7 +124,7 @@ static int journal_submit_commit_record(journal_t *journal,
6342 struct commit_header *tmp;
6343 struct buffer_head *bh;
6344 int ret;
6345 - struct timespec now = current_kernel_time();
6346 + struct timespec64 now = current_kernel_time64();
6347
6348 *cbh = NULL;
6349
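
The jbd2 one-liner is a y2038 conversion: a 32-bit signed seconds field overflows in January 2038, so the commit timestamp moves to the 64-bit variant. A sketch of the widened type, mirroring (not quoting) the kernel's timespec64:

    #include <stdint.h>

    /* 64-bit seconds remove the 2038 limit of a 32-bit time_t. */
    struct timespec64_sketch {
            int64_t tv_sec;
            long    tv_nsec;
    };
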
6350 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
6351 index ff416d0e24bc..7796beacdefb 100644
6352 --- a/fs/nfs/nfs4proc.c
6353 +++ b/fs/nfs/nfs4proc.c
6354 @@ -427,6 +427,7 @@ static int nfs4_do_handle_exception(struct nfs_server *server,
6355 case -NFS4ERR_DELAY:
6356 nfs_inc_server_stats(server, NFSIOS_DELAY);
6357 case -NFS4ERR_GRACE:
6358 + case -NFS4ERR_LAYOUTTRYLATER:
6359 case -NFS4ERR_RECALLCONFLICT:
6360 exception->delay = 1;
6361 return 0;
6362 @@ -7869,11 +7870,13 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
6363 struct inode *inode = lgp->args.inode;
6364 struct nfs_server *server = NFS_SERVER(inode);
6365 struct pnfs_layout_hdr *lo;
6366 - int status = task->tk_status;
6367 + int nfs4err = task->tk_status;
6368 + int err, status = 0;
6369 + LIST_HEAD(head);
6370
6371 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
6372
6373 - switch (status) {
6374 + switch (nfs4err) {
6375 case 0:
6376 goto out;
6377
6378 @@ -7905,45 +7908,43 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
6379 status = -EOVERFLOW;
6380 goto out;
6381 }
6382 - /* Fallthrough */
6383 + status = -EBUSY;
6384 + break;
6385 case -NFS4ERR_RECALLCONFLICT:
6386 - nfs4_handle_exception(server, -NFS4ERR_RECALLCONFLICT,
6387 - exception);
6388 status = -ERECALLCONFLICT;
6389 - goto out;
6390 + break;
6391 case -NFS4ERR_EXPIRED:
6392 case -NFS4ERR_BAD_STATEID:
6393 exception->timeout = 0;
6394 spin_lock(&inode->i_lock);
6395 - if (nfs4_stateid_match(&lgp->args.stateid,
6396 + lo = NFS_I(inode)->layout;
6397 + /* If the open stateid was bad, then recover it. */
6398 + if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
6399 + nfs4_stateid_match_other(&lgp->args.stateid,
6400 &lgp->args.ctx->state->stateid)) {
6401 spin_unlock(&inode->i_lock);
6402 - /* If the open stateid was bad, then recover it. */
6403 exception->state = lgp->args.ctx->state;
6404 break;
6405 }
6406 - lo = NFS_I(inode)->layout;
6407 - if (lo && !test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) &&
6408 - nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
6409 - LIST_HEAD(head);
6410 -
6411 - /*
6412 - * Mark the bad layout state as invalid, then retry
6413 - * with the current stateid.
6414 - */
6415 - set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
6416 - pnfs_mark_matching_lsegs_invalid(lo, &head, NULL, 0);
6417 - spin_unlock(&inode->i_lock);
6418 - pnfs_free_lseg_list(&head);
6419 - status = -EAGAIN;
6420 - goto out;
6421 - } else
6422 - spin_unlock(&inode->i_lock);
6423 - }
6424
6425 - status = nfs4_handle_exception(server, status, exception);
6426 - if (exception->retry)
6427 + /*
6428 + * Mark the bad layout state as invalid, then retry
6429 + */
6430 + set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
6431 + pnfs_mark_matching_lsegs_invalid(lo, &head, NULL, 0);
6432 + spin_unlock(&inode->i_lock);
6433 + pnfs_free_lseg_list(&head);
6434 status = -EAGAIN;
6435 + goto out;
6436 + }
6437 +
6438 + err = nfs4_handle_exception(server, nfs4err, exception);
6439 + if (!status) {
6440 + if (exception->retry)
6441 + status = -EAGAIN;
6442 + else
6443 + status = err;
6444 + }
6445 out:
6446 dprintk("<-- %s\n", __func__);
6447 return status;
6448 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
6449 index 0fbe734cc38c..7d992362ff04 100644
6450 --- a/fs/nfs/pnfs.c
6451 +++ b/fs/nfs/pnfs.c
6452 @@ -1505,7 +1505,7 @@ pnfs_update_layout(struct inode *ino,
6453 struct pnfs_layout_segment *lseg = NULL;
6454 nfs4_stateid stateid;
6455 long timeout = 0;
6456 - unsigned long giveup = jiffies + rpc_get_timeout(server->client);
6457 + unsigned long giveup = jiffies + (clp->cl_lease_time << 1);
6458 bool first;
6459
6460 if (!pnfs_enabled_sb(NFS_SERVER(ino))) {
6461 @@ -1645,33 +1645,44 @@ lookup_again:
6462 lseg = send_layoutget(lo, ctx, &stateid, &arg, &timeout, gfp_flags);
6463 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
6464 PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
6465 + atomic_dec(&lo->plh_outstanding);
6466 if (IS_ERR(lseg)) {
6467 switch(PTR_ERR(lseg)) {
6468 - case -ERECALLCONFLICT:
6469 + case -EBUSY:
6470 if (time_after(jiffies, giveup))
6471 lseg = NULL;
6472 - /* Fallthrough */
6473 - case -EAGAIN:
6474 - pnfs_put_layout_hdr(lo);
6475 - if (first)
6476 - pnfs_clear_first_layoutget(lo);
6477 - if (lseg) {
6478 - trace_pnfs_update_layout(ino, pos, count,
6479 - iomode, lo, lseg, PNFS_UPDATE_LAYOUT_RETRY);
6480 - goto lookup_again;
6481 + break;
6482 + case -ERECALLCONFLICT:
6483 + /* Huh? We hold no layouts, how is there a recall? */
6484 + if (first) {
6485 + lseg = NULL;
6486 + break;
6487 }
6488 + /* Destroy the existing layout and start over */
6489 + if (time_after(jiffies, giveup))
6490 + pnfs_destroy_layout(NFS_I(ino));
6491 /* Fallthrough */
6492 + case -EAGAIN:
6493 + break;
6494 default:
6495 if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
6496 pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
6497 lseg = NULL;
6498 }
6499 + goto out_put_layout_hdr;
6500 + }
6501 + if (lseg) {
6502 + if (first)
6503 + pnfs_clear_first_layoutget(lo);
6504 + trace_pnfs_update_layout(ino, pos, count,
6505 + iomode, lo, lseg, PNFS_UPDATE_LAYOUT_RETRY);
6506 + pnfs_put_layout_hdr(lo);
6507 + goto lookup_again;
6508 }
6509 } else {
6510 pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
6511 }
6512
6513 - atomic_dec(&lo->plh_outstanding);
6514 out_put_layout_hdr:
6515 if (first)
6516 pnfs_clear_first_layoutget(lo);
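
The retry loop above replaces an RPC-timeout-based cutoff with giveup = jiffies + (cl_lease_time << 1) and tests it via time_after(), which stays correct across counter wraparound. A sketch of why the signed-difference comparison is wrap-safe; ticks_after(), do_retry() and deadline_loop() are illustrative names:

    #include <stdbool.h>

    /* Modeled on the kernel's time_after(a, b): true when 'a' is later
     * than 'b' even if the counter wrapped between the two samples,
     * because the unsigned difference is reinterpreted as signed. */
    static bool ticks_after(unsigned long a, unsigned long b)
    {
            return (long)(b - a) < 0;
    }

    static bool do_retry(void) { return false; }   /* illustrative work */

    static void deadline_loop(unsigned long now, unsigned long lease)
    {
            unsigned long giveup = now + (lease << 1);

            while (!ticks_after(now, giveup) && do_retry())
                    now++;  /* stand-in for the advancing jiffies counter */
    }
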
6517 diff --git a/fs/nfs/write.c b/fs/nfs/write.c
6518 index e1c74d3db64d..649fa5e26050 100644
6519 --- a/fs/nfs/write.c
6520 +++ b/fs/nfs/write.c
6521 @@ -1289,6 +1289,9 @@ int nfs_updatepage(struct file *file, struct page *page,
6522 dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n",
6523 file, count, (long long)(page_file_offset(page) + offset));
6524
6525 + if (!count)
6526 + goto out;
6527 +
6528 if (nfs_can_extend_write(file, page, inode)) {
6529 count = max(count + offset, nfs_page_length(page));
6530 offset = 0;
6531 @@ -1299,7 +1302,7 @@ int nfs_updatepage(struct file *file, struct page *page,
6532 nfs_set_pageerror(page);
6533 else
6534 __set_page_dirty_nobuffers(page);
6535 -
6536 +out:
6537 dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
6538 status, (long long)i_size_read(inode));
6539 return status;
6540 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
6541 index 70d0b9b33031..806eda192d1c 100644
6542 --- a/fs/nfsd/nfs4state.c
6543 +++ b/fs/nfsd/nfs4state.c
6544 @@ -4906,6 +4906,32 @@ nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6545 return nfs_ok;
6546 }
6547
6548 +static __be32
6549 +nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
6550 +{
6551 + struct nfs4_ol_stateid *stp = openlockstateid(s);
6552 + __be32 ret;
6553 +
6554 + mutex_lock(&stp->st_mutex);
6555 +
6556 + ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
6557 + if (ret)
6558 + goto out;
6559 +
6560 + ret = nfserr_locks_held;
6561 + if (check_for_locks(stp->st_stid.sc_file,
6562 + lockowner(stp->st_stateowner)))
6563 + goto out;
6564 +
6565 + release_lock_stateid(stp);
6566 + ret = nfs_ok;
6567 +
6568 +out:
6569 + mutex_unlock(&stp->st_mutex);
6570 + nfs4_put_stid(s);
6571 + return ret;
6572 +}
6573 +
6574 __be32
6575 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6576 struct nfsd4_free_stateid *free_stateid)
6577 @@ -4913,7 +4939,6 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6578 stateid_t *stateid = &free_stateid->fr_stateid;
6579 struct nfs4_stid *s;
6580 struct nfs4_delegation *dp;
6581 - struct nfs4_ol_stateid *stp;
6582 struct nfs4_client *cl = cstate->session->se_client;
6583 __be32 ret = nfserr_bad_stateid;
6584
6585 @@ -4932,18 +4957,9 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6586 ret = nfserr_locks_held;
6587 break;
6588 case NFS4_LOCK_STID:
6589 - ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
6590 - if (ret)
6591 - break;
6592 - stp = openlockstateid(s);
6593 - ret = nfserr_locks_held;
6594 - if (check_for_locks(stp->st_stid.sc_file,
6595 - lockowner(stp->st_stateowner)))
6596 - break;
6597 - WARN_ON(!unhash_lock_stateid(stp));
6598 + atomic_inc(&s->sc_count);
6599 spin_unlock(&cl->cl_lock);
6600 - nfs4_put_stid(s);
6601 - ret = nfs_ok;
6602 + ret = nfsd4_free_lock_stateid(stateid, s);
6603 goto out;
6604 case NFS4_REVOKED_DELEG_STID:
6605 dp = delegstateid(s);
6606 @@ -5510,7 +5526,7 @@ static __be32
6607 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
6608 struct nfs4_ol_stateid *ost,
6609 struct nfsd4_lock *lock,
6610 - struct nfs4_ol_stateid **lst, bool *new)
6611 + struct nfs4_ol_stateid **plst, bool *new)
6612 {
6613 __be32 status;
6614 struct nfs4_file *fi = ost->st_stid.sc_file;
6615 @@ -5518,7 +5534,9 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
6616 struct nfs4_client *cl = oo->oo_owner.so_client;
6617 struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
6618 struct nfs4_lockowner *lo;
6619 + struct nfs4_ol_stateid *lst;
6620 unsigned int strhashval;
6621 + bool hashed;
6622
6623 lo = find_lockowner_str(cl, &lock->lk_new_owner);
6624 if (!lo) {
6625 @@ -5534,12 +5552,27 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
6626 goto out;
6627 }
6628
6629 - *lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
6630 - if (*lst == NULL) {
6631 +retry:
6632 + lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
6633 + if (lst == NULL) {
6634 status = nfserr_jukebox;
6635 goto out;
6636 }
6637 +
6638 + mutex_lock(&lst->st_mutex);
6639 +
6640 + /* See if it's still hashed to avoid race with FREE_STATEID */
6641 + spin_lock(&cl->cl_lock);
6642 + hashed = !list_empty(&lst->st_perfile);
6643 + spin_unlock(&cl->cl_lock);
6644 +
6645 + if (!hashed) {
6646 + mutex_unlock(&lst->st_mutex);
6647 + nfs4_put_stid(&lst->st_stid);
6648 + goto retry;
6649 + }
6650 status = nfs_ok;
6651 + *plst = lst;
6652 out:
6653 nfs4_put_stateowner(&lo->lo_owner);
6654 return status;
6655 @@ -5606,8 +5639,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6656 goto out;
6657 status = lookup_or_create_lock_state(cstate, open_stp, lock,
6658 &lock_stp, &new);
6659 - if (status == nfs_ok)
6660 - mutex_lock(&lock_stp->st_mutex);
6661 } else {
6662 status = nfs4_preprocess_seqid_op(cstate,
6663 lock->lk_old_lock_seqid,
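
The lock-stateid changes close a race with FREE_STATEID: take a reference, acquire st_mutex, then re-check under cl_lock that the stateid is still hashed, retrying from scratch if it was freed in between. A pthread sketch of that lookup/lock/recheck/retry discipline; the struct and helpers are illustrative, not the nfsd types:

    #include <pthread.h>
    #include <stdbool.h>

    struct stid {
            pthread_mutex_t mtx;
            bool hashed;            /* still reachable from the table? */
            int refcount;
    };

    static struct stid the_stid = { PTHREAD_MUTEX_INITIALIZER, true, 1 };

    static struct stid *lookup_get(void)    /* find + take a reference */
    {
            __sync_fetch_and_add(&the_stid.refcount, 1);
            return &the_stid;
    }

    static void put(struct stid *s)
    {
            __sync_fetch_and_sub(&s->refcount, 1);
    }

    /* Return the stateid locked and guaranteed live, or NULL. */
    static struct stid *acquire_live(void)
    {
            struct stid *s;

            for (;;) {
                    s = lookup_get();
                    if (!s)
                            return NULL;
                    pthread_mutex_lock(&s->mtx);
                    if (s->hashed)
                            return s;       /* locked, still hashed */
                    pthread_mutex_unlock(&s->mtx);
                    put(s);                 /* lost the race; retry */
            }
    }
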
6664 diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
6665 index 9a7693d5f8ff..6db75cbb668f 100644
6666 --- a/fs/overlayfs/super.c
6667 +++ b/fs/overlayfs/super.c
6668 @@ -404,7 +404,8 @@ static struct ovl_entry *ovl_alloc_entry(unsigned int numlower)
6669 static bool ovl_dentry_remote(struct dentry *dentry)
6670 {
6671 return dentry->d_flags &
6672 - (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE);
6673 + (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE |
6674 + DCACHE_OP_REAL);
6675 }
6676
6677 static bool ovl_dentry_weird(struct dentry *dentry)
6678 diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
6679 index 4c463b99fe57..a36a5a418f7d 100644
6680 --- a/fs/xfs/xfs_aops.c
6681 +++ b/fs/xfs/xfs_aops.c
6682 @@ -87,6 +87,12 @@ xfs_find_bdev_for_inode(
6683 * We're now finished for good with this page. Update the page state via the
6684 * associated buffer_heads, paying attention to the start and end offsets that
6685 * we need to process on the page.
6686 + *
6687 + * Landmine Warning: bh->b_end_io() will call end_page_writeback() on the last
6688 + * buffer in the IO. Once it does this, it is unsafe to access the bufferhead or
6689 + * the page at all, as we may be racing with memory reclaim and it can free both
6690 + * the bufferhead chain and the page as it will see the page as clean and
6691 + * unused.
6692 */
6693 static void
6694 xfs_finish_page_writeback(
6695 @@ -95,8 +101,9 @@ xfs_finish_page_writeback(
6696 int error)
6697 {
6698 unsigned int end = bvec->bv_offset + bvec->bv_len - 1;
6699 - struct buffer_head *head, *bh;
6700 + struct buffer_head *head, *bh, *next;
6701 unsigned int off = 0;
6702 + unsigned int bsize;
6703
6704 ASSERT(bvec->bv_offset < PAGE_SIZE);
6705 ASSERT((bvec->bv_offset & ((1 << inode->i_blkbits) - 1)) == 0);
6706 @@ -105,15 +112,17 @@ xfs_finish_page_writeback(
6707
6708 bh = head = page_buffers(bvec->bv_page);
6709
6710 + bsize = bh->b_size;
6711 do {
6712 + next = bh->b_this_page;
6713 if (off < bvec->bv_offset)
6714 goto next_bh;
6715 if (off > end)
6716 break;
6717 bh->b_end_io(bh, !error);
6718 next_bh:
6719 - off += bh->b_size;
6720 - } while ((bh = bh->b_this_page) != head);
6721 + off += bsize;
6722 + } while ((bh = next) != head);
6723 }
6724
6725 /*
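
The xfs_finish_page_writeback() fix caches both the next pointer and the block size before invoking bh->b_end_io(), because that callback can end page writeback, after which reclaim may free the whole buffer chain. The general rule: when a visitor callback may free the current node, load everything you still need first. A minimal sketch on a singly linked list (the buffer-head ring is circular, but the hazard is identical):

    struct node {
            struct node *next;
            void (*fini)(struct node *);    /* may free its argument */
    };

    static void walk_and_finish(struct node *n)
    {
            struct node *next;

            while (n) {
                    next = n->next;  /* cache before the callback ... */
                    n->fini(n);      /* ... which may free 'n' */
                    n = next;        /* never dereference 'n' again */
            }
    }
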
6726 diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
6727 index 3f103076d0bf..c357f27d5483 100644
6728 --- a/include/linux/backing-dev-defs.h
6729 +++ b/include/linux/backing-dev-defs.h
6730 @@ -163,6 +163,7 @@ struct backing_dev_info {
6731 wait_queue_head_t wb_waitq;
6732
6733 struct device *dev;
6734 + struct device *owner;
6735
6736 struct timer_list laptop_mode_wb_timer;
6737
6738 diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
6739 index c82794f20110..89d3de3e096b 100644
6740 --- a/include/linux/backing-dev.h
6741 +++ b/include/linux/backing-dev.h
6742 @@ -24,6 +24,7 @@ __printf(3, 4)
6743 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
6744 const char *fmt, ...);
6745 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
6746 +int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
6747 void bdi_unregister(struct backing_dev_info *bdi);
6748
6749 int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
6750 diff --git a/include/linux/bio.h b/include/linux/bio.h
6751 index 9faebf7f9a33..75fadd28eec8 100644
6752 --- a/include/linux/bio.h
6753 +++ b/include/linux/bio.h
6754 @@ -527,11 +527,14 @@ extern unsigned int bvec_nr_vecs(unsigned short idx);
6755 int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
6756 int bio_associate_current(struct bio *bio);
6757 void bio_disassociate_task(struct bio *bio);
6758 +void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
6759 #else /* CONFIG_BLK_CGROUP */
6760 static inline int bio_associate_blkcg(struct bio *bio,
6761 struct cgroup_subsys_state *blkcg_css) { return 0; }
6762 static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
6763 static inline void bio_disassociate_task(struct bio *bio) { }
6764 +static inline void bio_clone_blkcg_association(struct bio *dst,
6765 + struct bio *src) { }
6766 #endif /* CONFIG_BLK_CGROUP */
6767
6768 #ifdef CONFIG_HIGHMEM
6769 diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
6770 index ab310819ac36..7879bf411891 100644
6771 --- a/include/linux/mlx5/qp.h
6772 +++ b/include/linux/mlx5/qp.h
6773 @@ -556,9 +556,9 @@ struct mlx5_destroy_qp_mbox_out {
6774 struct mlx5_modify_qp_mbox_in {
6775 struct mlx5_inbox_hdr hdr;
6776 __be32 qpn;
6777 - u8 rsvd1[4];
6778 - __be32 optparam;
6779 u8 rsvd0[4];
6780 + __be32 optparam;
6781 + u8 rsvd1[4];
6782 struct mlx5_qp_context ctx;
6783 u8 rsvd2[16];
6784 };
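
The mlx5 hunk swaps rsvd1/optparam back into the order the device expects: for command mailboxes the struct layout is hardware ABI, so a field reordering silently corrupts the command. A compile-time guard sketch; the struct and offset here are illustrative, mirroring the fixed ordering rather than the full mlx5 definition:

    #include <stdint.h>
    #include <stddef.h>

    struct modify_qp_in {
            uint32_t qpn;           /* big-endian on the wire */
            uint8_t  rsvd0[4];
            uint32_t optparam;      /* big-endian on the wire */
            uint8_t  rsvd1[4];
    };

    /* Catch accidental reordering at compile time. */
    _Static_assert(offsetof(struct modify_qp_in, optparam) == 8,
                   "optparam must live at byte offset 8");
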
6785 diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
6786 index 7e440d41487a..e694f02d42e3 100644
6787 --- a/include/rdma/ib_verbs.h
6788 +++ b/include/rdma/ib_verbs.h
6789 @@ -1428,6 +1428,10 @@ struct ib_srq {
6790 } ext;
6791 };
6792
6793 +/*
6794 + * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
6795 + * @max_read_sge: Maximum SGE elements per RDMA READ request.
6796 + */
6797 struct ib_qp {
6798 struct ib_device *device;
6799 struct ib_pd *pd;
6800 @@ -1449,6 +1453,8 @@ struct ib_qp {
6801 void (*event_handler)(struct ib_event *, void *);
6802 void *qp_context;
6803 u32 qp_num;
6804 + u32 max_write_sge;
6805 + u32 max_read_sge;
6806 enum ib_qp_type qp_type;
6807 };
6808
6809 diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
6810 index d8ab5101fad5..f6f3bc52c1ac 100644
6811 --- a/include/target/target_core_backend.h
6812 +++ b/include/target/target_core_backend.h
6813 @@ -95,6 +95,6 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
6814 bool target_sense_desc_format(struct se_device *dev);
6815 sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
6816 bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
6817 - struct request_queue *q, int block_size);
6818 + struct request_queue *q);
6819
6820 #endif /* TARGET_CORE_BACKEND_H */
6821 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
6822 index b316b44d03f3..fb8e3b6febdf 100644
6823 --- a/include/target/target_core_base.h
6824 +++ b/include/target/target_core_base.h
6825 @@ -142,6 +142,7 @@ enum se_cmd_flags_table {
6826 SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
6827 SCF_ACK_KREF = 0x00400000,
6828 SCF_USE_CPUID = 0x00800000,
6829 + SCF_TASK_ATTR_SET = 0x01000000,
6830 };
6831
6832 /*
6833 diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
6834 index de44462a7680..5cd6faa6e0d1 100644
6835 --- a/include/target/target_core_fabric.h
6836 +++ b/include/target/target_core_fabric.h
6837 @@ -163,7 +163,6 @@ int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
6838 void core_tmr_release_req(struct se_tmr_req *);
6839 int transport_generic_handle_tmr(struct se_cmd *);
6840 void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
6841 -void __target_execute_cmd(struct se_cmd *);
6842 int transport_lookup_tmr_lun(struct se_cmd *, u64);
6843 void core_allocate_nexus_loss_ua(struct se_node_acl *acl);
6844
6845 diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
6846 index 003dca933803..5664ca07c9c7 100644
6847 --- a/include/trace/events/sunrpc.h
6848 +++ b/include/trace/events/sunrpc.h
6849 @@ -529,20 +529,27 @@ TRACE_EVENT(svc_xprt_do_enqueue,
6850
6851 TP_STRUCT__entry(
6852 __field(struct svc_xprt *, xprt)
6853 - __field_struct(struct sockaddr_storage, ss)
6854 __field(int, pid)
6855 __field(unsigned long, flags)
6856 + __dynamic_array(unsigned char, addr, xprt != NULL ?
6857 + xprt->xpt_remotelen : 0)
6858 ),
6859
6860 TP_fast_assign(
6861 __entry->xprt = xprt;
6862 - xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
6863 __entry->pid = rqst? rqst->rq_task->pid : 0;
6864 - __entry->flags = xprt ? xprt->xpt_flags : 0;
6865 + if (xprt) {
6866 + memcpy(__get_dynamic_array(addr),
6867 + &xprt->xpt_remote,
6868 + xprt->xpt_remotelen);
6869 + __entry->flags = xprt->xpt_flags;
6870 + } else
6871 + __entry->flags = 0;
6872 ),
6873
6874 TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt,
6875 - (struct sockaddr *)&__entry->ss,
6876 + __get_dynamic_array_len(addr) != 0 ?
6877 + (struct sockaddr *)__get_dynamic_array(addr) : NULL,
6878 __entry->pid, show_svc_xprt_flags(__entry->flags))
6879 );
6880
6881 @@ -553,18 +560,25 @@ TRACE_EVENT(svc_xprt_dequeue,
6882
6883 TP_STRUCT__entry(
6884 __field(struct svc_xprt *, xprt)
6885 - __field_struct(struct sockaddr_storage, ss)
6886 __field(unsigned long, flags)
6887 + __dynamic_array(unsigned char, addr, xprt != NULL ?
6888 + xprt->xpt_remotelen : 0)
6889 ),
6890
6891 TP_fast_assign(
6892 - __entry->xprt = xprt,
6893 - xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
6894 - __entry->flags = xprt ? xprt->xpt_flags : 0;
6895 + __entry->xprt = xprt;
6896 + if (xprt) {
6897 + memcpy(__get_dynamic_array(addr),
6898 + &xprt->xpt_remote,
6899 + xprt->xpt_remotelen);
6900 + __entry->flags = xprt->xpt_flags;
6901 + } else
6902 + __entry->flags = 0;
6903 ),
6904
6905 TP_printk("xprt=0x%p addr=%pIScp flags=%s", __entry->xprt,
6906 - (struct sockaddr *)&__entry->ss,
6907 + __get_dynamic_array_len(addr) != 0 ?
6908 + (struct sockaddr *)__get_dynamic_array(addr) : NULL,
6909 show_svc_xprt_flags(__entry->flags))
6910 );
6911
6912 @@ -592,19 +606,26 @@ TRACE_EVENT(svc_handle_xprt,
6913 TP_STRUCT__entry(
6914 __field(struct svc_xprt *, xprt)
6915 __field(int, len)
6916 - __field_struct(struct sockaddr_storage, ss)
6917 __field(unsigned long, flags)
6918 + __dynamic_array(unsigned char, addr, xprt != NULL ?
6919 + xprt->xpt_remotelen : 0)
6920 ),
6921
6922 TP_fast_assign(
6923 __entry->xprt = xprt;
6924 - xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
6925 __entry->len = len;
6926 - __entry->flags = xprt ? xprt->xpt_flags : 0;
6927 + if (xprt) {
6928 + memcpy(__get_dynamic_array(addr),
6929 + &xprt->xpt_remote,
6930 + xprt->xpt_remotelen);
6931 + __entry->flags = xprt->xpt_flags;
6932 + } else
6933 + __entry->flags = 0;
6934 ),
6935
6936 TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt,
6937 - (struct sockaddr *)&__entry->ss,
6938 + __get_dynamic_array_len(addr) != 0 ?
6939 + (struct sockaddr *)__get_dynamic_array(addr) : NULL,
6940 __entry->len, show_svc_xprt_flags(__entry->flags))
6941 );
6942 #endif /* _TRACE_SUNRPC_H */
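
All three tracepoints above stop embedding a fixed 128-byte sockaddr_storage per event and instead record only xpt_remotelen bytes through __dynamic_array(). The space trade is the usual fixed-versus-variable record one; a sketch with a C99 flexible array member, where rec and rec_new are illustrative names:

    #include <stdlib.h>
    #include <string.h>

    struct rec {
            unsigned short len;     /* bytes actually valid in addr[] */
            unsigned char addr[];   /* flexible array member */
    };

    /* Store only the meaningful bytes instead of a worst-case buffer. */
    static struct rec *rec_new(const void *addr, unsigned short len)
    {
            struct rec *r = malloc(sizeof(*r) + len);

            if (!r)
                    return NULL;
            r->len = len;
            memcpy(r->addr, addr, len);
            return r;
    }
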
6943 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
6944 index 2672d105cffc..b3341284f1c6 100644
6945 --- a/kernel/auditsc.c
6946 +++ b/kernel/auditsc.c
6947 @@ -72,6 +72,7 @@
6948 #include <linux/compat.h>
6949 #include <linux/ctype.h>
6950 #include <linux/string.h>
6951 +#include <linux/uaccess.h>
6952 #include <uapi/linux/limits.h>
6953
6954 #include "audit.h"
6955 @@ -81,7 +82,8 @@
6956 #define AUDITSC_SUCCESS 1
6957 #define AUDITSC_FAILURE 2
6958
6959 -/* no execve audit message should be longer than this (userspace limits) */
6960 +/* no execve audit message should be longer than this (userspace limits),
6961 + * see the note near the top of audit_log_execve_info() about this value */
6962 #define MAX_EXECVE_AUDIT_LEN 7500
6963
6964 /* max length to print of cmdline/proctitle value during audit */
6965 @@ -987,184 +989,178 @@ static int audit_log_pid_context(struct audit_context *context, pid_t pid,
6966 return rc;
6967 }
6968
6969 -/*
6970 - * to_send and len_sent accounting are very loose estimates. We aren't
6971 - * really worried about a hard cap to MAX_EXECVE_AUDIT_LEN so much as being
6972 - * within about 500 bytes (next page boundary)
6973 - *
6974 - * why snprintf? an int is up to 12 digits long. if we just assumed when
6975 - * logging that a[%d]= was going to be 16 characters long we would be wasting
6976 - * space in every audit message. In one 7500 byte message we can log up to
6977 - * about 1000 min size arguments. That comes down to about 50% waste of space
6978 - * if we didn't do the snprintf to find out how long arg_num_len was.
6979 - */
6980 -static int audit_log_single_execve_arg(struct audit_context *context,
6981 - struct audit_buffer **ab,
6982 - int arg_num,
6983 - size_t *len_sent,
6984 - const char __user *p,
6985 - char *buf)
6986 +static void audit_log_execve_info(struct audit_context *context,
6987 + struct audit_buffer **ab)
6988 {
6989 - char arg_num_len_buf[12];
6990 - const char __user *tmp_p = p;
6991 - /* how many digits are in arg_num? 5 is the length of ' a=""' */
6992 - size_t arg_num_len = snprintf(arg_num_len_buf, 12, "%d", arg_num) + 5;
6993 - size_t len, len_left, to_send;
6994 - size_t max_execve_audit_len = MAX_EXECVE_AUDIT_LEN;
6995 - unsigned int i, has_cntl = 0, too_long = 0;
6996 - int ret;
6997 -
6998 - /* strnlen_user includes the null we don't want to send */
6999 - len_left = len = strnlen_user(p, MAX_ARG_STRLEN) - 1;
7000 -
7001 - /*
7002 - * We just created this mm, if we can't find the strings
7003 - * we just copied into it something is _very_ wrong. Similar
7004 - * for strings that are too long, we should not have created
7005 - * any.
7006 - */
7007 - if (WARN_ON_ONCE(len < 0 || len > MAX_ARG_STRLEN - 1)) {
7008 - send_sig(SIGKILL, current, 0);
7009 - return -1;
7010 + long len_max;
7011 + long len_rem;
7012 + long len_full;
7013 + long len_buf;
7014 + long len_abuf;
7015 + long len_tmp;
7016 + bool require_data;
7017 + bool encode;
7018 + unsigned int iter;
7019 + unsigned int arg;
7020 + char *buf_head;
7021 + char *buf;
7022 + const char __user *p = (const char __user *)current->mm->arg_start;
7023 +
7024 + /* NOTE: this buffer needs to be large enough to hold all the non-arg
7025 + * data we put in the audit record for this argument (see the
7026 + * code below) ... at this point in time 96 is plenty */
7027 + char abuf[96];
7028 +
7029 + /* NOTE: we set MAX_EXECVE_AUDIT_LEN to a rather arbitrary limit, the
7030 + * current value of 7500 is not as important as the fact that it
7031 + * is less than 8k, a setting of 7500 gives us plenty of wiggle
7032 + * room if we go over a little bit in the logging below */
7033 + WARN_ON_ONCE(MAX_EXECVE_AUDIT_LEN > 7500);
7034 + len_max = MAX_EXECVE_AUDIT_LEN;
7035 +
7036 + /* scratch buffer to hold the userspace args */
7037 + buf_head = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL);
7038 + if (!buf_head) {
7039 + audit_panic("out of memory for argv string");
7040 + return;
7041 }
7042 + buf = buf_head;
7043
7044 - /* walk the whole argument looking for non-ascii chars */
7045 + audit_log_format(*ab, "argc=%d", context->execve.argc);
7046 +
7047 + len_rem = len_max;
7048 + len_buf = 0;
7049 + len_full = 0;
7050 + require_data = true;
7051 + encode = false;
7052 + iter = 0;
7053 + arg = 0;
7054 do {
7055 - if (len_left > MAX_EXECVE_AUDIT_LEN)
7056 - to_send = MAX_EXECVE_AUDIT_LEN;
7057 - else
7058 - to_send = len_left;
7059 - ret = copy_from_user(buf, tmp_p, to_send);
7060 - /*
7061 - * There is no reason for this copy to be short. We just
7062 - * copied them here, and the mm hasn't been exposed to user-
7063 - * space yet.
7064 - */
7065 - if (ret) {
7066 - WARN_ON(1);
7067 - send_sig(SIGKILL, current, 0);
7068 - return -1;
7069 - }
7070 - buf[to_send] = '\0';
7071 - has_cntl = audit_string_contains_control(buf, to_send);
7072 - if (has_cntl) {
7073 - /*
7074 - * hex messages get logged as 2 bytes, so we can only
7075 - * send half as much in each message
7076 - */
7077 - max_execve_audit_len = MAX_EXECVE_AUDIT_LEN / 2;
7078 - break;
7079 - }
7080 - len_left -= to_send;
7081 - tmp_p += to_send;
7082 - } while (len_left > 0);
7083 -
7084 - len_left = len;
7085 -
7086 - if (len > max_execve_audit_len)
7087 - too_long = 1;
7088 -
7089 - /* rewalk the argument actually logging the message */
7090 - for (i = 0; len_left > 0; i++) {
7091 - int room_left;
7092 -
7093 - if (len_left > max_execve_audit_len)
7094 - to_send = max_execve_audit_len;
7095 - else
7096 - to_send = len_left;
7097 -
7098 - /* do we have space left to send this argument in this ab? */
7099 - room_left = MAX_EXECVE_AUDIT_LEN - arg_num_len - *len_sent;
7100 - if (has_cntl)
7101 - room_left -= (to_send * 2);
7102 - else
7103 - room_left -= to_send;
7104 - if (room_left < 0) {
7105 - *len_sent = 0;
7106 - audit_log_end(*ab);
7107 - *ab = audit_log_start(context, GFP_KERNEL, AUDIT_EXECVE);
7108 - if (!*ab)
7109 - return 0;
7110 - }
7111 + /* NOTE: we don't ever want to trust this value for anything
7112 + * serious, but the audit record format insists we
7113 + * provide an argument length for really long arguments,
7114 + * e.g. > MAX_EXECVE_AUDIT_LEN, so we have no choice but
7115 + * to use strncpy_from_user() to obtain this value for
7116 + * recording in the log, although we don't use it
7117 + * anywhere here to avoid a double-fetch problem */
7118 + if (len_full == 0)
7119 + len_full = strnlen_user(p, MAX_ARG_STRLEN) - 1;
7120 +
7121 + /* read more data from userspace */
7122 + if (require_data) {
7123 + /* can we make more room in the buffer? */
7124 + if (buf != buf_head) {
7125 + memmove(buf_head, buf, len_buf);
7126 + buf = buf_head;
7127 + }
7128 +
7129 + /* fetch as much as we can of the argument */
7130 + len_tmp = strncpy_from_user(&buf_head[len_buf], p,
7131 + len_max - len_buf);
7132 + if (len_tmp == -EFAULT) {
7133 + /* unable to copy from userspace */
7134 + send_sig(SIGKILL, current, 0);
7135 + goto out;
7136 + } else if (len_tmp == (len_max - len_buf)) {
7137 + /* buffer is not large enough */
7138 + require_data = true;
7139 + /* NOTE: if we are going to span multiple
7140 + * buffers force the encoding so we stand
7141 + * a chance at a sane len_full value and
7142 + * consistent record encoding */
7143 + encode = true;
7144 + len_full = len_full * 2;
7145 + p += len_tmp;
7146 + } else {
7147 + require_data = false;
7148 + if (!encode)
7149 + encode = audit_string_contains_control(
7150 + buf, len_tmp);
7151 + /* try to use a trusted value for len_full */
7152 + if (len_full < len_max)
7153 + len_full = (encode ?
7154 + len_tmp * 2 : len_tmp);
7155 + p += len_tmp + 1;
7156 + }
7157 + len_buf += len_tmp;
7158 + buf_head[len_buf] = '\0';
7159
7160 - /*
7161 - * first record needs to say how long the original string was
7162 - * so we can be sure nothing was lost.
7163 - */
7164 - if ((i == 0) && (too_long))
7165 - audit_log_format(*ab, " a%d_len=%zu", arg_num,
7166 - has_cntl ? 2*len : len);
7167 -
7168 - /*
7169 - * normally arguments are small enough to fit and we already
7170 - * filled buf above when we checked for control characters
7171 - * so don't bother with another copy_from_user
7172 - */
7173 - if (len >= max_execve_audit_len)
7174 - ret = copy_from_user(buf, p, to_send);
7175 - else
7176 - ret = 0;
7177 - if (ret) {
7178 - WARN_ON(1);
7179 - send_sig(SIGKILL, current, 0);
7180 - return -1;
7181 + /* length of the buffer in the audit record? */
7182 + len_abuf = (encode ? len_buf * 2 : len_buf + 2);
7183 }
7184 - buf[to_send] = '\0';
7185 -
7186 - /* actually log it */
7187 - audit_log_format(*ab, " a%d", arg_num);
7188 - if (too_long)
7189 - audit_log_format(*ab, "[%d]", i);
7190 - audit_log_format(*ab, "=");
7191 - if (has_cntl)
7192 - audit_log_n_hex(*ab, buf, to_send);
7193 - else
7194 - audit_log_string(*ab, buf);
7195 -
7196 - p += to_send;
7197 - len_left -= to_send;
7198 - *len_sent += arg_num_len;
7199 - if (has_cntl)
7200 - *len_sent += to_send * 2;
7201 - else
7202 - *len_sent += to_send;
7203 - }
7204 - /* include the null we didn't log */
7205 - return len + 1;
7206 -}
7207
7208 -static void audit_log_execve_info(struct audit_context *context,
7209 - struct audit_buffer **ab)
7210 -{
7211 - int i, len;
7212 - size_t len_sent = 0;
7213 - const char __user *p;
7214 - char *buf;
7215 + /* write as much as we can to the audit log */
7216 + if (len_buf > 0) {
7217 + /* NOTE: some magic numbers here - basically if we
7218 + * can't fit a reasonable amount of data into the
7219 + * existing audit buffer, flush it and start with
7220 + * a new buffer */
7221 + if ((sizeof(abuf) + 8) > len_rem) {
7222 + len_rem = len_max;
7223 + audit_log_end(*ab);
7224 + *ab = audit_log_start(context,
7225 + GFP_KERNEL, AUDIT_EXECVE);
7226 + if (!*ab)
7227 + goto out;
7228 + }
7229
7230 - p = (const char __user *)current->mm->arg_start;
7231 + /* create the non-arg portion of the arg record */
7232 + len_tmp = 0;
7233 + if (require_data || (iter > 0) ||
7234 + ((len_abuf + sizeof(abuf)) > len_rem)) {
7235 + if (iter == 0) {
7236 + len_tmp += snprintf(&abuf[len_tmp],
7237 + sizeof(abuf) - len_tmp,
7238 + " a%d_len=%lu",
7239 + arg, len_full);
7240 + }
7241 + len_tmp += snprintf(&abuf[len_tmp],
7242 + sizeof(abuf) - len_tmp,
7243 + " a%d[%d]=", arg, iter++);
7244 + } else
7245 + len_tmp += snprintf(&abuf[len_tmp],
7246 + sizeof(abuf) - len_tmp,
7247 + " a%d=", arg);
7248 + WARN_ON(len_tmp >= sizeof(abuf));
7249 + abuf[sizeof(abuf) - 1] = '\0';
7250 +
7251 + /* log the arg in the audit record */
7252 + audit_log_format(*ab, "%s", abuf);
7253 + len_rem -= len_tmp;
7254 + len_tmp = len_buf;
7255 + if (encode) {
7256 + if (len_abuf > len_rem)
7257 + len_tmp = len_rem / 2; /* encoding */
7258 + audit_log_n_hex(*ab, buf, len_tmp);
7259 + len_rem -= len_tmp * 2;
7260 + len_abuf -= len_tmp * 2;
7261 + } else {
7262 + if (len_abuf > len_rem)
7263 + len_tmp = len_rem - 2; /* quotes */
7264 + audit_log_n_string(*ab, buf, len_tmp);
7265 + len_rem -= len_tmp + 2;
7266 + /* don't subtract the "2" because we still need
7267 + * to add quotes to the remaining string */
7268 + len_abuf -= len_tmp;
7269 + }
7270 + len_buf -= len_tmp;
7271 + buf += len_tmp;
7272 + }
7273
7274 - audit_log_format(*ab, "argc=%d", context->execve.argc);
7275 + /* ready to move to the next argument? */
7276 + if ((len_buf == 0) && !require_data) {
7277 + arg++;
7278 + iter = 0;
7279 + len_full = 0;
7280 + require_data = true;
7281 + encode = false;
7282 + }
7283 + } while (arg < context->execve.argc);
7284
7285 - /*
7286 - * we need some kernel buffer to hold the userspace args. Just
7287 - * allocate one big one rather than allocating one of the right size
7288 - * for every single argument inside audit_log_single_execve_arg()
7289 - * should be <8k allocation so should be pretty safe.
7290 - */
7291 - buf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL);
7292 - if (!buf) {
7293 - audit_panic("out of memory for argv string");
7294 - return;
7295 - }
7296 + /* NOTE: the caller handles the final audit_log_end() call */
7297
7298 - for (i = 0; i < context->execve.argc; i++) {
7299 - len = audit_log_single_execve_arg(context, ab, i,
7300 - &len_sent, p, buf);
7301 - if (len <= 0)
7302 - break;
7303 - p += len;
7304 - }
7305 - kfree(buf);
7306 +out:
7307 + kfree(buf_head);
7308 }
7309
7310 static void show_special(struct audit_context *context, int *call_panic)
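
The audit_log_execve_info() rewrite exists because the old code fetched each argument from user space twice: once to scan for control characters and once to log it, and the string could change in between (a double-fetch). The new loop copies each chunk exactly once with strncpy_from_user() and treats the separately probed length as untrusted, logging it but never sizing a copy from it. A sketch of the single-fetch rule; bounded_copy() is an illustrative stand-in for strncpy_from_user():

    /* Copy at most 'max' bytes in one pass and report how many were
     * taken; the caller sizes nothing from an earlier length probe. */
    static long bounded_copy(char *dst, const char *src, long max)
    {
            long n = 0;

            while (n < max && src[n] != '\0') {
                    dst[n] = src[n];
                    n++;
            }
            if (n < max)
                    dst[n] = '\0';
            return n;       /* n == max means "didn't fit, fetch more" */
    }
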
7311 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
7312 index 75c0ff00aca6..e0be49fc382f 100644
7313 --- a/kernel/cgroup.c
7314 +++ b/kernel/cgroup.c
7315 @@ -2215,12 +2215,8 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
7316 goto out_unlock;
7317 }
7318
7319 - /*
7320 - * We know this subsystem has not yet been bound. Users in a non-init
7321 - * user namespace may only mount hierarchies with no bound subsystems,
7322 - * i.e. 'none,name=user1'
7323 - */
7324 - if (!opts.none && !capable(CAP_SYS_ADMIN)) {
7325 + /* Hierarchies may only be created in the initial cgroup namespace. */
7326 + if (ns != &init_cgroup_ns) {
7327 ret = -EPERM;
7328 goto out_unlock;
7329 }
7330 @@ -2962,6 +2958,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
7331 int retval = 0;
7332
7333 mutex_lock(&cgroup_mutex);
7334 + percpu_down_write(&cgroup_threadgroup_rwsem);
7335 for_each_root(root) {
7336 struct cgroup *from_cgrp;
7337
7338 @@ -2976,6 +2973,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
7339 if (retval)
7340 break;
7341 }
7342 + percpu_up_write(&cgroup_threadgroup_rwsem);
7343 mutex_unlock(&cgroup_mutex);
7344
7345 return retval;
7346 @@ -4343,6 +4341,8 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
7347
7348 mutex_lock(&cgroup_mutex);
7349
7350 + percpu_down_write(&cgroup_threadgroup_rwsem);
7351 +
7352 /* all tasks in @from are being moved, all csets are source */
7353 spin_lock_irq(&css_set_lock);
7354 list_for_each_entry(link, &from->cset_links, cset_link)
7355 @@ -4371,6 +4371,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
7356 } while (task && !ret);
7357 out_err:
7358 cgroup_migrate_finish(&preloaded_csets);
7359 + percpu_up_write(&cgroup_threadgroup_rwsem);
7360 mutex_unlock(&cgroup_mutex);
7361 return ret;
7362 }
7363 @@ -6309,14 +6310,11 @@ struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
7364 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
7365 return ERR_PTR(-EPERM);
7366
7367 - mutex_lock(&cgroup_mutex);
7368 + /* It is not safe to take cgroup_mutex here */
7369 spin_lock_irq(&css_set_lock);
7370 -
7371 cset = task_css_set(current);
7372 get_css_set(cset);
7373 -
7374 spin_unlock_irq(&css_set_lock);
7375 - mutex_unlock(&cgroup_mutex);
7376
7377 new_ns = alloc_cgroup_ns();
7378 if (IS_ERR(new_ns)) {
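
Two of the cgroup hunks wrap task migration in percpu_down_write(&cgroup_threadgroup_rwsem)/percpu_up_write() so no thread can fork or exit while the task lists are walked; the third drops cgroup_mutex from copy_cgroup_ns(), where its own comment says taking it is no longer safe. A pthread sketch of the writer-side exclusion, with illustrative names:

    #include <pthread.h>

    static pthread_rwlock_t threadgroup_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* fork/exit paths would take the read side; migration takes the
     * write side so it sees a stable set of threads. */
    static void migrate_all_tasks(void (*do_migrate)(void))
    {
            pthread_rwlock_wrlock(&threadgroup_lock);
            do_migrate();
            pthread_rwlock_unlock(&threadgroup_lock);
    }
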
7379 diff --git a/kernel/module.c b/kernel/module.c
7380 index 5f71aa63ed2a..6458a2f17d58 100644
7381 --- a/kernel/module.c
7382 +++ b/kernel/module.c
7383 @@ -2687,13 +2687,18 @@ static inline void kmemleak_load_module(const struct module *mod,
7384 #endif
7385
7386 #ifdef CONFIG_MODULE_SIG
7387 -static int module_sig_check(struct load_info *info)
7388 +static int module_sig_check(struct load_info *info, int flags)
7389 {
7390 int err = -ENOKEY;
7391 const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
7392 const void *mod = info->hdr;
7393
7394 - if (info->len > markerlen &&
7395 + /*
7396 + * Require flags == 0, as a module with version information
7397 + * removed is no longer the module that was signed
7398 + */
7399 + if (flags == 0 &&
7400 + info->len > markerlen &&
7401 memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
7402 /* We truncate the module to discard the signature */
7403 info->len -= markerlen;
7404 @@ -2712,7 +2717,7 @@ static int module_sig_check(struct load_info *info)
7405 return err;
7406 }
7407 #else /* !CONFIG_MODULE_SIG */
7408 -static int module_sig_check(struct load_info *info)
7409 +static int module_sig_check(struct load_info *info, int flags)
7410 {
7411 return 0;
7412 }
7413 @@ -3498,7 +3503,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
7414 long err;
7415 char *after_dashes;
7416
7417 - err = module_sig_check(info);
7418 + err = module_sig_check(info, flags);
7419 if (err)
7420 goto free_copy;
7421
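
module_sig_check() now refuses to verify when load flags request in-kernel stripping of version information, since the blob being checked would no longer be the byte stream that was signed. The marker test itself is a trailing-magic check; a sketch, with the marker string being the kernel's MODULE_SIG_STRING quoted from memory:

    #include <string.h>

    #define SIG_MARKER "~Module signature appended~\n"

    /* Does the blob end with the signature marker? */
    static int has_sig_marker(const void *blob, size_t len)
    {
            const size_t mlen = sizeof(SIG_MARKER) - 1;

            return len > mlen &&
                   memcmp((const char *)blob + len - mlen,
                          SIG_MARKER, mlen) == 0;
    }
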
7422 diff --git a/mm/backing-dev.c b/mm/backing-dev.c
7423 index ed173b8ae8f2..9269911d10dd 100644
7424 --- a/mm/backing-dev.c
7425 +++ b/mm/backing-dev.c
7426 @@ -825,6 +825,20 @@ int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
7427 }
7428 EXPORT_SYMBOL(bdi_register_dev);
7429
7430 +int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
7431 +{
7432 + int rc;
7433 +
7434 + rc = bdi_register(bdi, NULL, "%u:%u", MAJOR(owner->devt),
7435 + MINOR(owner->devt));
7436 + if (rc)
7437 + return rc;
7438 + bdi->owner = owner;
7439 + get_device(owner);
7440 + return 0;
7441 +}
7442 +EXPORT_SYMBOL(bdi_register_owner);
7443 +
7444 /*
7445 * Remove bdi from bdi_list, and ensure that it is no longer visible
7446 */
7447 @@ -849,6 +863,11 @@ void bdi_unregister(struct backing_dev_info *bdi)
7448 device_unregister(bdi->dev);
7449 bdi->dev = NULL;
7450 }
7451 +
7452 + if (bdi->owner) {
7453 + put_device(bdi->owner);
7454 + bdi->owner = NULL;
7455 + }
7456 }
7457
7458 void bdi_exit(struct backing_dev_info *bdi)
7459 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
7460 index addfe4accc07..d9ec1a5b37ff 100644
7461 --- a/mm/hugetlb.c
7462 +++ b/mm/hugetlb.c
7463 @@ -2214,6 +2214,10 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
7464 * and reducing the surplus.
7465 */
7466 spin_unlock(&hugetlb_lock);
7467 +
7468 + /* yield cpu to avoid soft lockup */
7469 + cond_resched();
7470 +
7471 if (hstate_is_gigantic(h))
7472 ret = alloc_fresh_gigantic_page(h, nodes_allowed);
7473 else
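
The hugetlb hunk inserts cond_resched() because persistent huge page allocation can spin for a long time with no natural scheduling point, tripping the soft-lockup watchdog. The closest user-space analogue to that explicit yield is sched_yield() inside a long loop; long_loop() is illustrative:

    #include <sched.h>

    static void long_loop(unsigned long iterations)
    {
            while (iterations--) {
                    /* ... expensive allocation work ... */
                    sched_yield();  /* let other runnable tasks in */
            }
    }
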
7474 diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
7475 index 388ee8b59145..1842141baedb 100644
7476 --- a/net/bluetooth/l2cap_sock.c
7477 +++ b/net/bluetooth/l2cap_sock.c
7478 @@ -927,7 +927,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
7479 break;
7480 }
7481
7482 - if (get_user(opt, (u32 __user *) optval)) {
7483 + if (get_user(opt, (u16 __user *) optval)) {
7484 err = -EFAULT;
7485 break;
7486 }
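
The one-character l2cap fix narrows a get_user() from u32 to u16 so the access width matches the 16-bit option being read: the wide read over-reads past the user buffer and, on big-endian systems, lands the value in the wrong half of the word. A sketch of the size-matching rule; fetch_u16_opt() is an illustrative helper:

    #include <stdint.h>
    #include <string.h>

    /* Fetch an option with the access width matched to the option's
     * declared type; a 4-byte read of a 2-byte option is an over-read. */
    static int fetch_u16_opt(uint16_t *dst, const void *optval,
                             size_t optlen)
    {
            if (optlen < sizeof(*dst))
                    return -1;
            memcpy(dst, optval, sizeof(*dst));
            return 0;
    }
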
7487 diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
7488 index 1325776daa27..bd007a9fd921 100644
7489 --- a/net/netlabel/netlabel_kapi.c
7490 +++ b/net/netlabel/netlabel_kapi.c
7491 @@ -824,7 +824,11 @@ socket_setattr_return:
7492 */
7493 void netlbl_sock_delattr(struct sock *sk)
7494 {
7495 - cipso_v4_sock_delattr(sk);
7496 + switch (sk->sk_family) {
7497 + case AF_INET:
7498 + cipso_v4_sock_delattr(sk);
7499 + break;
7500 + }
7501 }
7502
7503 /**
7504 @@ -987,7 +991,11 @@ req_setattr_return:
7505 */
7506 void netlbl_req_delattr(struct request_sock *req)
7507 {
7508 - cipso_v4_req_delattr(req);
7509 + switch (req->rsk_ops->family) {
7510 + case AF_INET:
7511 + cipso_v4_req_delattr(req);
7512 + break;
7513 + }
7514 }
7515
7516 /**
7517 diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
7518 index e167592793a7..42396a74405d 100644
7519 --- a/scripts/recordmcount.c
7520 +++ b/scripts/recordmcount.c
7521 @@ -33,10 +33,17 @@
7522 #include <string.h>
7523 #include <unistd.h>
7524
7525 +/*
7526 + * glibc synced up and added the metag number but didn't add the relocations.
7527 + * Work around this in a crude manner for now.
7528 + */
7529 #ifndef EM_METAG
7530 -/* Remove this when these make it to the standard system elf.h. */
7531 #define EM_METAG 174
7532 +#endif
7533 +#ifndef R_METAG_ADDR32
7534 #define R_METAG_ADDR32 2
7535 +#endif
7536 +#ifndef R_METAG_NONE
7537 #define R_METAG_NONE 3
7538 #endif
7539
7540 diff --git a/sound/hda/array.c b/sound/hda/array.c
7541 index 516795baa7db..5dfa610e4471 100644
7542 --- a/sound/hda/array.c
7543 +++ b/sound/hda/array.c
7544 @@ -21,13 +21,15 @@ void *snd_array_new(struct snd_array *array)
7545 return NULL;
7546 if (array->used >= array->alloced) {
7547 int num = array->alloced + array->alloc_align;
7548 + int oldsize = array->alloced * array->elem_size;
7549 int size = (num + 1) * array->elem_size;
7550 void *nlist;
7551 if (snd_BUG_ON(num >= 4096))
7552 return NULL;
7553 - nlist = krealloc(array->list, size, GFP_KERNEL | __GFP_ZERO);
7554 + nlist = krealloc(array->list, size, GFP_KERNEL);
7555 if (!nlist)
7556 return NULL;
7557 + memset(nlist + oldsize, 0, size - oldsize);
7558 array->list = nlist;
7559 array->alloced = num;
7560 }
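
The snd_array_new() fix works around krealloc() not reliably zeroing the newly extended tail even when __GFP_ZERO is passed, so the code now clears size - oldsize by hand. The user-space analogue with realloc(); zrealloc() is an illustrative helper name:

    #include <stdlib.h>
    #include <string.h>

    /* realloc() leaves the grown region uninitialized; clear it
     * explicitly, as the hunk above does after krealloc(). */
    static void *zrealloc(void *p, size_t oldsize, size_t newsize)
    {
            void *np = realloc(p, newsize);

            if (!np)
                    return NULL;
            if (newsize > oldsize)
                    memset((char *)np + oldsize, 0, newsize - oldsize);
            return np;
    }
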
7561 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
7562 index 6f8ea13323c1..89dacf9b4e6c 100644
7563 --- a/sound/pci/hda/hda_intel.c
7564 +++ b/sound/pci/hda/hda_intel.c
7565 @@ -2265,6 +2265,8 @@ static const struct pci_device_id azx_ids[] = {
7566 { PCI_DEVICE(0x1022, 0x780d),
7567 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
7568 /* ATI HDMI */
7569 + { PCI_DEVICE(0x1002, 0x0002),
7570 + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
7571 { PCI_DEVICE(0x1002, 0x1308),
7572 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
7573 { PCI_DEVICE(0x1002, 0x157a),
7574 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
7575 index abcb5a6a1cd9..f25479ba3981 100644
7576 --- a/sound/pci/hda/patch_realtek.c
7577 +++ b/sound/pci/hda/patch_realtek.c
7578 @@ -4674,6 +4674,22 @@ static void alc290_fixup_mono_speakers(struct hda_codec *codec,
7579 }
7580 }
7581
7582 +static void alc298_fixup_speaker_volume(struct hda_codec *codec,
7583 + const struct hda_fixup *fix, int action)
7584 +{
7585 + if (action == HDA_FIXUP_ACT_PRE_PROBE) {
7586 + /* The speaker is routed to Node 0x06 by mistake; as a result
7587 + we can't adjust the speaker's volume since this node does not have
7588 + Amp-out capability. We change the speaker's route to:
7589 + Node 0x02 (Audio Output) -> Node 0x0c (Audio Mixer) -> Node 0x17 (
7590 + Pin Complex); since Node 0x02 has Amp-out caps, we can adjust
7591 + the speaker's volume now. */
7592 +
7593 + hda_nid_t conn1[1] = { 0x0c };
7594 + snd_hda_override_conn_list(codec, 0x17, 1, conn1);
7595 + }
7596 +}
7597 +
7598 /* Hook to update amp GPIO4 for automute */
7599 static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
7600 struct hda_jack_callback *jack)
7601 @@ -4823,6 +4839,7 @@ enum {
7602 ALC280_FIXUP_HP_HEADSET_MIC,
7603 ALC221_FIXUP_HP_FRONT_MIC,
7604 ALC292_FIXUP_TPT460,
7605 + ALC298_FIXUP_SPK_VOLUME,
7606 };
7607
7608 static const struct hda_fixup alc269_fixups[] = {
7609 @@ -5478,6 +5495,12 @@ static const struct hda_fixup alc269_fixups[] = {
7610 .chained = true,
7611 .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE,
7612 },
7613 + [ALC298_FIXUP_SPK_VOLUME] = {
7614 + .type = HDA_FIXUP_FUNC,
7615 + .v.func = alc298_fixup_speaker_volume,
7616 + .chained = true,
7617 + .chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
7618 + },
7619 };
7620
7621 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7622 @@ -5524,6 +5547,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7623 SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
7624 SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
7625 SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
7626 + SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
7627 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
7628 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
7629 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
7630 @@ -5799,6 +5823,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
7631 {0x1b, 0x01014020},
7632 {0x21, 0x0221103f}),
7633 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
7634 + {0x14, 0x90170130},
7635 + {0x1b, 0x02011020},
7636 + {0x21, 0x0221103f}),
7637 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
7638 {0x14, 0x90170150},
7639 {0x1b, 0x02011020},
7640 {0x21, 0x0221105f}),
7641 diff --git a/tools/objtool/.gitignore b/tools/objtool/.gitignore
7642 index a0b3128bb31f..d3102c865a95 100644
7643 --- a/tools/objtool/.gitignore
7644 +++ b/tools/objtool/.gitignore
7645 @@ -1,2 +1,3 @@
7646 arch/x86/insn/inat-tables.c
7647 objtool
7648 +fixdep
7649 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
7650 index 48bd520fc702..dd25346ec356 100644
7651 --- a/virt/kvm/kvm_main.c
7652 +++ b/virt/kvm/kvm_main.c
7653 @@ -148,6 +148,7 @@ int vcpu_load(struct kvm_vcpu *vcpu)
7654 put_cpu();
7655 return 0;
7656 }
7657 +EXPORT_SYMBOL_GPL(vcpu_load);
7658
7659 void vcpu_put(struct kvm_vcpu *vcpu)
7660 {
7661 @@ -157,6 +158,7 @@ void vcpu_put(struct kvm_vcpu *vcpu)
7662 preempt_enable();
7663 mutex_unlock(&vcpu->mutex);
7664 }
7665 +EXPORT_SYMBOL_GPL(vcpu_put);
7666
7667 static void ack_flush(void *_completed)
7668 {