Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.5/0102-4.5.3-all-fixes.patch



Revision 2784
Fri May 13 07:36:29 2016 UTC by niro
File size: 245347 bytes
-linux-4.5.3
diff --git a/Makefile b/Makefile
index 1ecaaeb7791d..9b56a6c5e36f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 5
-SUBLEVEL = 2
+SUBLEVEL = 3
EXTRAVERSION =
NAME = Blurry Fish Butt

diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 1fafaad516ba..97471d62d5e4 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -860,7 +860,7 @@
ti,no-idle-on-init;
reg = <0x50000000 0x2000>;
interrupts = <100>;
- dmas = <&edma 52>;
+ dmas = <&edma 52 0>;
dma-names = "rxtx";
gpmc,num-cs = <7>;
gpmc,num-waitpins = <2>;
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 92068fbf8b57..6bd38a28e26c 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -207,7 +207,7 @@
ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 5>,
<&edma_tptc2 0>;

- ti,edma-memcpy-channels = <32 33>;
+ ti,edma-memcpy-channels = <58 59>;
};

edma_tptc0: tptc@49800000 {
@@ -884,7 +884,7 @@
gpmc: gpmc@50000000 {
compatible = "ti,am3352-gpmc";
ti,hwmods = "gpmc";
- dmas = <&edma 52>;
+ dmas = <&edma 52 0>;
dma-names = "rxtx";
clocks = <&l3s_gclk>;
clock-names = "fck";
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index d580e2b70f9a..637dc5dbc8ac 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -792,3 +792,8 @@
tx-num-evt = <32>;
rx-num-evt = <32>;
};
+
+&synctimer_32kclk {
+ assigned-clocks = <&mux_synctimer32k_ck>;
+ assigned-clock-parents = <&clkdiv32k_ick>;
+};
diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
index 7ccce7529b0c..cc952cf8ec30 100644
--- a/arch/arm/boot/dts/armada-375.dtsi
+++ b/arch/arm/boot/dts/armada-375.dtsi
@@ -529,7 +529,7 @@
};

sata@a0000 {
- compatible = "marvell,orion-sata";
+ compatible = "marvell,armada-370-sata";
reg = <0xa0000 0x5000>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gateclk 14>, <&gateclk 20>;
diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
index 3710755c6d76..85d2c377c332 100644
--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
+++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
@@ -117,7 +117,7 @@
};

/* USB part of the eSATA/USB 2.0 port */
- usb@50000 {
+ usb@58000 {
status = "okay";
};

diff --git a/arch/arm/boot/dts/pxa3xx.dtsi b/arch/arm/boot/dts/pxa3xx.dtsi
index cf6998a0804d..564341af7e97 100644
--- a/arch/arm/boot/dts/pxa3xx.dtsi
+++ b/arch/arm/boot/dts/pxa3xx.dtsi
@@ -30,7 +30,7 @@
reg = <0x43100000 90>;
interrupts = <45>;
clocks = <&clks CLK_NAND>;
- dmas = <&pdma 97>;
+ dmas = <&pdma 97 3>;
dma-names = "data";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 652a0bb11578..5189bcecad12 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -27,6 +27,7 @@ menuconfig ARCH_EXYNOS
select S5P_DEV_MFC
select SRAM
select THERMAL
+ select THERMAL_OF
select MFD_SYSCON
select CLKSRC_EXYNOS_MCT
select POWER_RESET
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index aa7b379e2661..2a3db0bd9e15 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -34,6 +34,7 @@
#include "pm.h"
#include "control.h"
#include "common.h"
+#include "soc.h"

/* Mach specific information to be recorded in the C-state driver_data */
struct omap3_idle_statedata {
@@ -315,6 +316,69 @@ static struct cpuidle_driver omap3_idle_driver = {
.safe_state_index = 0,
};

+/*
+ * Numbers based on measurements made in October 2009 for PM optimized kernel
+ * with CPU freq enabled on device Nokia N900. Assumes OPP2 (main idle OPP,
+ * and worst case latencies).
+ */
+static struct cpuidle_driver omap3430_idle_driver = {
+ .name = "omap3430_idle",
+ .owner = THIS_MODULE,
+ .states = {
+ {
+ .enter = omap3_enter_idle_bm,
+ .exit_latency = 110 + 162,
+ .target_residency = 5,
+ .name = "C1",
+ .desc = "MPU ON + CORE ON",
+ },
+ {
+ .enter = omap3_enter_idle_bm,
+ .exit_latency = 106 + 180,
+ .target_residency = 309,
+ .name = "C2",
+ .desc = "MPU ON + CORE ON",
+ },
+ {
+ .enter = omap3_enter_idle_bm,
+ .exit_latency = 107 + 410,
+ .target_residency = 46057,
+ .name = "C3",
+ .desc = "MPU RET + CORE ON",
+ },
+ {
+ .enter = omap3_enter_idle_bm,
+ .exit_latency = 121 + 3374,
+ .target_residency = 46057,
+ .name = "C4",
+ .desc = "MPU OFF + CORE ON",
+ },
+ {
+ .enter = omap3_enter_idle_bm,
+ .exit_latency = 855 + 1146,
+ .target_residency = 46057,
+ .name = "C5",
+ .desc = "MPU RET + CORE RET",
+ },
+ {
+ .enter = omap3_enter_idle_bm,
+ .exit_latency = 7580 + 4134,
+ .target_residency = 484329,
+ .name = "C6",
+ .desc = "MPU OFF + CORE RET",
+ },
+ {
+ .enter = omap3_enter_idle_bm,
+ .exit_latency = 7505 + 15274,
+ .target_residency = 484329,
+ .name = "C7",
+ .desc = "MPU OFF + CORE OFF",
+ },
+ },
+ .state_count = ARRAY_SIZE(omap3_idle_data),
+ .safe_state_index = 0,
+};
+
/* Public functions */

/**
@@ -333,5 +397,8 @@ int __init omap3_idle_init(void)
if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
return -ENODEV;

- return cpuidle_register(&omap3_idle_driver, NULL);
+ if (cpu_is_omap3430())
+ return cpuidle_register(&omap3430_idle_driver, NULL);
+ else
+ return cpuidle_register(&omap3_idle_driver, NULL);
}
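
The omap3430 table above gives each C-state an exit_latency (the cost, in microseconds, of leaving the state) and a target_residency (the minimum idle time that makes entering it worthwhile). A cpuidle governor picks the deepest state whose target_residency fits the predicted idle period and whose exit_latency fits the current latency constraint. Below is a minimal standalone C sketch of that selection rule, reusing three of the states above; it is illustrative only, not the kernel's governor code.

#include <stdio.h>

struct state { const char *name; unsigned exit_latency; unsigned target_residency; };

static int pick_state(const struct state *s, int n,
                      unsigned predicted_idle_us, unsigned latency_limit_us)
{
    int best = 0;   /* index 0 is the always-safe shallow state */
    for (int i = 1; i < n; i++) {
        if (s[i].target_residency <= predicted_idle_us &&
            s[i].exit_latency <= latency_limit_us)
            best = i;
    }
    return best;
}

int main(void)
{
    const struct state tbl[] = {
        { "C1", 110 + 162, 5 },
        { "C3", 107 + 410, 46057 },
        { "C7", 7505 + 15274, 484329 },
    };
    /* 50 ms of predicted idle with a 20 ms latency budget picks C3. */
    printf("picked %s\n", tbl[pick_state(tbl, 3, 50000, 20000)].name);
    return 0;
}
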
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 3c87e40650cf..9821be6dfd5e 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -368,6 +368,7 @@ void __init omap5_map_io(void)
void __init dra7xx_map_io(void)
{
iotable_init(dra7xx_io_desc, ARRAY_SIZE(dra7xx_io_desc));
+ omap_barriers_init();
}
#endif
/*
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index b6d62e4cdfdd..2af6ff63e3b4 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1416,9 +1416,7 @@ static void _enable_sysc(struct omap_hwmod *oh)
(sf & SYSC_HAS_CLOCKACTIVITY))
_set_clockactivity(oh, oh->class->sysc->clockact, &v);

- /* If the cached value is the same as the new value, skip the write */
- if (oh->_sysc_cache != v)
- _write_sysconfig(v, oh);
+ _write_sysconfig(v, oh);

/*
* Set the autoidle bit only after setting the smartidle bit
@@ -1481,7 +1479,9 @@ static void _idle_sysc(struct omap_hwmod *oh)
_set_master_standbymode(oh, idlemode, &v);
}

- _write_sysconfig(v, oh);
+ /* If the cached value is the same as the new value, skip the write */
+ if (oh->_sysc_cache != v)
+ _write_sysconfig(v, oh);
}

/**
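
The two omap_hwmod hunks above swap where the "skip the write if cached" shortcut lives: _enable_sysc() now always writes SYSCONFIG, while _idle_sysc() keeps the shortcut. After a power transition the hardware register can no longer be trusted to match the cached value, so the enable path must force the write. A small C sketch of the pattern, with hypothetical names and the MMIO access elided:

#include <stdint.h>
#include <stdbool.h>

struct hwmod { uint32_t sysc_cache; };  /* last value written */

static void mmio_write(uint32_t v) { (void)v; /* real write elided */ }

static void write_sysconfig(struct hwmod *oh, uint32_t v, bool force)
{
    /* The enable path passes force=true: the register may have been
     * reset while the module was off, making the cache stale. The
     * idle path passes force=false and may skip redundant writes. */
    if (force || oh->sysc_cache != v) {
        mmio_write(v);
        oh->sysc_cache = v;
    }
}
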
diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
index f998eb1c698e..0cf4426183cf 100644
--- a/arch/arm/mach-prima2/Kconfig
+++ b/arch/arm/mach-prima2/Kconfig
@@ -2,6 +2,7 @@ menuconfig ARCH_SIRF
bool "CSR SiRF"
depends on ARCH_MULTI_V7
select ARCH_HAS_RESET_CONTROLLER
+ select RESET_CONTROLLER
select ARCH_REQUIRE_GPIOLIB
select GENERIC_IRQ_CHIP
select NO_IOPORT_MAP
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
index 8dde19962a5b..f63c96cd3608 100644
--- a/arch/powerpc/include/uapi/asm/cputable.h
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -31,6 +31,7 @@
#define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
0x00000040

+/* Reserved - do not use 0x00000004 */
#define PPC_FEATURE_TRUE_LE 0x00000002
#define PPC_FEATURE_PPC_LE 0x00000001

diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 7030b035905d..a15fe1d4e84a 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -148,23 +148,25 @@ static struct ibm_pa_feature {
unsigned long cpu_features; /* CPU_FTR_xxx bit */
unsigned long mmu_features; /* MMU_FTR_xxx bit */
unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */
+ unsigned int cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */
unsigned char pabyte; /* byte number in ibm,pa-features */
unsigned char pabit; /* bit number (big-endian) */
unsigned char invert; /* if 1, pa bit set => clear feature */
} ibm_pa_features[] __initdata = {
- {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
- {0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
- {CPU_FTR_CTRL, 0, 0, 0, 3, 0},
- {CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0},
- {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1},
- {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
- {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
+ {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0, 0},
+ {0, 0, PPC_FEATURE_HAS_FPU, 0, 0, 1, 0},
+ {CPU_FTR_CTRL, 0, 0, 0, 0, 3, 0},
+ {CPU_FTR_NOEXECUTE, 0, 0, 0, 0, 6, 0},
+ {CPU_FTR_NODSISRALIGN, 0, 0, 0, 1, 1, 1},
+ {0, MMU_FTR_CI_LARGE_PAGE, 0, 0, 1, 2, 0},
+ {CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
/*
- * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
- * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
- * which is 0 if the kernel doesn't support TM.
+ * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
+ * we don't want to turn on TM here, so we use the *_COMP versions
+ * which are 0 if the kernel doesn't support TM.
*/
- {CPU_FTR_TM_COMP, 0, 0, 22, 0, 0},
+ {CPU_FTR_TM_COMP, 0, 0,
+ PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
};

static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -195,10 +197,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
if (bit ^ fp->invert) {
cur_cpu_spec->cpu_features |= fp->cpu_features;
cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
+ cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
cur_cpu_spec->mmu_features |= fp->mmu_features;
} else {
cur_cpu_spec->cpu_features &= ~fp->cpu_features;
cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
+ cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
cur_cpu_spec->mmu_features &= ~fp->mmu_features;
}
}
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 2b2ced9dc00a..6dafabb6ae1a 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -45,7 +45,8 @@ struct zpci_fmb {
u64 rpcit_ops;
u64 dma_rbytes;
u64 dma_wbytes;
-} __packed __aligned(64);
+ u64 pad[2];
+} __packed __aligned(128);

enum zpci_state {
ZPCI_FN_STATE_RESERVED,
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index a841e9765bd6..8381c09d2870 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
@@ -453,10 +453,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,

req = cast_mcryptd_ctx_to_req(req_ctx);
if (irqs_disabled())
- rctx->complete(&req->base, ret);
+ req_ctx->complete(&req->base, ret);
else {
local_bh_disable();
- rctx->complete(&req->base, ret);
+ req_ctx->complete(&req->base, ret);
local_bh_enable();
}
}
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index f8a29d2c97b0..e6a8613fbfb0 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -4,6 +4,7 @@
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

+#define hugepages_supported() cpu_has_pse

static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr,
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index ad59d70bcb1a..ef495511f019 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -256,7 +256,8 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
struct irq_desc *desc;
int cpu, vector;

- BUG_ON(!data->cfg.vector);
+ if (!data->cfg.vector)
+ return;

vector = data->cfg.vector;
for_each_cpu_and(cpu, data->domain, cpu_online_mask)
diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
index 0a850100c594..2658e2af74ec 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
@@ -29,7 +29,7 @@ static char gen_pool_buf[MCE_POOLSZ];
void mce_gen_pool_process(void)
{
struct llist_node *head;
- struct mce_evt_llist *node;
+ struct mce_evt_llist *node, *tmp;
struct mce *mce;

head = llist_del_all(&mce_event_llist);
@@ -37,7 +37,7 @@ void mce_gen_pool_process(void)
return;

head = llist_reverse_order(head);
- llist_for_each_entry(node, head, llnode) {
+ llist_for_each_entry_safe(node, tmp, head, llnode) {
mce = &node->mce;
atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
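
The switch to llist_for_each_entry_safe() matters because gen_pool_free() releases the node the iterator is standing on; the plain variant would read node->llnode after the free. The same rule holds for any singly linked list walked while freeing, as in this standalone C analogue:

#include <stdlib.h>

struct node { struct node *next; /* payload elided */ };

/* Analogue of llist_for_each_entry_safe(): cache the link before
 * the current node is consumed and freed. */
static void drain(struct node *head, void (*consume)(struct node *))
{
    struct node *n = head, *tmp;

    while (n) {
        tmp = n->next;  /* read the link first */
        if (consume)
            consume(n);
        free(n);        /* 'n' is gone; 'tmp' is still valid */
        n = tmp;
    }
}

int main(void) { drain(NULL, NULL); return 0; }
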
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index eca5bd9f0e47..ac4963c38aa3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -697,7 +697,6 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
return 1;
}
- kvm_put_guest_xcr0(vcpu);
vcpu->arch.xcr0 = xcr0;

if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
@@ -6569,8 +6568,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_x86_ops->prepare_guest_switch(vcpu);
if (vcpu->fpu_active)
kvm_load_guest_fpu(vcpu);
- kvm_load_guest_xcr0(vcpu);
-
vcpu->mode = IN_GUEST_MODE;

srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -6593,6 +6590,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
goto cancel_injection;
}

+ kvm_load_guest_xcr0(vcpu);
+
if (req_immediate_exit)
smp_send_reschedule(vcpu->cpu);

@@ -6642,6 +6641,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();

+ kvm_put_guest_xcr0(vcpu);
+
/* Interrupt is enabled by handle_external_intr() */
kvm_x86_ops->handle_external_intr(vcpu);

@@ -7289,7 +7290,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
* and assume host would use all available bits.
* Guest xcr0 would be loaded later.
*/
- kvm_put_guest_xcr0(vcpu);
vcpu->guest_fpu_loaded = 1;
__kernel_fpu_begin();
__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
@@ -7298,8 +7298,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
- kvm_put_guest_xcr0(vcpu);
-
if (!vcpu->guest_fpu_loaded) {
vcpu->fpu_counter = 0;
return;
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 637ab34ed632..ddb2244b06a1 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -33,7 +33,7 @@
struct kmmio_fault_page {
struct list_head list;
struct kmmio_fault_page *release_next;
- unsigned long page; /* location of the fault page */
+ unsigned long addr; /* the requested address */
pteval_t old_presence; /* page presence prior to arming */
bool armed;

@@ -70,9 +70,16 @@ unsigned int kmmio_count;
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

-static struct list_head *kmmio_page_list(unsigned long page)
+static struct list_head *kmmio_page_list(unsigned long addr)
{
- return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
+ unsigned int l;
+ pte_t *pte = lookup_address(addr, &l);
+
+ if (!pte)
+ return NULL;
+ addr &= page_level_mask(l);
+
+ return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
}

/* Accessed per-cpu */
@@ -98,15 +105,19 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
}

/* You must be holding RCU read lock. */
-static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
+static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
{
struct list_head *head;
struct kmmio_fault_page *f;
+ unsigned int l;
+ pte_t *pte = lookup_address(addr, &l);

- page &= PAGE_MASK;
- head = kmmio_page_list(page);
+ if (!pte)
+ return NULL;
+ addr &= page_level_mask(l);
+ head = kmmio_page_list(addr);
list_for_each_entry_rcu(f, head, list) {
- if (f->page == page)
+ if (f->addr == addr)
return f;
}
return NULL;
@@ -137,10 +148,10 @@ static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
{
unsigned int level;
- pte_t *pte = lookup_address(f->page, &level);
+ pte_t *pte = lookup_address(f->addr, &level);

if (!pte) {
- pr_err("no pte for page 0x%08lx\n", f->page);
+ pr_err("no pte for addr 0x%08lx\n", f->addr);
return -1;
}

@@ -156,7 +167,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
return -1;
}

- __flush_tlb_one(f->page);
+ __flush_tlb_one(f->addr);
return 0;
}

@@ -176,12 +187,12 @@ static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
int ret;
WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
if (f->armed) {
- pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
- f->page, f->count, !!f->old_presence);
+ pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
+ f->addr, f->count, !!f->old_presence);
}
ret = clear_page_presence(f, true);
- WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
- f->page);
+ WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
+ f->addr);
f->armed = true;
return ret;
}
@@ -191,7 +202,7 @@ static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
int ret = clear_page_presence(f, false);
WARN_ONCE(ret < 0,
- KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
+ KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
f->armed = false;
}

@@ -215,6 +226,12 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
struct kmmio_context *ctx;
struct kmmio_fault_page *faultpage;
int ret = 0; /* default to fault not handled */
+ unsigned long page_base = addr;
+ unsigned int l;
+ pte_t *pte = lookup_address(addr, &l);
+ if (!pte)
+ return -EINVAL;
+ page_base &= page_level_mask(l);

/*
* Preemption is now disabled to prevent process switch during
@@ -227,7 +244,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
preempt_disable();
rcu_read_lock();

- faultpage = get_kmmio_fault_page(addr);
+ faultpage = get_kmmio_fault_page(page_base);
if (!faultpage) {
/*
* Either this page fault is not caused by kmmio, or
@@ -239,7 +256,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)

ctx = &get_cpu_var(kmmio_ctx);
if (ctx->active) {
- if (addr == ctx->addr) {
+ if (page_base == ctx->addr) {
/*
* A second fault on the same page means some other
* condition needs handling by do_page_fault(), the
@@ -267,9 +284,9 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
ctx->active++;

ctx->fpage = faultpage;
- ctx->probe = get_kmmio_probe(addr);
+ ctx->probe = get_kmmio_probe(page_base);
ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
- ctx->addr = addr;
+ ctx->addr = page_base;

if (ctx->probe && ctx->probe->pre_handler)
ctx->probe->pre_handler(ctx->probe, regs, addr);
@@ -354,12 +371,11 @@ out:
}

/* You must be holding kmmio_lock. */
-static int add_kmmio_fault_page(unsigned long page)
+static int add_kmmio_fault_page(unsigned long addr)
{
struct kmmio_fault_page *f;

- page &= PAGE_MASK;
- f = get_kmmio_fault_page(page);
+ f = get_kmmio_fault_page(addr);
if (f) {
if (!f->count)
arm_kmmio_fault_page(f);
@@ -372,26 +388,25 @@ static int add_kmmio_fault_page(unsigned long page)
return -1;

f->count = 1;
- f->page = page;
+ f->addr = addr;

if (arm_kmmio_fault_page(f)) {
kfree(f);
return -1;
}

- list_add_rcu(&f->list, kmmio_page_list(f->page));
+ list_add_rcu(&f->list, kmmio_page_list(f->addr));

return 0;
}

/* You must be holding kmmio_lock. */
-static void release_kmmio_fault_page(unsigned long page,
+static void release_kmmio_fault_page(unsigned long addr,
struct kmmio_fault_page **release_list)
{
struct kmmio_fault_page *f;

- page &= PAGE_MASK;
- f = get_kmmio_fault_page(page);
+ f = get_kmmio_fault_page(addr);
if (!f)
return;

@@ -420,18 +435,27 @@ int register_kmmio_probe(struct kmmio_probe *p)
int ret = 0;
unsigned long size = 0;
const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
+ unsigned int l;
+ pte_t *pte;

spin_lock_irqsave(&kmmio_lock, flags);
if (get_kmmio_probe(p->addr)) {
ret = -EEXIST;
goto out;
}
+
+ pte = lookup_address(p->addr, &l);
+ if (!pte) {
+ ret = -EINVAL;
+ goto out;
+ }
+
kmmio_count++;
list_add_rcu(&p->list, &kmmio_probes);
while (size < size_lim) {
if (add_kmmio_fault_page(p->addr + size))
pr_err("Unable to set page fault.\n");
- size += PAGE_SIZE;
+ size += page_level_size(l);
}
out:
spin_unlock_irqrestore(&kmmio_lock, flags);
@@ -506,11 +530,17 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
struct kmmio_fault_page *release_list = NULL;
struct kmmio_delayed_release *drelease;
+ unsigned int l;
+ pte_t *pte;
+
+ pte = lookup_address(p->addr, &l);
+ if (!pte)
+ return;

spin_lock_irqsave(&kmmio_lock, flags);
while (size < size_lim) {
release_kmmio_fault_page(p->addr + size, &release_list);
- size += PAGE_SIZE;
+ size += page_level_size(l);
}
list_del_rcu(&p->list);
kmmio_count--;
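
The kmmio rework above replaces every `page &= PAGE_MASK` with a mask derived from the page-table level returned by lookup_address(), so probes on 2 MiB or 1 GiB mappings round to the correct base and step by the correct stride. The arithmetic, in standalone C with an assumed 2 MiB level:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t level_size = 2ULL << 20;           /* 2 MiB mapping level */
    uint64_t level_mask = ~(level_size - 1);    /* page_level_mask() analogue */
    uint64_t addr = 0x1234567;

    printf("base = %#llx\n", (unsigned long long)(addr & level_mask));
    /* Walking a range steps by page_level_size(l), not PAGE_SIZE: */
    printf("next = %#llx\n",
           (unsigned long long)((addr & level_mask) + level_size));
    return 0;
}
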
diff --git a/block/partition-generic.c b/block/partition-generic.c
index fefd01b496a0..cfcfe1b0ecbc 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -350,15 +350,20 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
goto out_del;
}

+ err = hd_ref_init(p);
+ if (err) {
+ if (flags & ADDPART_FLAG_WHOLEDISK)
+ goto out_remove_file;
+ goto out_del;
+ }
+
/* everything is up and running, commence */
rcu_assign_pointer(ptbl->part[partno], p);

/* suppress uevent if the disk suppresses it */
if (!dev_get_uevent_suppress(ddev))
kobject_uevent(&pdev->kobj, KOBJ_ADD);
-
- if (!hd_ref_init(p))
- return p;
+ return p;

out_free_info:
free_part_info(p);
@@ -367,6 +372,8 @@ out_free_stats:
out_free:
kfree(p);
return ERR_PTR(err);
+out_remove_file:
+ device_remove_file(pdev, &dev_attr_whole_disk);
out_del:
kobject_put(p->holder_dir);
device_del(pdev);
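
The partition fix works by taking the reference (hd_ref_init) before the partition is published via rcu_assign_pointer(), and by giving the failure a dedicated unwind label that also removes the whole-disk attribute file. The underlying goto-ladder idiom, as a standalone C sketch with hypothetical steps:

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }  /* force the unwind path */
static void undo_step_b(void) { puts("undo b"); }
static void undo_step_a(void) { puts("undo a"); }

/* Each label undoes exactly the steps that succeeded before the
 * failure, in reverse order. */
static int setup(void)
{
    if (step_a() < 0)
        return -1;
    if (step_b() < 0)
        goto err_a;
    if (step_c() < 0)
        goto err_b;
    return 0;

err_b:
    undo_step_b();
err_a:
    undo_step_a();
    return -1;
}

int main(void) { return setup() ? 1 : 0; }
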
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 50f5c97e1087..0cbc5a5025c2 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -310,16 +310,16 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
req_ctx->child_req.src = req->src;
req_ctx->child_req.src_len = req->src_len;
req_ctx->child_req.dst = req_ctx->out_sg;
- req_ctx->child_req.dst_len = ctx->key_size - 1;
+ req_ctx->child_req.dst_len = ctx->key_size ;

- req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+ req_ctx->out_buf = kmalloc(ctx->key_size,
(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC);
if (!req_ctx->out_buf)
return -ENOMEM;

pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
- ctx->key_size - 1, NULL);
+ ctx->key_size, NULL);

akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
@@ -491,16 +491,16 @@ static int pkcs1pad_verify(struct akcipher_request *req)
req_ctx->child_req.src = req->src;
req_ctx->child_req.src_len = req->src_len;
req_ctx->child_req.dst = req_ctx->out_sg;
- req_ctx->child_req.dst_len = ctx->key_size - 1;
+ req_ctx->child_req.dst_len = ctx->key_size;

- req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+ req_ctx->out_buf = kmalloc(ctx->key_size,
(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC);
if (!req_ctx->out_buf)
return -ENOMEM;

pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
- ctx->key_size - 1, NULL);
+ ctx->key_size, NULL);

akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
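
The sizing change above follows from the RSA primitive itself: the child transform works on values the size of the modulus, so it may emit up to key_size bytes, and a key_size - 1 destination can fail or truncate even though a well-formed EME-PKCS1-v1_5 block begins with a 0x00 byte. The arithmetic for a common key size:

#include <stdio.h>

int main(void)
{
    /* For an n-bit RSA key the raw operation produces n/8 bytes,
     * so the output buffer must hold all of them. */
    unsigned n_bits = 2048;
    printf("key_size = %u bytes\n", n_bits / 8);    /* 256 */
    return 0;
}
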
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index bd75d46234a4..ddb436f86415 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -83,6 +83,8 @@ acpi_status acpi_ns_initialize_objects(void)

ACPI_FUNCTION_TRACE(ns_initialize_objects);

+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Completing Initialization of ACPI Objects\n"));
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Starting initialization of namespace objects ****\n"));
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 278666e39563..c37d47982fbe 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -83,6 +83,20 @@ acpi_status __init acpi_load_tables(void)
"While loading namespace from ACPI tables"));
}

+ if (!acpi_gbl_group_module_level_code) {
+ /*
+ * Initialize the objects that remain uninitialized. This
+ * runs the executable AML that may be part of the
+ * declaration of these objects:
+ * operation_regions, buffer_fields, Buffers, and Packages.
+ */
+ status = acpi_ns_initialize_objects();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ acpi_gbl_reg_methods_enabled = TRUE;
return_ACPI_STATUS(status);
}

diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 721b87cce908..638fbd4ad72b 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -267,7 +267,6 @@ acpi_status __init acpi_initialize_objects(u32 flags)
* initialized, even if they contain executable AML (see the call to
* acpi_ns_initialize_objects below).
*/
- acpi_gbl_reg_methods_enabled = TRUE;
if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"[Init] Executing _REG OpRegion methods\n"));
@@ -299,20 +298,18 @@ acpi_status __init acpi_initialize_objects(u32 flags)
*/
if (acpi_gbl_group_module_level_code) {
acpi_ns_exec_module_code_list();
- }

- /*
- * Initialize the objects that remain uninitialized. This runs the
- * executable AML that may be part of the declaration of these objects:
- * operation_regions, buffer_fields, Buffers, and Packages.
- */
- if (!(flags & ACPI_NO_OBJECT_INIT)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Completing Initialization of ACPI Objects\n"));
-
- status = acpi_ns_initialize_objects();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ /*
+ * Initialize the objects that remain uninitialized. This
+ * runs the executable AML that may be part of the
+ * declaration of these objects:
+ * operation_regions, buffer_fields, Buffers, and Packages.
+ */
+ if (!(flags & ACPI_NO_OBJECT_INIT)) {
+ status = acpi_ns_initialize_objects();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
}
}

diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 301b785f9f56..0caf92ae25f3 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1378,7 +1378,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
mutex_lock(&subdomain->lock);
mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);

- if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
+ if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
subdomain->name);
ret = -EBUSY;
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index cf351d3dab1c..0708f301ad97 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -844,8 +844,14 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
}

opp->u_volt = microvolt[0];
- opp->u_volt_min = microvolt[1];
- opp->u_volt_max = microvolt[2];
+
+ if (count == 1) {
+ opp->u_volt_min = opp->u_volt;
+ opp->u_volt_max = opp->u_volt;
+ } else {
+ opp->u_volt_min = microvolt[1];
+ opp->u_volt_max = microvolt[2];
+ }

/* Search for "opp-microamp-<name>" */
prop = NULL;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 423f4ca7d712..80cf8add46ff 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
bio_segments(bio), blk_rq_bytes(cmd->rq));
+ /*
+ * This bio may be started from the middle of the 'bvec'
+ * because of bio splitting, so offset from the bvec must
+ * be passed to iov iterator
+ */
+ iter.iov_offset = bio->bi_iter.bi_bvec_done;

cmd->iocb.ki_pos = pos;
cmd->iocb.ki_filp = file;
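
The loop-device hunk seeds the iov_iter with bi_bvec_done because a split bio can begin partway through its first bio_vec; starting the iterator at the vector's base would replay bytes that belong to the previous fragment. A standalone C analogue of resuming a vector walk at a byte offset:

#include <stdio.h>
#include <stddef.h>

struct vec { const char *base; size_t len; };

/* Analogue of iter.iov_offset = bi_bvec_done: the first segment is
 * consumed starting 'done' bytes in, later segments from offset 0. */
static void walk(const struct vec *v, int n, size_t done)
{
    for (int i = 0; i < n; i++) {
        size_t off = (i == 0) ? done : 0;
        printf("segment %d: %zu bytes from offset %zu\n",
               i, v[i].len - off, off);
    }
}

int main(void)
{
    const struct vec v[] = { { "abcdef", 6 }, { "ghij", 4 } };
    walk(v, 2, 2);  /* two bytes already consumed by the split */
    return 0;
}
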
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 562b5a4ca7b7..78a39f736c64 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -126,7 +126,7 @@
*/
#include <linux/types.h>

-static bool verbose = 0;
+static int verbose = 0;
static int major = PD_MAJOR;
static char *name = PD_NAME;
static int cluster = 64;
@@ -161,7 +161,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
static DEFINE_MUTEX(pd_mutex);
static DEFINE_SPINLOCK(pd_lock);

-module_param(verbose, bool, 0);
+module_param(verbose, int, 0);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param(cluster, int, 0);
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 1740d75e8a32..216a94fed5b4 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -117,7 +117,7 @@

*/

-static bool verbose = 0;
+static int verbose = 0;
static int major = PT_MAJOR;
static char *name = PT_NAME;
static int disable = 0;
@@ -152,7 +152,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};

#include <asm/uaccess.h>

-module_param(verbose, bool, 0);
+module_param(verbose, int, 0);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param_array(drive0, int, NULL, 0);
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index e98d15eaa799..1827fc4d15c1 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -150,7 +150,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
return ret;
}

- for_each_child_of_node(pdev->dev.of_node, child) {
+ for_each_available_child_of_node(pdev->dev.of_node, child) {
if (!child->name)
continue;

diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
index 834a2aeaf27a..350b7309c26d 100644
--- a/drivers/bus/uniphier-system-bus.c
+++ b/drivers/bus/uniphier-system-bus.c
@@ -108,7 +108,7 @@ static int uniphier_system_bus_check_overlap(

for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) {
- if (priv->bank[i].end > priv->bank[j].base ||
+ if (priv->bank[i].end > priv->bank[j].base &&
priv->bank[i].base < priv->bank[j].end) {
dev_err(priv->dev,
"region overlap between bank%d and bank%d\n",
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 45a634016f95..b28e4da3d2cf 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -20,7 +20,11 @@
#include <keys/trusted-type.h>

enum tpm2_object_attributes {
- TPM2_ATTR_USER_WITH_AUTH = BIT(6),
+ TPM2_OA_USER_WITH_AUTH = BIT(6),
+};
+
+enum tpm2_session_attributes {
+ TPM2_SA_CONTINUE_SESSION = BIT(0),
};

struct tpm2_startup_in {
@@ -478,22 +482,18 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
tpm_buf_append_u8(&buf, payload->migratable);

/* public */
- if (options->policydigest)
- tpm_buf_append_u16(&buf, 14 + options->digest_len);
- else
- tpm_buf_append_u16(&buf, 14);
-
+ tpm_buf_append_u16(&buf, 14 + options->policydigest_len);
tpm_buf_append_u16(&buf, TPM2_ALG_KEYEDHASH);
tpm_buf_append_u16(&buf, hash);

/* policy */
- if (options->policydigest) {
+ if (options->policydigest_len) {
tpm_buf_append_u32(&buf, 0);
- tpm_buf_append_u16(&buf, options->digest_len);
+ tpm_buf_append_u16(&buf, options->policydigest_len);
tpm_buf_append(&buf, options->policydigest,
- options->digest_len);
+ options->policydigest_len);
} else {
- tpm_buf_append_u32(&buf, TPM2_ATTR_USER_WITH_AUTH);
+ tpm_buf_append_u32(&buf, TPM2_OA_USER_WITH_AUTH);
tpm_buf_append_u16(&buf, 0);
}

@@ -631,7 +631,7 @@ static int tpm2_unseal(struct tpm_chip *chip,
options->policyhandle ?
options->policyhandle : TPM2_RS_PW,
NULL /* nonce */, 0,
- 0 /* session_attributes */,
+ TPM2_SA_CONTINUE_SESSION,
options->blobauth /* hmac */,
TPM_DIGEST_SIZE);

diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cd83d477e32d..e89512383c3c 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -673,6 +673,11 @@ static int core_get_max_pstate(void)
if (err)
goto skip_tar;

+ /* For level 1 and 2, bits[23:16] contain the ratio */
+ if (tdp_ctrl)
+ tdp_ratio >>= 16;
+
+ tdp_ratio &= 0xff; /* ratios are only 8 bits long */
if (tdp_ratio - 1 == tar) {
max_pstate = tar;
pr_debug("max_pstate=TAC %x\n", max_pstate);
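
The intel_pstate hunk extracts an 8-bit ratio that, for TDP levels 1 and 2, sits in bits 23:16 of the register value: shift right by 16 when tdp_ctrl selects a non-base level, then mask to 8 bits. Worked through with an illustrative (made-up) register value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t tdp_ratio = 0x00180020;    /* illustrative MSR contents */
    int tdp_ctrl = 1;                   /* non-zero: TDP level 1 or 2 */

    if (tdp_ctrl)
        tdp_ratio >>= 16;               /* ratio lives in bits 23:16 */
    tdp_ratio &= 0xff;                  /* ratios are 8 bits wide */

    printf("ratio = %llu\n", (unsigned long long)tdp_ratio);    /* 24 */
    return 0;
}
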
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 3d9acc53d247..60fc0fa26fd3 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -225,6 +225,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
struct ccp_aes_cmac_exp_ctx state;

+ /* Don't let anything leak to 'out' */
+ memset(&state, 0, sizeof(state));
+
state.null_msg = rctx->null_msg;
memcpy(state.iv, rctx->iv, sizeof(state.iv));
state.buf_count = rctx->buf_count;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 8ef06fad8b14..ab9945f2cb7a 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -212,6 +212,9 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct ccp_sha_exp_ctx state;

+ /* Don't let anything leak to 'out' */
+ memset(&state, 0, sizeof(state));
+
state.type = rctx->type;
state.msg_bits = rctx->msg_bits;
state.first = rctx->first;
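
Both ccp hunks zero the export structure before filling it because the compiler may insert padding between members, and copying the struct to 'out' would otherwise carry whatever stack garbage occupies those padding bytes. A standalone demonstration of the pattern:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct exp_state {
    uint8_t flag;       /* padding typically follows this member */
    uint32_t count;
};

int main(void)
{
    struct exp_state s;
    uint8_t out[sizeof(s)];

    /* Zero the whole object first so padding cannot leak stale
     * stack bytes when the struct is copied out, as in the fix. */
    memset(&s, 0, sizeof(s));
    s.flag = 1;
    s.count = 42;

    memcpy(out, &s, sizeof(s));
    for (size_t i = 0; i < sizeof(out); i++)
        printf("%02x ", out[i]);
    printf("\n");
    return 0;
}
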
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index a0d4a08313ae..aae05547b924 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
ptr->eptr = upper_32_bits(dma_addr);
}

+static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
+ struct talitos_ptr *src_ptr, bool is_sec1)
+{
+ dst_ptr->ptr = src_ptr->ptr;
+ if (!is_sec1)
+ dst_ptr->eptr = src_ptr->eptr;
+}
+
static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
bool is_sec1)
{
@@ -1083,21 +1091,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL
: DMA_TO_DEVICE);
-
/* hmac data */
desc->ptr[1].len = cpu_to_be16(areq->assoclen);
if (sg_count > 1 &&
(ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
areq->assoclen,
&edesc->link_tbl[tbl_off])) > 1) {
- tbl_off += ret;
-
to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
sizeof(struct talitos_ptr), 0);
desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
+
+ tbl_off += ret;
} else {
to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
desc->ptr[1].j_extent = 0;
@@ -1126,11 +1133,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
sg_link_tbl_len += authsize;

- if (sg_count > 1 &&
- (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
- sg_link_tbl_len,
- &edesc->link_tbl[tbl_off])) > 1) {
- tbl_off += ret;
+ if (sg_count == 1) {
+ to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
+ areq->assoclen, 0);
+ } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
+ areq->assoclen, sg_link_tbl_len,
+ &edesc->link_tbl[tbl_off])) >
+ 1) {
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
tbl_off *
@@ -1138,8 +1147,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len,
DMA_BIDIRECTIONAL);
- } else
- to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
+ tbl_off += ret;
+ } else {
+ copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
+ }

/* cipher out */
desc->ptr[5].len = cpu_to_be16(cryptlen);
@@ -1151,11 +1162,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,

edesc->icv_ool = false;

- if (sg_count > 1 &&
- (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
+ if (sg_count == 1) {
+ to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
+ areq->assoclen, 0);
+ } else if ((sg_count =
+ sg_to_link_tbl_offset(areq->dst, sg_count,
areq->assoclen, cryptlen,
- &edesc->link_tbl[tbl_off])) >
- 1) {
+ &edesc->link_tbl[tbl_off])) > 1) {
struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
@@ -1178,8 +1191,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->dma_len, DMA_BIDIRECTIONAL);

edesc->icv_ool = true;
- } else
- to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
+ } else {
+ copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
+ }

/* iv out */
map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -2629,21 +2643,11 @@ struct talitos_crypto_alg {
struct talitos_alg_template algt;
};

-static int talitos_cra_init(struct crypto_tfm *tfm)
+static int talitos_init_common(struct talitos_ctx *ctx,
+ struct talitos_crypto_alg *talitos_alg)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct talitos_crypto_alg *talitos_alg;
- struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
struct talitos_private *priv;

- if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
- talitos_alg = container_of(__crypto_ahash_alg(alg),
- struct talitos_crypto_alg,
- algt.alg.hash);
- else
- talitos_alg = container_of(alg, struct talitos_crypto_alg,
- algt.alg.crypto);
-
/* update context with ptr to dev */
ctx->dev = talitos_alg->dev;

@@ -2661,10 +2665,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
return 0;
}

+static int talitos_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct talitos_crypto_alg *talitos_alg;
+ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+ talitos_alg = container_of(__crypto_ahash_alg(alg),
+ struct talitos_crypto_alg,
+ algt.alg.hash);
+ else
+ talitos_alg = container_of(alg, struct talitos_crypto_alg,
+ algt.alg.crypto);
+
+ return talitos_init_common(ctx, talitos_alg);
+}
+
static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
- talitos_cra_init(crypto_aead_tfm(tfm));
- return 0;
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct talitos_crypto_alg *talitos_alg;
+ struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
+
+ talitos_alg = container_of(alg, struct talitos_crypto_alg,
+ algt.alg.aead);
+
+ return talitos_init_common(ctx, talitos_alg);
}

static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 5ad0ec1f0e29..97199b3c25a2 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
static void dwc_initialize(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- struct dw_dma_slave *dws = dwc->chan.private;
u32 cfghi = DWC_CFGH_FIFO_MODE;
u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

if (dwc->initialized == true)
return;

- if (dws) {
- /*
- * We need controller-specific data to set up slave
- * transfers.
- */
- BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
- cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
- cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
- } else {
- cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
- cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
- }
+ cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+ cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);

channel_writel(dwc, CFG_LO, cfglo);
channel_writel(dwc, CFG_HI, cfghi);
@@ -941,7 +929,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma_slave *dws = param;

- if (!dws || dws->dma_dev != chan->device->dev)
+ if (dws->dma_dev != chan->device->dev)
return false;

/* We have to copy data since dws can be temporary storage */
@@ -1165,6 +1153,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
* doesn't mean what you think it means), and status writeback.
*/

+ /*
+ * We need controller-specific data to set up slave transfers.
+ */
+ if (chan->private && !dw_dma_filter(chan, chan->private)) {
+ dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
+ return -EINVAL;
+ }
+
/* Enable controller here if needed */
if (!dw->in_use)
dw_dma_on(dw);
@@ -1226,6 +1222,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
+
+ /* Clear custom channel configuration */
+ dwc->src_id = 0;
+ dwc->dst_id = 0;
+
+ dwc->src_master = 0;
+ dwc->dst_master = 0;
+
dwc->initialized = false;

/* Disable interrupts */
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index e3d7fcb69b4c..2dac314a2d7a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1563,32 +1563,6 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
return IRQ_HANDLED;
}

-static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
-{
- struct platform_device *tc_pdev;
- int ret;
-
- if (!IS_ENABLED(CONFIG_OF) || !tc)
- return;
-
- tc_pdev = of_find_device_by_node(tc->node);
- if (!tc_pdev) {
- pr_err("%s: TPTC device is not found\n", __func__);
- return;
- }
- if (!pm_runtime_enabled(&tc_pdev->dev))
- pm_runtime_enable(&tc_pdev->dev);
-
- if (enable)
- ret = pm_runtime_get_sync(&tc_pdev->dev);
- else
- ret = pm_runtime_put_sync(&tc_pdev->dev);
-
- if (ret < 0)
- pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
- enable ? "get" : "put", dev_name(&tc_pdev->dev));
-}
-
/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
@@ -1625,8 +1599,6 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
echan->hw_triggered ? "HW" : "SW");

- edma_tc_set_pm_state(echan->tc, true);
-
return 0;

err_slot:
@@ -1663,7 +1635,6 @@ static void edma_free_chan_resources(struct dma_chan *chan)
echan->alloced = false;
}

- edma_tc_set_pm_state(echan->tc, false);
echan->tc = NULL;
echan->hw_triggered = false;

@@ -2408,10 +2379,8 @@ static int edma_pm_suspend(struct device *dev)
int i;

for (i = 0; i < ecc->num_channels; i++) {
- if (echan[i].alloced) {
+ if (echan[i].alloced)
edma_setup_interrupt(&echan[i], false);
- edma_tc_set_pm_state(echan[i].tc, false);
- }
}

return 0;
@@ -2441,8 +2410,6 @@ static int edma_pm_resume(struct device *dev)

/* Set up channel -> slot mapping for the entry slot */
edma_set_chmap(&echan[i], echan[i].slot[0]);
-
- edma_tc_set_pm_state(echan[i].tc, true);
}
}

@@ -2466,7 +2433,8 @@ static struct platform_driver edma_driver = {

static int edma_tptc_probe(struct platform_device *pdev)
{
- return 0;
+ pm_runtime_enable(&pdev->dev);
+ return pm_runtime_get_sync(&pdev->dev);
}

static struct platform_driver edma_tptc_driver = {
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index eef145edb936..025d375fc3d7 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
sr = hsu_chan_readl(hsuc, HSU_CH_SR);
spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

- return sr;
+ return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
}

irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
@@ -254,10 +254,13 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
{
struct hsu_dma_desc *desc = hsuc->desc;
- size_t bytes = desc->length;
+ size_t bytes = 0;
int i;

- i = desc->active % HSU_DMA_CHAN_NR_DESC;
+ for (i = desc->active; i < desc->nents; i++)
+ bytes += desc->sg[i].len;
+
+ i = HSU_DMA_CHAN_NR_DESC - 1;
do {
bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
} while (--i >= 0);
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index 578a8ee8cd05..6b070c22b1df 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -41,6 +41,9 @@
#define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
#define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
#define HSU_CH_SR_CHE BIT(15)
+#define HSU_CH_SR_DESCE(x) BIT(16 + (x))
+#define HSU_CH_SR_DESCE_ANY (BIT(19) | BIT(18) | BIT(17) | BIT(16))
+#define HSU_CH_SR_CDESC_ANY (BIT(31) | BIT(30))

/* Bits in HSU_CH_CR */
#define HSU_CH_CR_CHA BIT(0)
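
The hsu change masks the descriptor-empty bits (DESCE, bits 19:16) and current-descriptor bits (CDESC, bits 31:30) out of the status word returned to the IRQ path, since those are bookkeeping fields rather than interrupt causes. The masking itself, standalone:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))
#define HSU_CH_SR_DESCE_ANY (BIT(19) | BIT(18) | BIT(17) | BIT(16))
#define HSU_CH_SR_CDESC_ANY (BIT(31) | BIT(30))

int main(void)
{
    uint32_t sr = 0xc0130100;   /* illustrative raw status */
    uint32_t irq_sr = sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);

    printf("raw %#x -> irq-relevant %#x\n",
           (unsigned)sr, (unsigned)irq_sr); /* 0x100100 */
    return 0;
}
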
1443 diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
1444 index 9794b073d7d7..a5ed9407c51b 100644
1445 --- a/drivers/dma/omap-dma.c
1446 +++ b/drivers/dma/omap-dma.c
1447 @@ -48,6 +48,7 @@ struct omap_chan {
1448 unsigned dma_sig;
1449 bool cyclic;
1450 bool paused;
1451 + bool running;
1452
1453 int dma_ch;
1454 struct omap_desc *desc;
1455 @@ -294,6 +295,8 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
1456
1457 /* Enable channel */
1458 omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
1459 +
1460 + c->running = true;
1461 }
1462
1463 static void omap_dma_stop(struct omap_chan *c)
1464 @@ -355,6 +358,8 @@ static void omap_dma_stop(struct omap_chan *c)
1465
1466 omap_dma_chan_write(c, CLNK_CTRL, val);
1467 }
1468 +
1469 + c->running = false;
1470 }
1471
1472 static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
1473 @@ -673,15 +678,20 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
1474 struct omap_chan *c = to_omap_dma_chan(chan);
1475 struct virt_dma_desc *vd;
1476 enum dma_status ret;
1477 - uint32_t ccr;
1478 unsigned long flags;
1479
1480 - ccr = omap_dma_chan_read(c, CCR);
1481 - /* The channel is no longer active, handle the completion right away */
1482 - if (!(ccr & CCR_ENABLE))
1483 - omap_dma_callback(c->dma_ch, 0, c);
1484 -
1485 ret = dma_cookie_status(chan, cookie, txstate);
1486 +
1487 + if (!c->paused && c->running) {
1488 + uint32_t ccr = omap_dma_chan_read(c, CCR);
1489 + /*
1490 + * The channel is no longer active, set the return value
1491 + * accordingly
1492 + */
1493 + if (!(ccr & CCR_ENABLE))
1494 + ret = DMA_COMPLETE;
1495 + }
1496 +
1497 if (ret == DMA_COMPLETE || !txstate)
1498 return ret;
1499
1500 diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
1501 index debca824bed6..77c1c44009d8 100644
1502 --- a/drivers/dma/pxa_dma.c
1503 +++ b/drivers/dma/pxa_dma.c
1504 @@ -122,6 +122,7 @@ struct pxad_chan {
1505 struct pxad_device {
1506 struct dma_device slave;
1507 int nr_chans;
1508 + int nr_requestors;
1509 void __iomem *base;
1510 struct pxad_phy *phys;
1511 spinlock_t phy_lock; /* Phy association */
1512 @@ -473,7 +474,7 @@ static void pxad_free_phy(struct pxad_chan *chan)
1513 return;
1514
1515 /* clear the channel mapping in DRCMR */
1516 - if (chan->drcmr <= DRCMR_CHLNUM) {
1517 + if (chan->drcmr <= pdev->nr_requestors) {
1518 reg = pxad_drcmr(chan->drcmr);
1519 writel_relaxed(0, chan->phy->base + reg);
1520 }
1521 @@ -509,6 +510,7 @@ static bool is_running_chan_misaligned(struct pxad_chan *chan)
1522
1523 static void phy_enable(struct pxad_phy *phy, bool misaligned)
1524 {
1525 + struct pxad_device *pdev;
1526 u32 reg, dalgn;
1527
1528 if (!phy->vchan)
1529 @@ -518,7 +520,8 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
1530 "%s(); phy=%p(%d) misaligned=%d\n", __func__,
1531 phy, phy->idx, misaligned);
1532
1533 - if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
1534 + pdev = to_pxad_dev(phy->vchan->vc.chan.device);
1535 + if (phy->vchan->drcmr <= pdev->nr_requestors) {
1536 reg = pxad_drcmr(phy->vchan->drcmr);
1537 writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
1538 }
1539 @@ -914,6 +917,7 @@ static void pxad_get_config(struct pxad_chan *chan,
1540 {
1541 u32 maxburst = 0, dev_addr = 0;
1542 enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
1543 + struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
1544
1545 *dcmd = 0;
1546 if (dir == DMA_DEV_TO_MEM) {
1547 @@ -922,7 +926,7 @@ static void pxad_get_config(struct pxad_chan *chan,
1548 dev_addr = chan->cfg.src_addr;
1549 *dev_src = dev_addr;
1550 *dcmd |= PXA_DCMD_INCTRGADDR;
1551 - if (chan->drcmr <= DRCMR_CHLNUM)
1552 + if (chan->drcmr <= pdev->nr_requestors)
1553 *dcmd |= PXA_DCMD_FLOWSRC;
1554 }
1555 if (dir == DMA_MEM_TO_DEV) {
1556 @@ -931,7 +935,7 @@ static void pxad_get_config(struct pxad_chan *chan,
1557 dev_addr = chan->cfg.dst_addr;
1558 *dev_dst = dev_addr;
1559 *dcmd |= PXA_DCMD_INCSRCADDR;
1560 - if (chan->drcmr <= DRCMR_CHLNUM)
1561 + if (chan->drcmr <= pdev->nr_requestors)
1562 *dcmd |= PXA_DCMD_FLOWTRG;
1563 }
1564 if (dir == DMA_MEM_TO_MEM)
1565 @@ -1341,13 +1345,15 @@ static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
1566
1567 static int pxad_init_dmadev(struct platform_device *op,
1568 struct pxad_device *pdev,
1569 - unsigned int nr_phy_chans)
1570 + unsigned int nr_phy_chans,
1571 + unsigned int nr_requestors)
1572 {
1573 int ret;
1574 unsigned int i;
1575 struct pxad_chan *c;
1576
1577 pdev->nr_chans = nr_phy_chans;
1578 + pdev->nr_requestors = nr_requestors;
1579 INIT_LIST_HEAD(&pdev->slave.channels);
1580 pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
1581 pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
1582 @@ -1382,7 +1388,7 @@ static int pxad_probe(struct platform_device *op)
1583 const struct of_device_id *of_id;
1584 struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
1585 struct resource *iores;
1586 - int ret, dma_channels = 0;
1587 + int ret, dma_channels = 0, nb_requestors = 0;
1588 const enum dma_slave_buswidth widths =
1589 DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
1590 DMA_SLAVE_BUSWIDTH_4_BYTES;
1591 @@ -1399,13 +1405,23 @@ static int pxad_probe(struct platform_device *op)
1592 return PTR_ERR(pdev->base);
1593
1594 of_id = of_match_device(pxad_dt_ids, &op->dev);
1595 - if (of_id)
1596 + if (of_id) {
1597 of_property_read_u32(op->dev.of_node, "#dma-channels",
1598 &dma_channels);
1599 - else if (pdata && pdata->dma_channels)
1600 + ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
1601 + &nb_requestors);
1602 + if (ret) {
1603 + dev_warn(pdev->slave.dev,
1604 + "#dma-requests set to default 32 as missing in OF: %d",
1605 + ret);
1606 + nb_requestors = 32;
1607 + };
1608 + } else if (pdata && pdata->dma_channels) {
1609 dma_channels = pdata->dma_channels;
1610 - else
1611 + nb_requestors = pdata->nb_requestors;
1612 + } else {
1613 dma_channels = 32; /* default 32 channel */
1614 + }
1615
1616 dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
1617 dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
1618 @@ -1423,7 +1439,7 @@ static int pxad_probe(struct platform_device *op)
1619 pdev->slave.descriptor_reuse = true;
1620
1621 pdev->slave.dev = &op->dev;
1622 - ret = pxad_init_dmadev(op, pdev, dma_channels);
1623 + ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
1624 if (ret) {
1625 dev_err(pdev->slave.dev, "unable to register\n");
1626 return ret;
1627 @@ -1442,7 +1458,8 @@ static int pxad_probe(struct platform_device *op)
1628
1629 platform_set_drvdata(op, pdev);
1630 pxad_init_debugfs(pdev);
1631 - dev_info(pdev->slave.dev, "initialized %d channels\n", dma_channels);
1632 + dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
1633 + dma_channels, nb_requestors);
1634 return 0;
1635 }
1636
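
The pxa_dma hunks above replace the fixed DRCMR_CHLNUM bound with a per-controller requestor count read from the "#dma-requests" property (defaulting to 32 when absent). A minimal user-space sketch of that check, with a hypothetical helper name standing in for the driver's inline tests:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the comparison the hunks above introduce. */
static bool pxad_can_flow_control(unsigned int drcmr,
                                  unsigned int nr_requestors)
{
        /* A channel may use hardware flow control only if its requestor
         * line exists on this SoC; nr_requestors comes from the
         * "#dma-requests" DT property instead of a fixed constant. */
        return drcmr <= nr_requestors;
}

int main(void)
{
        printf("%d %d\n",
               pxad_can_flow_control(60, 75),   /* 1: line exists on a 75-line SoC */
               pxad_can_flow_control(60, 32));  /* 0: beyond a 32-line controller */
        return 0;
}
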
1637 diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
1638 index 01087a38da22..792bdae2b91d 100644
1639 --- a/drivers/edac/i7core_edac.c
1640 +++ b/drivers/edac/i7core_edac.c
1641 @@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
1642
1643 i7_dev = get_i7core_dev(mce->socketid);
1644 if (!i7_dev)
1645 - return NOTIFY_BAD;
1646 + return NOTIFY_DONE;
1647
1648 mci = i7_dev->mci;
1649 pvt = mci->pvt_info;
1650 diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
1651 index 93f0d4120289..8bf745d2da7e 100644
1652 --- a/drivers/edac/sb_edac.c
1653 +++ b/drivers/edac/sb_edac.c
1654 @@ -362,6 +362,7 @@ struct sbridge_pvt {
1655
1656 /* Memory type detection */
1657 bool is_mirrored, is_lockstep, is_close_pg;
1658 + bool is_chan_hash;
1659
1660 /* Fifo double buffers */
1661 struct mce mce_entry[MCE_LOG_LEN];
1662 @@ -1060,6 +1061,20 @@ static inline u8 sad_pkg_ha(u8 pkg)
1663 return (pkg >> 2) & 0x1;
1664 }
1665
1666 +static int haswell_chan_hash(int idx, u64 addr)
1667 +{
1668 + int i;
1669 +
1670 + /*
1671 + * XOR even bits from 12:26 to bit0 of idx,
1672 + * odd bits from 13:27 to bit1
1673 + */
1674 + for (i = 12; i < 28; i += 2)
1675 + idx ^= (addr >> i) & 3;
1676 +
1677 + return idx;
1678 +}
1679 +
1680 /****************************************************************************
1681 Memory check routines
1682 ****************************************************************************/
1683 @@ -1616,6 +1631,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
1684 KNL_MAX_CHANNELS : NUM_CHANNELS;
1685 u64 knl_mc_sizes[KNL_MAX_CHANNELS];
1686
1687 + if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
1688 + pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg);
1689 + pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
1690 + }
1691 if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
1692 pvt->info.type == KNIGHTS_LANDING)
1693 pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
1694 @@ -2118,12 +2137,15 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1695 }
1696
1697 ch_way = TAD_CH(reg) + 1;
1698 - sck_way = 1 << TAD_SOCK(reg);
1699 + sck_way = TAD_SOCK(reg);
1700
1701 if (ch_way == 3)
1702 idx = addr >> 6;
1703 - else
1704 + else {
1705 idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
1706 + if (pvt->is_chan_hash)
1707 + idx = haswell_chan_hash(idx, addr);
1708 + }
1709 idx = idx % ch_way;
1710
1711 /*
1712 @@ -2157,7 +2179,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1713 switch(ch_way) {
1714 case 2:
1715 case 4:
1716 - sck_xch = 1 << sck_way * (ch_way >> 1);
1717 + sck_xch = (1 << sck_way) * (ch_way >> 1);
1718 break;
1719 default:
1720 sprintf(msg, "Invalid mirror set. Can't decode addr");
1721 @@ -2193,7 +2215,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1722
1723 ch_addr = addr - offset;
1724 ch_addr >>= (6 + shiftup);
1725 - ch_addr /= ch_way * sck_way;
1726 + ch_addr /= sck_xch;
1727 ch_addr <<= (6 + shiftup);
1728 ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
1729
1730 @@ -3146,7 +3168,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
1731
1732 mci = get_mci_for_node_id(mce->socketid);
1733 if (!mci)
1734 - return NOTIFY_BAD;
1735 + return NOTIFY_DONE;
1736 pvt = mci->pvt_info;
1737
1738 /*
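
Two of the sb_edac decode fixes above are easy to misread: the Haswell channel hash folds address bits 12..27 into the 2-bit channel index, and the socket-interleave factor needs explicit parentheses because << binds looser than *. A small stand-alone C demo with made-up values:

#include <stdio.h>

static int haswell_chan_hash(int idx, unsigned long long addr)
{
        int i;

        /* XOR even bits 12..26 into bit 0 of idx, odd bits 13..27 into bit 1 */
        for (i = 12; i < 28; i += 2)
                idx ^= (addr >> i) & 3;
        return idx;
}

int main(void)
{
        int sck_way = 2, ch_way = 4;

        /* addr 0x3000 has bits 12 and 13 set, so idx 0 hashes to 3 */
        printf("hash = %d\n", haswell_chan_hash(0, 0x3000ULL));

        /* precedence: the old "1 << sck_way * (ch_way >> 1)" is 16 here,
         * while the fixed "(1 << sck_way) * (ch_way >> 1)" is 8 */
        printf("buggy = %d, fixed = %d\n",
               1 << sck_way * (ch_way >> 1),
               (1 << sck_way) * (ch_way >> 1));
        return 0;
}
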
1739 diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
1740 index 74dfb7f4f277..d8cac4661cfe 100644
1741 --- a/drivers/extcon/extcon-max77843.c
1742 +++ b/drivers/extcon/extcon-max77843.c
1743 @@ -803,7 +803,7 @@ static int max77843_muic_probe(struct platform_device *pdev)
1744 /* Clear IRQ bits before request IRQs */
1745 ret = regmap_bulk_read(max77843->regmap_muic,
1746 MAX77843_MUIC_REG_INT1, info->status,
1747 - MAX77843_MUIC_IRQ_NUM);
1748 + MAX77843_MUIC_STATUS_NUM);
1749 if (ret) {
1750 dev_err(&pdev->dev, "Failed to Clear IRQ bits\n");
1751 goto err_muic_irq;
1752 diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
1753 index 9e15d571b53c..a76c35fc0b92 100644
1754 --- a/drivers/firmware/efi/arm-init.c
1755 +++ b/drivers/firmware/efi/arm-init.c
1756 @@ -203,7 +203,19 @@ void __init efi_init(void)
1757
1758 reserve_regions();
1759 early_memunmap(memmap.map, params.mmap_size);
1760 - memblock_mark_nomap(params.mmap & PAGE_MASK,
1761 - PAGE_ALIGN(params.mmap_size +
1762 - (params.mmap & ~PAGE_MASK)));
1763 +
1764 + if (IS_ENABLED(CONFIG_ARM)) {
1765 + /*
1766 + * ARM currently does not allow ioremap_cache() to be called on
1767 + * memory regions that are covered by struct page. So remove the
1768 + * UEFI memory map from the linear mapping.
1769 + */
1770 + memblock_mark_nomap(params.mmap & PAGE_MASK,
1771 + PAGE_ALIGN(params.mmap_size +
1772 + (params.mmap & ~PAGE_MASK)));
1773 + } else {
1774 + memblock_reserve(params.mmap & PAGE_MASK,
1775 + PAGE_ALIGN(params.mmap_size +
1776 + (params.mmap & ~PAGE_MASK)));
1777 + }
1778 }
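
The region handed to memblock above is page-rounded in both directions: the base rounds down, and the size grows by the offset lost to that rounding before rounding up, so the whole map stays covered. A stand-alone sketch of the arithmetic with a made-up address and a fixed 4 KiB page:

#include <stdio.h>

#define PAGE_SIZE     4096ULL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        unsigned long long mmap = 0x4321ff40ULL; /* hypothetical map address */
        unsigned long long size = 0x200;         /* hypothetical map size */

        /* base = 0x4321f000, size = PAGE_ALIGN(0x200 + 0xf40) = 0x2000 */
        printf("base=%#llx size=%#llx\n",
               mmap & PAGE_MASK, PAGE_ALIGN(size + (mmap & ~PAGE_MASK)));
        return 0;
}
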
1779 diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
1780 index 2cd37dad67a6..c51f3b2fe3c0 100644
1781 --- a/drivers/firmware/efi/efi.c
1782 +++ b/drivers/firmware/efi/efi.c
1783 @@ -182,6 +182,7 @@ static int generic_ops_register(void)
1784 {
1785 generic_ops.get_variable = efi.get_variable;
1786 generic_ops.set_variable = efi.set_variable;
1787 + generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
1788 generic_ops.get_next_variable = efi.get_next_variable;
1789 generic_ops.query_variable_store = efi_query_variable_store;
1790
1791 diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
1792 index 7f2ea21c730d..6f182fd91a6d 100644
1793 --- a/drivers/firmware/efi/vars.c
1794 +++ b/drivers/firmware/efi/vars.c
1795 @@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
1796 { NULL_GUID, "", NULL },
1797 };
1798
1799 +/*
1800 + * Check if @var_name matches the pattern given in @match_name.
1801 + *
1802 + * @var_name: an array of @len non-NUL characters.
1803 + * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
1804 + * final "*" character matches any trailing characters of @var_name,
1805 + * including the case when there are none left in @var_name.
1806 + * @match: on output, the number of non-wildcard characters in @match_name
1807 + * that @var_name matches, regardless of the return value.
1808 + * @return: whether @var_name fully matches @match_name.
1809 + */
1810 static bool
1811 variable_matches(const char *var_name, size_t len, const char *match_name,
1812 int *match)
1813 {
1814 for (*match = 0; ; (*match)++) {
1815 char c = match_name[*match];
1816 - char u = var_name[*match];
1817
1818 - /* Wildcard in the matching name means we've matched */
1819 - if (c == '*')
1820 + switch (c) {
1821 + case '*':
1822 + /* Wildcard in @match_name means we've matched. */
1823 return true;
1824
1825 - /* Case sensitive match */
1826 - if (!c && *match == len)
1827 - return true;
1828 + case '\0':
1829 + /* @match_name has ended. Has @var_name too? */
1830 + return (*match == len);
1831
1832 - if (c != u)
1833 + default:
1834 + /*
1835 + * We've reached a non-wildcard char in @match_name.
1836 + * Continue only if there's an identical character in
1837 + * @var_name.
1838 + */
1839 + if (*match < len && c == var_name[*match])
1840 + continue;
1841 return false;
1842 -
1843 - if (!c)
1844 - return true;
1845 + }
1846 }
1847 - return true;
1848 }
1849
1850 bool
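
The rewritten matcher above never reads past the first len characters of @var_name. A user-space re-implementation of the documented rules, assuming it mirrors the kernel logic, with a few sample calls:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool matches(const char *var_name, size_t len, const char *pattern)
{
        size_t i;

        for (i = 0; ; i++) {
                char c = pattern[i];

                switch (c) {
                case '*':
                        return true;             /* wildcard: matched */
                case '\0':
                        return i == len;         /* both must end together */
                default:
                        if (i < len && c == var_name[i])
                                continue;        /* still matching */
                        return false;
                }
        }
}

int main(void)
{
        printf("%d %d %d\n",
               matches("BootOrder", 9, "Boot*"),   /* 1 */
               matches("Boot", 4, "Boot*"),        /* 1 */
               matches("BootOrder", 9, "Boot"));   /* 0 */
        return 0;
}
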
1851 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
1852 index 5e7770f9a415..ff299752d5e6 100644
1853 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
1854 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
1855 @@ -1619,6 +1619,7 @@ struct amdgpu_uvd {
1856 struct amdgpu_bo *vcpu_bo;
1857 void *cpu_addr;
1858 uint64_t gpu_addr;
1859 + unsigned fw_version;
1860 atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
1861 struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
1862 struct delayed_work idle_work;
1863 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
1864 index 81dc6b65436f..3c895863fcf5 100644
1865 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
1866 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
1867 @@ -63,10 +63,6 @@ bool amdgpu_has_atpx(void) {
1868 return amdgpu_atpx_priv.atpx_detected;
1869 }
1870
1871 -bool amdgpu_has_atpx_dgpu_power_cntl(void) {
1872 - return amdgpu_atpx_priv.atpx.functions.power_cntl;
1873 -}
1874 -
1875 /**
1876 * amdgpu_atpx_call - call an ATPX method
1877 *
1878 @@ -146,6 +142,10 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
1879 */
1880 static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
1881 {
1882 + /* make sure required functions are enabled */
1883 + /* dGPU power control is required */
1884 + atpx->functions.power_cntl = true;
1885 +
1886 if (atpx->functions.px_params) {
1887 union acpi_object *info;
1888 struct atpx_px_params output;
1889 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1890 index d6c68d00cbb0..51bfc114584e 100644
1891 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1892 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1893 @@ -62,12 +62,6 @@ static const char *amdgpu_asic_name[] = {
1894 "LAST",
1895 };
1896
1897 -#if defined(CONFIG_VGA_SWITCHEROO)
1898 -bool amdgpu_has_atpx_dgpu_power_cntl(void);
1899 -#else
1900 -static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
1901 -#endif
1902 -
1903 bool amdgpu_device_is_px(struct drm_device *dev)
1904 {
1905 struct amdgpu_device *adev = dev->dev_private;
1906 @@ -1517,7 +1511,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1907
1908 if (amdgpu_runtime_pm == 1)
1909 runtime = true;
1910 - if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
1911 + if (amdgpu_device_is_px(ddev))
1912 runtime = true;
1913 vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
1914 if (runtime)
1915 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1916 index e23843f4d877..4488e82f87b0 100644
1917 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1918 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1919 @@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
1920 fw_info.feature = adev->vce.fb_version;
1921 break;
1922 case AMDGPU_INFO_FW_UVD:
1923 - fw_info.ver = 0;
1924 + fw_info.ver = adev->uvd.fw_version;
1925 fw_info.feature = 0;
1926 break;
1927 case AMDGPU_INFO_FW_GMC:
1928 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
1929 index fdc1be8550da..3b2d75d96ea0 100644
1930 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
1931 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
1932 @@ -53,7 +53,7 @@ struct amdgpu_hpd;
1933
1934 #define AMDGPU_MAX_HPD_PINS 6
1935 #define AMDGPU_MAX_CRTCS 6
1936 -#define AMDGPU_MAX_AFMT_BLOCKS 7
1937 +#define AMDGPU_MAX_AFMT_BLOCKS 9
1938
1939 enum amdgpu_rmx_type {
1940 RMX_OFF,
1941 @@ -309,8 +309,8 @@ struct amdgpu_mode_info {
1942 struct atom_context *atom_context;
1943 struct card_info *atom_card_info;
1944 bool mode_config_initialized;
1945 - struct amdgpu_crtc *crtcs[6];
1946 - struct amdgpu_afmt *afmt[7];
1947 + struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
1948 + struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
1949 /* DVI-I properties */
1950 struct drm_property *coherent_mode_property;
1951 /* DAC enable load detect */
1952 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
1953 index 53f987aeeacf..3b35ad83867c 100644
1954 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
1955 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
1956 @@ -156,6 +156,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
1957 DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
1958 version_major, version_minor, family_id);
1959
1960 + adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
1961 + (family_id << 8));
1962 +
1963 bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
1964 + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
1965 r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
1966 @@ -273,6 +276,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
1967 memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
1968 (adev->uvd.fw->size) - offset);
1969
1970 + cancel_delayed_work_sync(&adev->uvd.idle_work);
1971 +
1972 size = amdgpu_bo_size(adev->uvd.vcpu_bo);
1973 size -= le32_to_cpu(hdr->ucode_size_bytes);
1974 ptr = adev->uvd.cpu_addr;
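
The packed fw_version set above can be unpacked the same way it is built: major in bits 31..24, minor in 23..16, family ID in 15..8. A stand-alone sketch with example version numbers (not real firmware values):

#include <stdio.h>

int main(void)
{
        unsigned fw_version = (1u << 24) | (80u << 16) | (9u << 8); /* e.g. 1.80, family 9 */

        printf("UVD fw %u.%u family %u\n",
               (fw_version >> 24) & 0xff,
               (fw_version >> 16) & 0xff,
               (fw_version >> 8) & 0xff);
        return 0;
}
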
1975 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
1976 index a745eeeb5d82..bb0da76051a1 100644
1977 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
1978 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
1979 @@ -220,6 +220,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
1980 if (i == AMDGPU_MAX_VCE_HANDLES)
1981 return 0;
1982
1983 + cancel_delayed_work_sync(&adev->vce.idle_work);
1984 /* TODO: suspending running encoding sessions isn't supported */
1985 return -EINVAL;
1986 }
1987 diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
1988 index 06602df707f8..9b1c43005c80 100644
1989 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
1990 +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
1991 @@ -3628,7 +3628,7 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1992 unsigned vm_id, uint64_t pd_addr)
1993 {
1994 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
1995 - uint32_t seq = ring->fence_drv.sync_seq;
1996 + uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
1997 uint64_t addr = ring->fence_drv.gpu_addr;
1998
1999 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
2000 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
2001 index c34c393e9aea..d5e19b5fbbfb 100644
2002 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
2003 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
2004 @@ -513,7 +513,7 @@ static int dbgdev_wave_control_set_registers(
2005 union SQ_CMD_BITS *in_reg_sq_cmd,
2006 union GRBM_GFX_INDEX_BITS *in_reg_gfx_index)
2007 {
2008 - int status;
2009 + int status = 0;
2010 union SQ_CMD_BITS reg_sq_cmd;
2011 union GRBM_GFX_INDEX_BITS reg_gfx_index;
2012 struct HsaDbgWaveMsgAMDGen2 *pMsg;
2013 diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
2014 index 27fbd79d0daf..71ea0521ea96 100644
2015 --- a/drivers/gpu/drm/drm_dp_mst_topology.c
2016 +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
2017 @@ -1672,13 +1672,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
2018 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
2019 int i;
2020
2021 + port = drm_dp_get_validated_port_ref(mgr, port);
2022 + if (!port)
2023 + return -EINVAL;
2024 +
2025 port_num = port->port_num;
2026 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
2027 if (!mstb) {
2028 mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
2029
2030 - if (!mstb)
2031 + if (!mstb) {
2032 + drm_dp_put_port(port);
2033 return -EINVAL;
2034 + }
2035 }
2036
2037 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2038 @@ -1707,6 +1713,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
2039 kfree(txmsg);
2040 fail_put:
2041 drm_dp_put_mst_branch_device(mstb);
2042 + drm_dp_put_port(port);
2043 return ret;
2044 }
2045
2046 @@ -1789,6 +1796,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
2047 req_payload.start_slot = cur_slots;
2048 if (mgr->proposed_vcpis[i]) {
2049 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
2050 + port = drm_dp_get_validated_port_ref(mgr, port);
2051 + if (!port) {
2052 + mutex_unlock(&mgr->payload_lock);
2053 + return -EINVAL;
2054 + }
2055 req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
2056 req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
2057 } else {
2058 @@ -1816,6 +1828,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
2059 mgr->payloads[i].payload_state = req_payload.payload_state;
2060 }
2061 cur_slots += req_payload.num_slots;
2062 +
2063 + if (port)
2064 + drm_dp_put_port(port);
2065 }
2066
2067 for (i = 0; i < mgr->max_payloads; i++) {
2068 @@ -2121,6 +2136,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
2069
2070 if (mgr->mst_primary) {
2071 int sret;
2072 + u8 guid[16];
2073 +
2074 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2075 if (sret != DP_RECEIVER_CAP_SIZE) {
2076 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2077 @@ -2135,6 +2152,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
2078 ret = -1;
2079 goto out_unlock;
2080 }
2081 +
2082 + /* Some hubs forget their guids after they resume */
2083 + sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
2084 + if (sret != 16) {
2085 + DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2086 + ret = -1;
2087 + goto out_unlock;
2088 + }
2089 + drm_dp_check_mstb_guid(mgr->mst_primary, guid);
2090 +
2091 ret = 0;
2092 } else
2093 ret = -1;
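
The hunks above make drm_dp_payload_send_msg() and drm_dp_update_payload_part1() hold a validated port reference for the whole operation and drop it on every exit path, including the early error returns. A toy refcount model of that discipline (helper names are stand-ins, not the DRM API):

#include <stdio.h>

struct port { int refcount; int valid; };

static struct port *get_validated_port_ref(struct port *p)
{
        if (!p->valid)
                return NULL;        /* port was unplugged: caller must bail */
        p->refcount++;
        return p;
}

static void put_port(struct port *p)
{
        p->refcount--;              /* must run on every exit path */
}

int main(void)
{
        struct port port = { .refcount = 1, .valid = 1 };
        struct port *ref = get_validated_port_ref(&port);

        if (!ref)
                return -1;
        /* ... use the port: build and send the payload message ... */
        put_port(ref);
        printf("refcount back to %d\n", port.refcount);
        return 0;
}
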
2094 diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
2095 index 647d85e77c2f..597cfb5ca847 100644
2096 --- a/drivers/gpu/drm/i915/intel_csr.c
2097 +++ b/drivers/gpu/drm/i915/intel_csr.c
2098 @@ -177,7 +177,8 @@ static const struct stepping_info kbl_stepping_info[] = {
2099 static const struct stepping_info skl_stepping_info[] = {
2100 {'A', '0'}, {'B', '0'}, {'C', '0'},
2101 {'D', '0'}, {'E', '0'}, {'F', '0'},
2102 - {'G', '0'}, {'H', '0'}, {'I', '0'}
2103 + {'G', '0'}, {'H', '0'}, {'I', '0'},
2104 + {'J', '0'}, {'K', '0'}
2105 };
2106
2107 static const struct stepping_info bxt_stepping_info[] = {
2108 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
2109 index 46947fffd599..a9c35134f2e2 100644
2110 --- a/drivers/gpu/drm/i915/intel_display.c
2111 +++ b/drivers/gpu/drm/i915/intel_display.c
2112 @@ -4455,7 +4455,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
2113 intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
2114
2115 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
2116 - &state->scaler_state.scaler_id, DRM_ROTATE_0,
2117 + &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
2118 state->pipe_src_w, state->pipe_src_h,
2119 adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
2120 }
2121 diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
2122 index fa0dabf578dc..db6361b5a6ab 100644
2123 --- a/drivers/gpu/drm/i915/intel_dp_mst.c
2124 +++ b/drivers/gpu/drm/i915/intel_dp_mst.c
2125 @@ -184,7 +184,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
2126 intel_mst->port = found->port;
2127
2128 if (intel_dp->active_mst_links == 0) {
2129 - intel_ddi_clk_select(encoder, intel_crtc->config);
2130 + intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);
2131
2132 intel_dp_set_link_params(intel_dp, intel_crtc->config);
2133
2134 @@ -499,6 +499,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
2135 struct intel_connector *intel_connector = to_intel_connector(connector);
2136 struct drm_device *dev = connector->dev;
2137
2138 + intel_connector->unregister(intel_connector);
2139 +
2140 /* need to nuke the connector */
2141 drm_modeset_lock_all(dev);
2142 if (connector->state->crtc) {
2143 @@ -512,11 +514,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
2144
2145 WARN(ret, "Disabling mst crtc failed with %i\n", ret);
2146 }
2147 - drm_modeset_unlock_all(dev);
2148
2149 - intel_connector->unregister(intel_connector);
2150 -
2151 - drm_modeset_lock_all(dev);
2152 intel_connector_remove_from_fbdev(intel_connector);
2153 drm_connector_cleanup(connector);
2154 drm_modeset_unlock_all(dev);
2155 diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
2156 index f1fa756c5d5d..cfd5f9fff2f4 100644
2157 --- a/drivers/gpu/drm/i915/intel_lrc.c
2158 +++ b/drivers/gpu/drm/i915/intel_lrc.c
2159 @@ -781,11 +781,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
2160 if (unlikely(total_bytes > remain_usable)) {
2161 /*
2162 * The base request will fit but the reserved space
2163 - * falls off the end. So only need to to wait for the
2164 - * reserved size after flushing out the remainder.
2165 + * falls off the end. So we don't need an immediate wrap
2166 + * and only need to wait for the reserved size of
2167 + * space from the start of the ringbuffer.
2168 */
2169 wait_bytes = remain_actual + ringbuf->reserved_size;
2170 - need_wrap = true;
2171 } else if (total_bytes > ringbuf->space) {
2172 /* No wrapping required, just waiting. */
2173 wait_bytes = total_bytes;
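
A worked example of the wait sizing above, with made-up numbers; the real code compares against the usable remainder, but the effect is the same when the reserved tail falls off the end:

#include <stdio.h>

int main(void)
{
        int ring_size     = 4096;   /* hypothetical ring */
        int tail          = 3900;   /* current write offset */
        int request_bytes = 128;
        int reserved      = 256;

        int remain_actual = ring_size - tail;           /* 196 bytes to the end */
        int total_bytes   = request_bytes + reserved;   /* 384 */

        /* The reserve falls off the end, so wait for the bytes to the
         * end plus the reserved size from the start, with no forced
         * immediate wrap. */
        if (total_bytes > remain_actual)
                printf("wait_bytes = %d\n", remain_actual + reserved);
        return 0;
}
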
2174 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
2175 index b28c29f20e75..7e4a9842b9ea 100644
2176 --- a/drivers/gpu/drm/i915/intel_pm.c
2177 +++ b/drivers/gpu/drm/i915/intel_pm.c
2178 @@ -2281,6 +2281,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
2179 return PTR_ERR(cstate);
2180
2181 pipe_wm = &cstate->wm.optimal.ilk;
2182 + memset(pipe_wm, 0, sizeof(*pipe_wm));
2183
2184 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2185 ps = drm_atomic_get_plane_state(state,
2186 @@ -3606,23 +3607,43 @@ static void skl_update_wm(struct drm_crtc *crtc)
2187 dev_priv->wm.skl_hw = *results;
2188 }
2189
2190 +static void ilk_compute_wm_config(struct drm_device *dev,
2191 + struct intel_wm_config *config)
2192 +{
2193 + struct intel_crtc *crtc;
2194 +
2195 + /* Compute the currently _active_ config */
2196 + for_each_intel_crtc(dev, crtc) {
2197 + const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
2198 +
2199 + if (!wm->pipe_enabled)
2200 + continue;
2201 +
2202 + config->sprites_enabled |= wm->sprites_enabled;
2203 + config->sprites_scaled |= wm->sprites_scaled;
2204 + config->num_pipes_active++;
2205 + }
2206 +}
2207 +
2208 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
2209 {
2210 struct drm_device *dev = dev_priv->dev;
2211 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
2212 struct ilk_wm_maximums max;
2213 - struct intel_wm_config *config = &dev_priv->wm.config;
2214 + struct intel_wm_config config = {};
2215 struct ilk_wm_values results = {};
2216 enum intel_ddb_partitioning partitioning;
2217
2218 - ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_1_2, &max);
2219 - ilk_wm_merge(dev, config, &max, &lp_wm_1_2);
2220 + ilk_compute_wm_config(dev, &config);
2221 +
2222 + ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
2223 + ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
2224
2225 /* 5/6 split only in single pipe config on IVB+ */
2226 if (INTEL_INFO(dev)->gen >= 7 &&
2227 - config->num_pipes_active == 1 && config->sprites_enabled) {
2228 - ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_5_6, &max);
2229 - ilk_wm_merge(dev, config, &max, &lp_wm_5_6);
2230 + config.num_pipes_active == 1 && config.sprites_enabled) {
2231 + ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
2232 + ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
2233
2234 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
2235 } else {
2236 diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
2237 index 40c6aff57256..549afa7bc75f 100644
2238 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
2239 +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
2240 @@ -951,7 +951,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
2241
2242 /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
2243 tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
2244 - if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
2245 + if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
2246 IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
2247 tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
2248 WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
2249 @@ -1044,7 +1044,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
2250 WA_SET_BIT_MASKED(HIZ_CHICKEN,
2251 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
2252
2253 - if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
2254 + /* This is tied to WaForceContextSaveRestoreNonCoherent */
2255 + if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
2256 /*
2257 *Use Force Non-Coherent whenever executing a 3D context. This
2258 * is a workaround for a possible hang in the unlikely event
2259 @@ -1901,6 +1902,17 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
2260 return 0;
2261 }
2262
2263 +static void cleanup_phys_status_page(struct intel_engine_cs *ring)
2264 +{
2265 + struct drm_i915_private *dev_priv = to_i915(ring->dev);
2266 +
2267 + if (!dev_priv->status_page_dmah)
2268 + return;
2269 +
2270 + drm_pci_free(ring->dev, dev_priv->status_page_dmah);
2271 + ring->status_page.page_addr = NULL;
2272 +}
2273 +
2274 static void cleanup_status_page(struct intel_engine_cs *ring)
2275 {
2276 struct drm_i915_gem_object *obj;
2277 @@ -1917,9 +1929,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring)
2278
2279 static int init_status_page(struct intel_engine_cs *ring)
2280 {
2281 - struct drm_i915_gem_object *obj;
2282 + struct drm_i915_gem_object *obj = ring->status_page.obj;
2283
2284 - if ((obj = ring->status_page.obj) == NULL) {
2285 + if (obj == NULL) {
2286 unsigned flags;
2287 int ret;
2288
2289 @@ -2019,10 +2031,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
2290 {
2291 struct drm_i915_private *dev_priv = to_i915(dev);
2292 struct drm_i915_gem_object *obj = ringbuf->obj;
2293 + /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
2294 + unsigned flags = PIN_OFFSET_BIAS | 4096;
2295 int ret;
2296
2297 if (HAS_LLC(dev_priv) && !obj->stolen) {
2298 - ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
2299 + ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
2300 if (ret)
2301 return ret;
2302
2303 @@ -2038,7 +2052,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
2304 return -ENOMEM;
2305 }
2306 } else {
2307 - ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
2308 + ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
2309 + flags | PIN_MAPPABLE);
2310 if (ret)
2311 return ret;
2312
2313 @@ -2164,7 +2179,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
2314 if (ret)
2315 goto error;
2316 } else {
2317 - BUG_ON(ring->id != RCS);
2318 + WARN_ON(ring->id != RCS);
2319 ret = init_phys_status_page(ring);
2320 if (ret)
2321 goto error;
2322 @@ -2210,7 +2225,12 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
2323 if (ring->cleanup)
2324 ring->cleanup(ring);
2325
2326 - cleanup_status_page(ring);
2327 + if (I915_NEED_GFX_HWS(ring->dev)) {
2328 + cleanup_status_page(ring);
2329 + } else {
2330 + WARN_ON(ring->id != RCS);
2331 + cleanup_phys_status_page(ring);
2332 + }
2333
2334 i915_cmd_parser_fini_ring(ring);
2335 i915_gem_batch_pool_fini(&ring->batch_pool);
2336 @@ -2373,11 +2393,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
2337 if (unlikely(total_bytes > remain_usable)) {
2338 /*
2339 * The base request will fit but the reserved space
2340 - * falls off the end. So only need to to wait for the
2341 - * reserved size after flushing out the remainder.
2342 + * falls off the end. So we don't need an immediate wrap
2343 + * and only need to wait for the reserved size of
2344 + * space from the start of the ringbuffer.
2345 */
2346 wait_bytes = remain_actual + ringbuf->reserved_size;
2347 - need_wrap = true;
2348 } else if (total_bytes > ringbuf->space) {
2349 /* No wrapping required, just waiting. */
2350 wait_bytes = total_bytes;
2351 diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
2352 index 277e60ae0e47..08961f7d151c 100644
2353 --- a/drivers/gpu/drm/i915/intel_uncore.c
2354 +++ b/drivers/gpu/drm/i915/intel_uncore.c
2355 @@ -1155,7 +1155,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
2356 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2357 dev_priv->uncore.funcs.force_wake_get =
2358 fw_domains_get_with_thread_status;
2359 - dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
2360 + if (IS_HASWELL(dev))
2361 + dev_priv->uncore.funcs.force_wake_put =
2362 + fw_domains_put_with_fifo;
2363 + else
2364 + dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
2365 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
2366 FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
2367 } else if (IS_IVYBRIDGE(dev)) {
2368 diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
2369 index 3216e157a8a0..89da47234016 100644
2370 --- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
2371 +++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
2372 @@ -131,7 +131,7 @@ nvkm_ramht_del(struct nvkm_ramht **pramht)
2373 struct nvkm_ramht *ramht = *pramht;
2374 if (ramht) {
2375 nvkm_gpuobj_del(&ramht->gpuobj);
2376 - kfree(*pramht);
2377 + vfree(*pramht);
2378 *pramht = NULL;
2379 }
2380 }
2381 @@ -143,8 +143,8 @@ nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
2382 struct nvkm_ramht *ramht;
2383 int ret, i;
2384
2385 - if (!(ramht = *pramht = kzalloc(sizeof(*ramht) + (size >> 3) *
2386 - sizeof(*ramht->data), GFP_KERNEL)))
2387 + if (!(ramht = *pramht = vzalloc(sizeof(*ramht) +
2388 + (size >> 3) * sizeof(*ramht->data))))
2389 return -ENOMEM;
2390
2391 ramht->device = device;
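
A rough sizing illustration (type sizes assumed, not taken from nouveau) of why the allocation above moved from kzalloc to vzalloc: the table scales with (size >> 3) entries, and large hash tables can exceed what a physically contiguous kmalloc will reliably provide.

#include <stdio.h>

int main(void)
{
        unsigned long header = 64;         /* assumed sizeof(*ramht) */
        unsigned long entry  = 16;         /* assumed sizeof(*ramht->data) */
        unsigned long size   = 512 * 1024; /* a large RAMHT, in bytes */

        /* ~1 MiB here: vmalloc-backed memory avoids high-order pages */
        printf("allocation = %lu bytes\n", header + (size >> 3) * entry);
        return 0;
}
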
2392 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
2393 index 1f81069edc58..332b5fe687fe 100644
2394 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
2395 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
2396 @@ -1807,6 +1807,8 @@ gf100_gr_init(struct gf100_gr *gr)
2397
2398 gf100_gr_mmio(gr, gr->func->mmio);
2399
2400 + nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
2401 +
2402 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
2403 for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
2404 do {
2405 diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
2406 index 86276519b2ef..47e52647c9e5 100644
2407 --- a/drivers/gpu/drm/qxl/qxl_display.c
2408 +++ b/drivers/gpu/drm/qxl/qxl_display.c
2409 @@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
2410
2411 qxl_bo_kunmap(user_bo);
2412
2413 + qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
2414 + qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
2415 + qcrtc->hot_spot_x = hot_x;
2416 + qcrtc->hot_spot_y = hot_y;
2417 +
2418 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
2419 cmd->type = QXL_CURSOR_SET;
2420 - cmd->u.set.position.x = qcrtc->cur_x;
2421 - cmd->u.set.position.y = qcrtc->cur_y;
2422 + cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
2423 + cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
2424
2425 cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
2426
2427 @@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
2428
2429 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
2430 cmd->type = QXL_CURSOR_MOVE;
2431 - cmd->u.position.x = qcrtc->cur_x;
2432 - cmd->u.position.y = qcrtc->cur_y;
2433 + cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
2434 + cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
2435 qxl_release_unmap(qdev, release, &cmd->release_info);
2436
2437 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
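
A worked example of the hotspot bookkeeping above with made-up coordinates: cur_x/cur_y now track the cursor image origin, so the visible tip stays put when a new shape moves the hotspot, and the device is always given origin plus hotspot:

#include <stdio.h>

int main(void)
{
        int cur_x = 100, hot_spot_x = 0;  /* tip currently at 100 */
        int new_hot_x = 4;                /* new shape, hotspot moved */

        cur_x += hot_spot_x - new_hot_x;  /* origin shifts to 96 */
        hot_spot_x = new_hot_x;

        printf("device position = %d\n", cur_x + hot_spot_x); /* still 100 */
        return 0;
}
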
2438 diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
2439 index 6e6b9b1519b8..3f3897eb458c 100644
2440 --- a/drivers/gpu/drm/qxl/qxl_drv.h
2441 +++ b/drivers/gpu/drm/qxl/qxl_drv.h
2442 @@ -135,6 +135,8 @@ struct qxl_crtc {
2443 int index;
2444 int cur_x;
2445 int cur_y;
2446 + int hot_spot_x;
2447 + int hot_spot_y;
2448 };
2449
2450 struct qxl_output {
2451 diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
2452 index 2ad462896896..32491355a1d4 100644
2453 --- a/drivers/gpu/drm/radeon/evergreen.c
2454 +++ b/drivers/gpu/drm/radeon/evergreen.c
2455 @@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
2456 WREG32(VM_CONTEXT1_CNTL, 0);
2457 }
2458
2459 +static const unsigned ni_dig_offsets[] =
2460 +{
2461 + NI_DIG0_REGISTER_OFFSET,
2462 + NI_DIG1_REGISTER_OFFSET,
2463 + NI_DIG2_REGISTER_OFFSET,
2464 + NI_DIG3_REGISTER_OFFSET,
2465 + NI_DIG4_REGISTER_OFFSET,
2466 + NI_DIG5_REGISTER_OFFSET
2467 +};
2468 +
2469 +static const unsigned ni_tx_offsets[] =
2470 +{
2471 + NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
2472 + NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
2473 + NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
2474 + NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
2475 + NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
2476 + NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
2477 +};
2478 +
2479 +static const unsigned evergreen_dp_offsets[] =
2480 +{
2481 + EVERGREEN_DP0_REGISTER_OFFSET,
2482 + EVERGREEN_DP1_REGISTER_OFFSET,
2483 + EVERGREEN_DP2_REGISTER_OFFSET,
2484 + EVERGREEN_DP3_REGISTER_OFFSET,
2485 + EVERGREEN_DP4_REGISTER_OFFSET,
2486 + EVERGREEN_DP5_REGISTER_OFFSET
2487 +};
2488 +
2489 +
2490 +/*
2491 + * Assumption is that EVERGREEN_CRTC_MASTER_EN is enabled for the requested crtc.
2492 + * We go from crtc to connector, which is not reliable since it
2493 + * should be the opposite direction. If the crtc is enabled, then
2494 + * find the dig_fe which selects this crtc and ensure that it is enabled.
2495 + * If such a dig_fe is found, then find the dig_be which selects that dig_fe and
2496 + * ensure that it is enabled and in DP_SST mode.
2497 + * If UNIPHY_PLL_CONTROL1 is enabled, then we should disconnect the timing
2498 + * from the dp symbol clocks.
2499 + */
2500 +static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
2501 + unsigned crtc_id, unsigned *ret_dig_fe)
2502 +{
2503 + unsigned i;
2504 + unsigned dig_fe;
2505 + unsigned dig_be;
2506 + unsigned dig_en_be;
2507 + unsigned uniphy_pll;
2508 + unsigned digs_fe_selected;
2509 + unsigned dig_be_mode;
2510 + unsigned dig_fe_mask;
2511 + bool is_enabled = false;
2512 + bool found_crtc = false;
2513 +
2514 + /* loop through all running dig_fe to find selected crtc */
2515 + for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
2516 + dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
2517 + if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
2518 + crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
2519 + /* found running pipe */
2520 + found_crtc = true;
2521 + dig_fe_mask = 1 << i;
2522 + dig_fe = i;
2523 + break;
2524 + }
2525 + }
2526 +
2527 + if (found_crtc) {
2528 + /* loop through all running dig_be to find selected dig_fe */
2529 + for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
2530 + dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
2531 + /* is this dig_fe selected by the dig_be? */
2532 + digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
2533 + dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
2534 + if (dig_fe_mask & digs_fe_selected &&
2535 + /* is the dig_be in SST mode? */
2536 + dig_be_mode == NI_DIG_BE_DPSST) {
2537 + dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
2538 + ni_dig_offsets[i]);
2539 + uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
2540 + ni_tx_offsets[i]);
2541 + /* dig_be is enabled and the tx is running */
2542 + if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
2543 + dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
2544 + uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
2545 + is_enabled = true;
2546 + *ret_dig_fe = dig_fe;
2547 + break;
2548 + }
2549 + }
2550 + }
2551 + }
2552 +
2553 + return is_enabled;
2554 +}
2555 +
2556 +/*
2557 + * Blank dig when in dp sst mode
2558 + * Dig ignores crtc timing
2559 + */
2560 +static void evergreen_blank_dp_output(struct radeon_device *rdev,
2561 + unsigned dig_fe)
2562 +{
2563 + unsigned stream_ctrl;
2564 + unsigned fifo_ctrl;
2565 + unsigned counter = 0;
2566 +
2567 + if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
2568 + DRM_ERROR("invalid dig_fe %d\n", dig_fe);
2569 + return;
2570 + }
2571 +
2572 + stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2573 + evergreen_dp_offsets[dig_fe]);
2574 + if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
2575 + DRM_ERROR("dig %d should be enabled\n", dig_fe);
2576 + return;
2577 + }
2578 +
2579 + stream_ctrl &= ~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
2580 + WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2581 + evergreen_dp_offsets[dig_fe], stream_ctrl);
2582 +
2583 + stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2584 + evergreen_dp_offsets[dig_fe]);
2585 + while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
2586 + msleep(1);
2587 + counter++;
2588 + stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2589 + evergreen_dp_offsets[dig_fe]);
2590 + }
2591 + if (counter >= 32)
2592 + DRM_ERROR("counter exceeds %d\n", counter);
2593 +
2594 + fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
2595 + fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
2596 + WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
2597 +
2598 +}
2599 +
2600 void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
2601 {
2602 u32 crtc_enabled, tmp, frame_count, blackout;
2603 int i, j;
2604 + unsigned dig_fe;
2605
2606 if (!ASIC_IS_NODCE(rdev)) {
2607 save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
2608 @@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
2609 break;
2610 udelay(1);
2611 }
2612 -
2613 + /*
2614 +  * We should disable the dig if it drives dp sst, but we are in
2615 +  * radeon_device_init and the topology is unknown; it only becomes
2616 +  * available after radeon_modeset_init. The method
2617 +  * radeon_atom_encoder_dpms_dig does the job if we initialize it
2618 +  * properly; for now we do it manually.
2619 +  */
2620 + if (ASIC_IS_DCE5(rdev) &&
2621 + evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe))
2622 + evergreen_blank_dp_output(rdev, dig_fe);
2623 + /* we could remove the 6 lines below */
2624 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
2625 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2626 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
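
A tiny demo of the front-end/back-end pairing test used in evergreen_is_dp_sst_stream_enabled() above: each DIG back-end reports a bitmask of selected front-ends, so finding the FE that drives our CRTC means testing (1 << fe) against it. Register values here are invented:

#include <stdio.h>

int main(void)
{
        unsigned dig_fe = 2;                  /* FE found driving the CRTC */
        unsigned dig_fe_mask = 1u << dig_fe;
        unsigned fe_selected_by_be = 0x04;    /* as if read from NI_DIG_BE_CNTL */

        if (dig_fe_mask & fe_selected_by_be)
                printf("BE drives FE %u: blank it before MC stop\n", dig_fe);
        return 0;
}
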
2627 diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
2628 index aa939dfed3a3..b436badf9efa 100644
2629 --- a/drivers/gpu/drm/radeon/evergreen_reg.h
2630 +++ b/drivers/gpu/drm/radeon/evergreen_reg.h
2631 @@ -250,8 +250,43 @@
2632
2633 /* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
2634 #define EVERGREEN_HDMI_BASE 0x7030
2635 +/*DIG block*/
2636 +#define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000)
2637 +#define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000)
2638 +#define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000)
2639 +#define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000)
2640 +#define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000)
2641 +#define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000)
2642 +
2643 +
2644 +#define NI_DIG_FE_CNTL 0x7000
2645 +# define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3)
2646 +# define NI_DIG_FE_CNTL_SYMCLK_FE_ON (1<<24)
2647 +
2648 +
2649 +#define NI_DIG_BE_CNTL 0x7140
2650 +# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8) & 0x3F)
2651 +# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7)
2652 +
2653 +#define NI_DIG_BE_EN_CNTL 0x7144
2654 +# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0)
2655 +# define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8)
2656 +# define NI_DIG_BE_DPSST 0
2657
2658 /* Display Port block */
2659 +#define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C)
2660 +#define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C)
2661 +#define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C)
2662 +#define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C)
2663 +#define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C)
2664 +#define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C)
2665 +
2666 +
2667 +#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C
2668 +# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0)
2669 +# define EVERGREEN_DP_VID_STREAM_STATUS (1 << 16)
2670 +#define EVERGREEN_DP_STEER_FIFO 0x7310
2671 +# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0)
2672 #define EVERGREEN_DP_SEC_CNTL 0x7280
2673 # define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0)
2674 # define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4)
2675 @@ -266,4 +301,15 @@
2676 # define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24)
2677 # define EVERGREEN_DP_SEC_SS_EN (1 << 28)
2678
2679 +/*DCIO_UNIPHY block*/
2680 +#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 - 0x6600)
2681 +#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 - 0x6600)
2682 +#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600)
2683 +#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600)
2684 +#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600)
2685 +#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600)
2686 +
2687 +#define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618
2688 +# define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0)
2689 +
2690 #endif
2691 diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
2692 index 9bc408c9f9f6..c4b4f298a283 100644
2693 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
2694 +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
2695 @@ -62,10 +62,6 @@ bool radeon_has_atpx(void) {
2696 return radeon_atpx_priv.atpx_detected;
2697 }
2698
2699 -bool radeon_has_atpx_dgpu_power_cntl(void) {
2700 - return radeon_atpx_priv.atpx.functions.power_cntl;
2701 -}
2702 -
2703 /**
2704 * radeon_atpx_call - call an ATPX method
2705 *
2706 @@ -145,6 +141,10 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
2707 */
2708 static int radeon_atpx_validate(struct radeon_atpx *atpx)
2709 {
2710 + /* make sure required functions are enabled */
2711 + /* dGPU power control is required */
2712 + atpx->functions.power_cntl = true;
2713 +
2714 if (atpx->functions.px_params) {
2715 union acpi_object *info;
2716 struct atpx_px_params output;
2717 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
2718 index 340f3f549f29..9cfc1c3e1965 100644
2719 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
2720 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
2721 @@ -1996,10 +1996,12 @@ radeon_add_atom_connector(struct drm_device *dev,
2722 rdev->mode_info.dither_property,
2723 RADEON_FMT_DITHER_DISABLE);
2724
2725 - if (radeon_audio != 0)
2726 + if (radeon_audio != 0) {
2727 drm_object_attach_property(&radeon_connector->base.base,
2728 rdev->mode_info.audio_property,
2729 RADEON_AUDIO_AUTO);
2730 + radeon_connector->audio = RADEON_AUDIO_AUTO;
2731 + }
2732 if (ASIC_IS_DCE5(rdev))
2733 drm_object_attach_property(&radeon_connector->base.base,
2734 rdev->mode_info.output_csc_property,
2735 @@ -2124,6 +2126,7 @@ radeon_add_atom_connector(struct drm_device *dev,
2736 drm_object_attach_property(&radeon_connector->base.base,
2737 rdev->mode_info.audio_property,
2738 RADEON_AUDIO_AUTO);
2739 + radeon_connector->audio = RADEON_AUDIO_AUTO;
2740 }
2741 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
2742 radeon_connector->dac_load_detect = true;
2743 @@ -2179,6 +2182,7 @@ radeon_add_atom_connector(struct drm_device *dev,
2744 drm_object_attach_property(&radeon_connector->base.base,
2745 rdev->mode_info.audio_property,
2746 RADEON_AUDIO_AUTO);
2747 + radeon_connector->audio = RADEON_AUDIO_AUTO;
2748 }
2749 if (ASIC_IS_DCE5(rdev))
2750 drm_object_attach_property(&radeon_connector->base.base,
2751 @@ -2231,6 +2235,7 @@ radeon_add_atom_connector(struct drm_device *dev,
2752 drm_object_attach_property(&radeon_connector->base.base,
2753 rdev->mode_info.audio_property,
2754 RADEON_AUDIO_AUTO);
2755 + radeon_connector->audio = RADEON_AUDIO_AUTO;
2756 }
2757 if (ASIC_IS_DCE5(rdev))
2758 drm_object_attach_property(&radeon_connector->base.base,
2759 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
2760 index e2396336f9e8..4197ca1bb1e4 100644
2761 --- a/drivers/gpu/drm/radeon/radeon_device.c
2762 +++ b/drivers/gpu/drm/radeon/radeon_device.c
2763 @@ -103,12 +103,6 @@ static const char radeon_family_name[][16] = {
2764 "LAST",
2765 };
2766
2767 -#if defined(CONFIG_VGA_SWITCHEROO)
2768 -bool radeon_has_atpx_dgpu_power_cntl(void);
2769 -#else
2770 -static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
2771 -#endif
2772 -
2773 #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
2774 #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
2775
2776 @@ -1439,7 +1433,7 @@ int radeon_device_init(struct radeon_device *rdev,
2777 * ignore it */
2778 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
2779
2780 - if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
2781 + if (rdev->flags & RADEON_IS_PX)
2782 runtime = true;
2783 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
2784 if (runtime)
2785 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
2786 index e06ac546a90f..f342aad79cc6 100644
2787 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
2788 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
2789 @@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
2790 {
2791 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
2792
2793 + if (radeon_ttm_tt_has_userptr(bo->ttm))
2794 + return -EPERM;
2795 return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
2796 }
2797
2798 diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
2799 index 7285adb27099..caa73de584a5 100644
2800 --- a/drivers/gpu/drm/radeon/si_dpm.c
2801 +++ b/drivers/gpu/drm/radeon/si_dpm.c
2802 @@ -2931,6 +2931,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
2803 { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
2804 { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
2805 { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
2806 + { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
2807 { 0, 0, 0, 0 },
2808 };
2809
2810 diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
2811 index 4cbf26555093..e3daafa1be13 100644
2812 --- a/drivers/gpu/drm/ttm/ttm_bo.c
2813 +++ b/drivers/gpu/drm/ttm/ttm_bo.c
2814 @@ -230,22 +230,13 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
2815
2816 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
2817 {
2818 - struct ttm_bo_device *bdev = bo->bdev;
2819 - struct ttm_mem_type_manager *man;
2820 + int put_count = 0;
2821
2822 lockdep_assert_held(&bo->resv->lock.base);
2823
2824 - if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
2825 - list_del_init(&bo->swap);
2826 - list_del_init(&bo->lru);
2827 -
2828 - } else {
2829 - if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
2830 - list_move_tail(&bo->swap, &bo->glob->swap_lru);
2831 -
2832 - man = &bdev->man[bo->mem.mem_type];
2833 - list_move_tail(&bo->lru, &man->lru);
2834 - }
2835 + put_count = ttm_bo_del_from_lru(bo);
2836 + ttm_bo_list_ref_sub(bo, put_count, true);
2837 + ttm_bo_add_to_lru(bo);
2838 }
2839 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
2840
2841 diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
2842 index 83e9f591a54b..e7a348807f0c 100644
2843 --- a/drivers/hwtracing/stm/Kconfig
2844 +++ b/drivers/hwtracing/stm/Kconfig
2845 @@ -1,6 +1,7 @@
2846 config STM
2847 tristate "System Trace Module devices"
2848 select CONFIGFS_FS
2849 + select SRCU
2850 help
2851 A System Trace Module (STM) is a device exporting data in System
2852 Trace Protocol (STP) format as defined by MIPI STP standards.
2853 diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
2854 index 714bdc837769..b167ab25310a 100644
2855 --- a/drivers/i2c/busses/i2c-cpm.c
2856 +++ b/drivers/i2c/busses/i2c-cpm.c
2857 @@ -116,8 +116,8 @@ struct cpm_i2c {
2858 cbd_t __iomem *rbase;
2859 u_char *txbuf[CPM_MAXBD];
2860 u_char *rxbuf[CPM_MAXBD];
2861 - u32 txdma[CPM_MAXBD];
2862 - u32 rxdma[CPM_MAXBD];
2863 + dma_addr_t txdma[CPM_MAXBD];
2864 + dma_addr_t rxdma[CPM_MAXBD];
2865 };
2866
2867 static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
2868 diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
2869 index b29c7500461a..f54ece8fce78 100644
2870 --- a/drivers/i2c/busses/i2c-exynos5.c
2871 +++ b/drivers/i2c/busses/i2c-exynos5.c
2872 @@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
2873 return -EIO;
2874 }
2875
2876 - clk_prepare_enable(i2c->clk);
2877 + ret = clk_enable(i2c->clk);
2878 + if (ret)
2879 + return ret;
2880
2881 for (i = 0; i < num; i++, msgs++) {
2882 stop = (i == num - 1);
2883 @@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
2884 }
2885
2886 out:
2887 - clk_disable_unprepare(i2c->clk);
2888 + clk_disable(i2c->clk);
2889 return ret;
2890 }
2891
2892 @@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
2893 return -ENOENT;
2894 }
2895
2896 - clk_prepare_enable(i2c->clk);
2897 + ret = clk_prepare_enable(i2c->clk);
2898 + if (ret)
2899 + return ret;
2900
2901 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2902 i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
2903 @@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
2904
2905 platform_set_drvdata(pdev, i2c);
2906
2907 + clk_disable(i2c->clk);
2908 +
2909 + return 0;
2910 +
2911 err_clk:
2912 clk_disable_unprepare(i2c->clk);
2913 return ret;
2914 @@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
2915
2916 i2c_del_adapter(&i2c->adap);
2917
2918 + clk_unprepare(i2c->clk);
2919 +
2920 return 0;
2921 }
2922
2923 @@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
2924
2925 i2c->suspended = 1;
2926
2927 + clk_unprepare(i2c->clk);
2928 +
2929 return 0;
2930 }
2931
2932 @@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
2933 struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
2934 int ret = 0;
2935
2936 - clk_prepare_enable(i2c->clk);
2937 + ret = clk_prepare_enable(i2c->clk);
2938 + if (ret)
2939 + return ret;
2940
2941 ret = exynos5_hsi2c_clock_setup(i2c);
2942 if (ret) {
2943 @@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
2944 }
2945
2946 exynos5_i2c_init(i2c);
2947 - clk_disable_unprepare(i2c->clk);
2948 + clk_disable(i2c->clk);
2949 i2c->suspended = 0;
2950
2951 return 0;
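
The hunks above split clk_prepare_enable() so the sleepable prepare happens once at probe/resume and only the cheap enable/disable brackets each transfer, with every return value checked. A stubbed sketch of the pattern (the stubs are stand-ins, not the kernel clk API):

#include <stdio.h>

static int clk_prepare(void)    { return 0; }  /* stand-in, may sleep */
static int clk_enable(void)     { return 0; }  /* stand-in, atomic-safe */
static void clk_disable(void)   { }
static void clk_unprepare(void) { }

static int xfer(void)
{
        int ret = clk_enable();   /* per-transfer, cheap */
        if (ret)
                return ret;
        /* ... issue the I2C transaction ... */
        clk_disable();
        return 0;
}

int main(void)
{
        if (clk_prepare())        /* once, at probe time */
                return 1;
        printf("xfer -> %d\n", xfer());
        clk_unprepare();          /* at remove/suspend */
        return 0;
}
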
2952 diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
2953 index 53343ffbff7a..1b109b2a235e 100644
2954 --- a/drivers/infiniband/core/cache.c
2955 +++ b/drivers/infiniband/core/cache.c
2956 @@ -691,7 +691,8 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
2957 NULL);
2958
2959 /* Couldn't find default GID location */
2960 - WARN_ON(ix < 0);
2961 + if (WARN_ON(ix < 0))
2962 + goto release;
2963
2964 zattr_type.gid_type = gid_type;
2965
2966 diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
2967 index 6b4e8a008bc0..564adf3116e8 100644
2968 --- a/drivers/infiniband/core/ucm.c
2969 +++ b/drivers/infiniband/core/ucm.c
2970 @@ -48,6 +48,7 @@
2971
2972 #include <asm/uaccess.h>
2973
2974 +#include <rdma/ib.h>
2975 #include <rdma/ib_cm.h>
2976 #include <rdma/ib_user_cm.h>
2977 #include <rdma/ib_marshall.h>
2978 @@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
2979 struct ib_ucm_cmd_hdr hdr;
2980 ssize_t result;
2981
2982 + if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
2983 + return -EACCES;
2984 +
2985 if (len < sizeof(hdr))
2986 return -EINVAL;
2987
2988 diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
2989 index 8b5a934e1133..886f61ea6cc7 100644
2990 --- a/drivers/infiniband/core/ucma.c
2991 +++ b/drivers/infiniband/core/ucma.c
2992 @@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
2993 struct rdma_ucm_cmd_hdr hdr;
2994 ssize_t ret;
2995
2996 + if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
2997 + return -EACCES;
2998 +
2999 if (len < sizeof(hdr))
3000 return -EINVAL;
3001
3002 diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
3003 index 39680aed99dd..d3fb8aa46c59 100644
3004 --- a/drivers/infiniband/core/uverbs_main.c
3005 +++ b/drivers/infiniband/core/uverbs_main.c
3006 @@ -48,6 +48,8 @@
3007
3008 #include <asm/uaccess.h>
3009
3010 +#include <rdma/ib.h>
3011 +
3012 #include "uverbs.h"
3013
3014 MODULE_AUTHOR("Roland Dreier");
3015 @@ -693,6 +695,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
3016 int srcu_key;
3017 ssize_t ret;
3018
3019 + if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
3020 + return -EACCES;
3021 +
3022 if (count < sizeof hdr)
3023 return -EINVAL;
3024
3025 diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
3026 index 03c418ccbc98..ed9cefa1f6f1 100644
3027 --- a/drivers/infiniband/hw/mlx5/main.c
3028 +++ b/drivers/infiniband/hw/mlx5/main.c
3029 @@ -517,7 +517,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
3030 sizeof(struct mlx5_wqe_ctrl_seg)) /
3031 sizeof(struct mlx5_wqe_data_seg);
3032 props->max_sge = min(max_rq_sg, max_sq_sg);
3033 - props->max_sge_rd = props->max_sge;
3034 + props->max_sge_rd = MLX5_MAX_SGE_RD;
3035 props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
3036 props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
3037 props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
3038 diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
3039 index e449e394963f..24f4a782e0f4 100644
3040 --- a/drivers/infiniband/hw/qib/qib_file_ops.c
3041 +++ b/drivers/infiniband/hw/qib/qib_file_ops.c
3042 @@ -45,6 +45,8 @@
3043 #include <linux/export.h>
3044 #include <linux/uio.h>
3045
3046 +#include <rdma/ib.h>
3047 +
3048 #include "qib.h"
3049 #include "qib_common.h"
3050 #include "qib_user_sdma.h"
3051 @@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
3052 ssize_t ret = 0;
3053 void *dest;
3054
3055 + if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
3056 + return -EACCES;
3057 +
3058 if (count < sizeof(cmd.type)) {
3059 ret = -EINVAL;
3060 goto bail;
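
Each of the write() handlers patched above now refuses commands unless ib_safe_file_access() approves the caller's context, before the usual short-write check. A toy model of that guard (the helper is a stand-in, not the rdma/ib.h API):

#include <stdio.h>
#include <string.h>

struct cmd_hdr { unsigned cmd; unsigned len; };

static int safe_file_access(void) { return 1; }   /* stand-in check */

static long dev_write(const char *buf, size_t count)
{
        struct cmd_hdr hdr;

        if (!safe_file_access())
                return -13;                /* -EACCES: unsafe caller context */
        if (count < sizeof(hdr))
                return -22;                /* -EINVAL: short write */
        memcpy(&hdr, buf, sizeof(hdr));    /* copy_from_user() in-kernel */
        return (long)count;
}

int main(void)
{
        char buf[16] = { 0 };
        printf("%ld\n", dev_write(buf, sizeof(buf)));
        return 0;
}
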
3061 diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
3062 index 3f02e0e03d12..67aab86048ad 100644
3063 --- a/drivers/input/misc/pmic8xxx-pwrkey.c
3064 +++ b/drivers/input/misc/pmic8xxx-pwrkey.c
3065 @@ -353,7 +353,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
3066 if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
3067 kpd_delay = 15625;
3068
3069 - if (kpd_delay > 62500 || kpd_delay == 0) {
3070 + /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
3071 + if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
3072 dev_err(&pdev->dev, "invalid power key trigger delay\n");
3073 return -EINVAL;
3074 }
3075 @@ -385,8 +386,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
3076 pwr->name = "pmic8xxx_pwrkey";
3077 pwr->phys = "pmic8xxx_pwrkey/input0";
3078
3079 - delay = (kpd_delay << 10) / USEC_PER_SEC;
3080 - delay = 1 + ilog2(delay);
3081 + delay = (kpd_delay << 6) / USEC_PER_SEC;
3082 + delay = ilog2(delay);
3083
3084 err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
3085 if (err < 0) {
3086 diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
3087 index 3a7f3a4a4396..7c18249d6c8e 100644
3088 --- a/drivers/input/tablet/gtco.c
3089 +++ b/drivers/input/tablet/gtco.c
3090 @@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
3091 goto err_free_buf;
3092 }
3093
3094 + /* Sanity check that a device has an endpoint */
3095 + if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
3096 + dev_err(&usbinterface->dev,
3097 + "Invalid number of endpoints\n");
3098 + error = -EINVAL;
3099 + goto err_free_urb;
3100 + }
3101 +
3102 /*
3103 * The endpoint is always altsetting 0, we know this since we know
3104 * this device only has one interrupt endpoint
3105 @@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
3106 * HID report descriptor
3107 */
3108 if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
3109 - HID_DEVICE_TYPE, &hid_desc) != 0){
3110 + HID_DEVICE_TYPE, &hid_desc) != 0) {
3111 dev_err(&usbinterface->dev,
3112 	 "Can't retrieve extra USB descriptor to get hid report descriptor length\n");
3113 error = -EIO;
3114 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
3115 index 374c129219ef..5efadad4615b 100644
3116 --- a/drivers/iommu/amd_iommu.c
3117 +++ b/drivers/iommu/amd_iommu.c
3118 @@ -92,6 +92,7 @@ struct iommu_dev_data {
3119 struct list_head dev_data_list; /* For global dev_data_list */
3120 struct protection_domain *domain; /* Domain the device is bound to */
3121 u16 devid; /* PCI Device ID */
3122 + u16 alias; /* Alias Device ID */
3123 bool iommu_v2; /* Device can make use of IOMMUv2 */
3124 bool passthrough; /* Device is identity mapped */
3125 struct {
3126 @@ -166,6 +167,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
3127 return container_of(dom, struct protection_domain, domain);
3128 }
3129
3130 +static inline u16 get_device_id(struct device *dev)
3131 +{
3132 + struct pci_dev *pdev = to_pci_dev(dev);
3133 +
3134 + return PCI_DEVID(pdev->bus->number, pdev->devfn);
3135 +}
3136 +
3137 static struct iommu_dev_data *alloc_dev_data(u16 devid)
3138 {
3139 struct iommu_dev_data *dev_data;
3140 @@ -203,6 +211,68 @@ out_unlock:
3141 return dev_data;
3142 }
3143
3144 +static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
3145 +{
3146 + *(u16 *)data = alias;
3147 + return 0;
3148 +}
3149 +
3150 +static u16 get_alias(struct device *dev)
3151 +{
3152 + struct pci_dev *pdev = to_pci_dev(dev);
3153 + u16 devid, ivrs_alias, pci_alias;
3154 +
3155 + devid = get_device_id(dev);
3156 + ivrs_alias = amd_iommu_alias_table[devid];
3157 + pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
3158 +
3159 + if (ivrs_alias == pci_alias)
3160 + return ivrs_alias;
3161 +
3162 + /*
3163 + * DMA alias showdown
3164 + *
3165 + * The IVRS is fairly reliable in telling us about aliases, but it
3166 + * can't know about every screwy device. If we don't have an IVRS
3167 + * reported alias, use the PCI reported alias. In that case we may
3168 + * still need to initialize the rlookup and dev_table entries if the
3169 + * alias is to a non-existent device.
3170 + */
3171 + if (ivrs_alias == devid) {
3172 + if (!amd_iommu_rlookup_table[pci_alias]) {
3173 + amd_iommu_rlookup_table[pci_alias] =
3174 + amd_iommu_rlookup_table[devid];
3175 + memcpy(amd_iommu_dev_table[pci_alias].data,
3176 + amd_iommu_dev_table[devid].data,
3177 + sizeof(amd_iommu_dev_table[pci_alias].data));
3178 + }
3179 +
3180 + return pci_alias;
3181 + }
3182 +
3183 + pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
3184 + "for device %s[%04x:%04x], kernel reported alias "
3185 + "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
3186 + PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
3187 + PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
3188 + PCI_FUNC(pci_alias));
3189 +
3190 + /*
3191 + * If we don't have a PCI DMA alias and the IVRS alias is on the same
3192 + * bus, then the IVRS table may know about a quirk that we don't.
3193 + */
3194 + if (pci_alias == devid &&
3195 + PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
3196 + pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
3197 + pdev->dma_alias_devfn = ivrs_alias & 0xff;
3198 + pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
3199 + PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
3200 + dev_name(dev));
3201 + }
3202 +
3203 + return ivrs_alias;
3204 +}
3205 +
3206 static struct iommu_dev_data *find_dev_data(u16 devid)
3207 {
3208 struct iommu_dev_data *dev_data;
3209 @@ -215,13 +285,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
3210 return dev_data;
3211 }
3212
3213 -static inline u16 get_device_id(struct device *dev)
3214 -{
3215 - struct pci_dev *pdev = to_pci_dev(dev);
3216 -
3217 - return PCI_DEVID(pdev->bus->number, pdev->devfn);
3218 -}
3219 -
3220 static struct iommu_dev_data *get_dev_data(struct device *dev)
3221 {
3222 return dev->archdata.iommu;
3223 @@ -349,6 +412,8 @@ static int iommu_init_device(struct device *dev)
3224 if (!dev_data)
3225 return -ENOMEM;
3226
3227 + dev_data->alias = get_alias(dev);
3228 +
3229 if (pci_iommuv2_capable(pdev)) {
3230 struct amd_iommu *iommu;
3231
3232 @@ -369,7 +434,7 @@ static void iommu_ignore_device(struct device *dev)
3233 u16 devid, alias;
3234
3235 devid = get_device_id(dev);
3236 - alias = amd_iommu_alias_table[devid];
3237 + alias = get_alias(dev);
3238
3239 memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
3240 memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
3241 @@ -1061,7 +1126,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
3242 int ret;
3243
3244 iommu = amd_iommu_rlookup_table[dev_data->devid];
3245 - alias = amd_iommu_alias_table[dev_data->devid];
3246 + alias = dev_data->alias;
3247
3248 ret = iommu_flush_dte(iommu, dev_data->devid);
3249 if (!ret && alias != dev_data->devid)
3250 @@ -2039,7 +2104,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
3251 bool ats;
3252
3253 iommu = amd_iommu_rlookup_table[dev_data->devid];
3254 - alias = amd_iommu_alias_table[dev_data->devid];
3255 + alias = dev_data->alias;
3256 ats = dev_data->ats.enabled;
3257
3258 /* Update data structures */
3259 @@ -2073,7 +2138,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
3260 return;
3261
3262 iommu = amd_iommu_rlookup_table[dev_data->devid];
3263 - alias = amd_iommu_alias_table[dev_data->devid];
3264 + alias = dev_data->alias;
3265
3266 /* decrease reference counters */
3267 dev_data->domain->dev_iommu[iommu->index] -= 1;
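
Note: get_alias() above leans on the kernel's requester-ID helpers (PCI_DEVID, PCI_BUS_NUM, PCI_SLOT, PCI_FUNC). A self-contained illustration of that 16-bit encoding, with a made-up device and alias:

	#include <stdio.h>
	#include <stdint.h>

	/* the kernel's helpers, reproduced for illustration */
	#define PCI_DEVID(bus, devfn)	((((uint16_t)(bus)) << 8) | (devfn))
	#define PCI_BUS_NUM(x)		(((x) >> 8) & 0xff)
	#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
	#define PCI_FUNC(devfn)		((devfn) & 0x07)

	int main(void)
	{
		/* hypothetical: 00:14.0 aliased to 00:14.4 by the IVRS */
		uint16_t devid = PCI_DEVID(0x00, (0x14 << 3) | 0);
		uint16_t alias = PCI_DEVID(0x00, (0x14 << 3) | 4);

		printf("device %02x:%02x.%d -> alias %02x:%02x.%d\n",
		       PCI_BUS_NUM(devid), PCI_SLOT(devid & 0xff),
		       PCI_FUNC(devid), PCI_BUS_NUM(alias),
		       PCI_SLOT(alias & 0xff), PCI_FUNC(alias));
		return 0;
	}

Caching the resolved alias once in dev_data->alias is what lets device_flush_dte(), do_attach() and do_detach() below drop their per-call amd_iommu_alias_table[] lookups.
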
3268 diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
3269 index 72d6182666cb..58f2fe687a24 100644
3270 --- a/drivers/iommu/dma-iommu.c
3271 +++ b/drivers/iommu/dma-iommu.c
3272 @@ -403,7 +403,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
3273 unsigned int s_length = sg_dma_len(s);
3274 unsigned int s_dma_len = s->length;
3275
3276 - s->offset = s_offset;
3277 + s->offset += s_offset;
3278 s->length = s_length;
3279 sg_dma_address(s) = dma_addr + s_offset;
3280 dma_addr += s_dma_len;
3281 @@ -422,7 +422,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
3282
3283 for_each_sg(sg, s, nents, i) {
3284 if (sg_dma_address(s) != DMA_ERROR_CODE)
3285 - s->offset = sg_dma_address(s);
3286 + s->offset += sg_dma_address(s);
3287 if (sg_dma_len(s))
3288 s->length = sg_dma_len(s);
3289 sg_dma_address(s) = DMA_ERROR_CODE;
3290 diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
3291 index efe50845939d..17304705f2cf 100644
3292 --- a/drivers/irqchip/irq-mxs.c
3293 +++ b/drivers/irqchip/irq-mxs.c
3294 @@ -183,7 +183,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)
3295 void __iomem *icoll_base;
3296
3297 icoll_base = of_io_request_and_map(np, 0, np->name);
3298 - if (!icoll_base)
3299 + if (IS_ERR(icoll_base))
3300 panic("%s: unable to map resource", np->full_name);
3301 return icoll_base;
3302 }
3303 diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
3304 index 0820f67cc9a7..668730c5cb66 100644
3305 --- a/drivers/irqchip/irq-sunxi-nmi.c
3306 +++ b/drivers/irqchip/irq-sunxi-nmi.c
3307 @@ -160,9 +160,9 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
3308
3309 gc = irq_get_domain_generic_chip(domain, 0);
3310 gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
3311 - if (!gc->reg_base) {
3312 + if (IS_ERR(gc->reg_base)) {
3313 pr_err("unable to map resource\n");
3314 - ret = -ENOMEM;
3315 + ret = PTR_ERR(gc->reg_base);
3316 goto fail_irqd_remove;
3317 }
3318
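
Note: both irqchip fixes correct the same mistake: of_io_request_and_map() reports failure via the ERR_PTR() encoding, never via NULL, so a `!ptr` test passes silently on error. The convention, sketched from include/linux/err.h (the real header adds __force and __must_check annotations):

	#define MAX_ERRNO	4095

	/* error pointers occupy the top 4095 values of the address space */
	static inline bool IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	static inline long PTR_ERR(const void *ptr)
	{
		return (long)ptr;
	}

	/* usage, as in the sunxi hunk above:
	 *	base = of_io_request_and_map(node, 0, name);
	 *	if (IS_ERR(base))
	 *		return PTR_ERR(base);	-- not: if (!base)
	 */
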
3319 diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
3320 index 27f2ef300f8b..3970cda10080 100644
3321 --- a/drivers/md/dm-cache-metadata.c
3322 +++ b/drivers/md/dm-cache-metadata.c
3323 @@ -867,39 +867,55 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
3324 return 0;
3325 }
3326
3327 -#define WRITE_LOCK(cmd) \
3328 - down_write(&cmd->root_lock); \
3329 - if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
3330 - up_write(&cmd->root_lock); \
3331 - return -EINVAL; \
3332 +static bool cmd_write_lock(struct dm_cache_metadata *cmd)
3333 +{
3334 + down_write(&cmd->root_lock);
3335 + if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
3336 + up_write(&cmd->root_lock);
3337 + return false;
3338 }
3339 + return true;
3340 +}
3341
3342 -#define WRITE_LOCK_VOID(cmd) \
3343 - down_write(&cmd->root_lock); \
3344 - if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
3345 - up_write(&cmd->root_lock); \
3346 - return; \
3347 - }
3348 +#define WRITE_LOCK(cmd) \
3349 + do { \
3350 + if (!cmd_write_lock((cmd))) \
3351 + return -EINVAL; \
3352 + } while(0)
3353 +
3354 +#define WRITE_LOCK_VOID(cmd) \
3355 + do { \
3356 + if (!cmd_write_lock((cmd))) \
3357 + return; \
3358 + } while(0)
3359
3360 #define WRITE_UNLOCK(cmd) \
3361 - up_write(&cmd->root_lock)
3362 + up_write(&(cmd)->root_lock)
3363
3364 -#define READ_LOCK(cmd) \
3365 - down_read(&cmd->root_lock); \
3366 - if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
3367 - up_read(&cmd->root_lock); \
3368 - return -EINVAL; \
3369 +static bool cmd_read_lock(struct dm_cache_metadata *cmd)
3370 +{
3371 + down_read(&cmd->root_lock);
3372 + if (cmd->fail_io) {
3373 + up_read(&cmd->root_lock);
3374 + return false;
3375 }
3376 + return true;
3377 +}
3378
3379 -#define READ_LOCK_VOID(cmd) \
3380 - down_read(&cmd->root_lock); \
3381 - if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
3382 - up_read(&cmd->root_lock); \
3383 - return; \
3384 - }
3385 +#define READ_LOCK(cmd) \
3386 + do { \
3387 + if (!cmd_read_lock((cmd))) \
3388 + return -EINVAL; \
3389 + } while(0)
3390 +
3391 +#define READ_LOCK_VOID(cmd) \
3392 + do { \
3393 + if (!cmd_read_lock((cmd))) \
3394 + return; \
3395 + } while(0)
3396
3397 #define READ_UNLOCK(cmd) \
3398 - up_read(&cmd->root_lock)
3399 + up_read(&(cmd)->root_lock)
3400
3401 int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
3402 {
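
Note: besides moving the checks into real functions, the rewrite wraps the early-return macros in do { ... } while (0). A minimal illustration of the hazard that idiom avoids (BAD_LOCK is hypothetical):

	/* without the wrapper the macro expands to two statements ... */
	#define BAD_LOCK(cmd)				\
		down_write(&(cmd)->root_lock);		\
		if ((cmd)->fail_io)			\
			return -EINVAL;

	/*
	 * ... so "if (need_lock) BAD_LOCK(cmd); else cleanup();" pairs
	 * the else with the macro's internal if, and down_write() runs
	 * unconditionally.  The do { } while (0) form is one statement:
	 */
	#define GOOD_LOCK(cmd)				\
		do {					\
			if (!cmd_write_lock((cmd)))	\
				return -EINVAL;		\
		} while (0)
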
3403 diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
3404 index de9ff3bb8edd..6996ab8db108 100644
3405 --- a/drivers/media/usb/usbvision/usbvision-video.c
3406 +++ b/drivers/media/usb/usbvision/usbvision-video.c
3407 @@ -1461,13 +1461,6 @@ static int usbvision_probe(struct usb_interface *intf,
3408 printk(KERN_INFO "%s: %s found\n", __func__,
3409 usbvision_device_data[model].model_string);
3410
3411 - /*
3412 - * this is a security check.
3413 - * an exploit using an incorrect bInterfaceNumber is known
3414 - */
3415 - if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
3416 - return -ENODEV;
3417 -
3418 if (usbvision_device_data[model].interface >= 0)
3419 interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
3420 else if (ifnum < dev->actconfig->desc.bNumInterfaces)
3421 diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
3422 index ff8953ae52d1..d7d7c52a3060 100644
3423 --- a/drivers/media/v4l2-core/videobuf2-core.c
3424 +++ b/drivers/media/v4l2-core/videobuf2-core.c
3425 @@ -1643,7 +1643,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
3426 * Will sleep if required for nonblocking == false.
3427 */
3428 static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
3429 - int nonblocking)
3430 + void *pb, int nonblocking)
3431 {
3432 unsigned long flags;
3433 int ret;
3434 @@ -1664,10 +1664,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
3435 /*
3436 * Only remove the buffer from done_list if v4l2_buffer can handle all
3437 * the planes.
3438 - * Verifying planes is NOT necessary since it already has been checked
3439 - * before the buffer is queued/prepared. So it can never fail.
3440 */
3441 - list_del(&(*vb)->done_entry);
3442 + ret = call_bufop(q, verify_planes_array, *vb, pb);
3443 + if (!ret)
3444 + list_del(&(*vb)->done_entry);
3445 spin_unlock_irqrestore(&q->done_lock, flags);
3446
3447 return ret;
3448 @@ -1746,7 +1746,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
3449 struct vb2_buffer *vb = NULL;
3450 int ret;
3451
3452 - ret = __vb2_get_done_vb(q, &vb, nonblocking);
3453 + ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
3454 if (ret < 0)
3455 return ret;
3456
3457 @@ -2293,6 +2293,16 @@ unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
3458 return POLLERR;
3459
3460 /*
3461 + * If this quirk is set and QBUF hasn't been called yet then
3462 + * return POLLERR as well. This only affects capture queues, output
3463 + * queues will always initialize waiting_for_buffers to false.
3464 + * This quirk is set by V4L2 for backwards compatibility reasons.
3465 + */
3466 + if (q->quirk_poll_must_check_waiting_for_buffers &&
3467 + q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
3468 + return POLLERR;
3469 +
3470 + /*
3471 * For output streams you can call write() as long as there are fewer
3472 * buffers queued than there are buffers available.
3473 */
3474 diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
3475 index dbec5923fcf0..3c3b517f1d1c 100644
3476 --- a/drivers/media/v4l2-core/videobuf2-memops.c
3477 +++ b/drivers/media/v4l2-core/videobuf2-memops.c
3478 @@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
3479 vec = frame_vector_create(nr);
3480 if (!vec)
3481 return ERR_PTR(-ENOMEM);
3482 - ret = get_vaddr_frames(start, nr, write, 1, vec);
3483 + ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
3484 if (ret < 0)
3485 goto out_destroy;
3486 /* We accept only complete set of PFNs */
3487 diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
3488 index 91f552124050..7f366f1b0377 100644
3489 --- a/drivers/media/v4l2-core/videobuf2-v4l2.c
3490 +++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
3491 @@ -74,6 +74,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
3492 return 0;
3493 }
3494
3495 +static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
3496 +{
3497 + return __verify_planes_array(vb, pb);
3498 +}
3499 +
3500 /**
3501 * __verify_length() - Verify that the bytesused value for each plane fits in
3502 * the plane length and that the data offset doesn't exceed the bytesused value.
3503 @@ -437,6 +442,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
3504 }
3505
3506 static const struct vb2_buf_ops v4l2_buf_ops = {
3507 + .verify_planes_array = __verify_planes_array_core,
3508 .fill_user_buffer = __fill_v4l2_buffer,
3509 .fill_vb2_buffer = __fill_vb2_buffer,
3510 .copy_timestamp = __copy_timestamp,
3511 @@ -765,6 +771,12 @@ int vb2_queue_init(struct vb2_queue *q)
3512 q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
3513 q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
3514 == V4L2_BUF_FLAG_TIMESTAMP_COPY;
3515 + /*
3516 + * For compatibility with vb1: if QBUF hasn't been called yet, then
3517 + * return POLLERR as well. This only affects capture queues, output
3518 + * queues will always initialize waiting_for_buffers to false.
3519 + */
3520 + q->quirk_poll_must_check_waiting_for_buffers = true;
3521
3522 return vb2_core_queue_init(q);
3523 }
3524 @@ -818,14 +830,6 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
3525 poll_wait(file, &fh->wait, wait);
3526 }
3527
3528 - /*
3529 - * For compatibility with vb1: if QBUF hasn't been called yet, then
3530 - * return POLLERR as well. This only affects capture queues, output
3531 - * queues will always initialize waiting_for_buffers to false.
3532 - */
3533 - if (q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
3534 - return POLLERR;
3535 -
3536 return res | vb2_core_poll(q, file, wait);
3537 }
3538 EXPORT_SYMBOL_GPL(vb2_poll);
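
Note: verify_planes_array is an optional vb2_buf_ops callback, and __vb2_get_done_vb() reaches it through call_bufop(). That macro (from videobuf2-core.c, shown approximately) treats a missing callback as success, so queues that never set the op keep their previous dequeue behaviour:

	#define call_bufop(q, op, args...)			\
	({							\
		int ret = 0;					\
		if (q && q->buf_ops && q->buf_ops->op)		\
			ret = q->buf_ops->op(args);		\
		ret;						\
	})
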
3539 diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
3540 index 054fc10cb3b6..b22c03264270 100644
3541 --- a/drivers/misc/Kconfig
3542 +++ b/drivers/misc/Kconfig
3543 @@ -440,7 +440,7 @@ config ARM_CHARLCD
3544 still useful.
3545
3546 config BMP085
3547 - bool
3548 + tristate
3549 depends on SYSFS
3550
3551 config BMP085_I2C
3552 diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
3553 index 15e88078ba1e..f1a0b99f5a9a 100644
3554 --- a/drivers/misc/ad525x_dpot.c
3555 +++ b/drivers/misc/ad525x_dpot.c
3556 @@ -216,7 +216,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
3557 */
3558 value = swab16(value);
3559
3560 - if (dpot->uid == DPOT_UID(AD5271_ID))
3561 + if (dpot->uid == DPOT_UID(AD5274_ID))
3562 value = value >> 2;
3563 return value;
3564 default:
3565 diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
3566 index 09a406058c46..efbb6945eb18 100644
3567 --- a/drivers/misc/cxl/irq.c
3568 +++ b/drivers/misc/cxl/irq.c
3569 @@ -288,7 +288,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
3570 void cxl_unmap_irq(unsigned int virq, void *cookie)
3571 {
3572 free_irq(virq, cookie);
3573 - irq_dispose_mapping(virq);
3574 }
3575
3576 static int cxl_register_one_irq(struct cxl *adapter,
3577 diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
3578 index 8310b4dbff06..6a451bd65bf3 100644
3579 --- a/drivers/misc/mic/scif/scif_rma.c
3580 +++ b/drivers/misc/mic/scif/scif_rma.c
3581 @@ -1511,7 +1511,7 @@ off_t scif_register_pinned_pages(scif_epd_t epd,
3582 if ((map_flags & SCIF_MAP_FIXED) &&
3583 ((ALIGN(offset, PAGE_SIZE) != offset) ||
3584 (offset < 0) ||
3585 - (offset + (off_t)len < offset)))
3586 + (len > LONG_MAX - offset)))
3587 return -EINVAL;
3588
3589 might_sleep();
3590 @@ -1614,7 +1614,7 @@ off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
3591 if ((map_flags & SCIF_MAP_FIXED) &&
3592 ((ALIGN(offset, PAGE_SIZE) != offset) ||
3593 (offset < 0) ||
3594 - (offset + (off_t)len < offset)))
3595 + (len > LONG_MAX - offset)))
3596 return -EINVAL;
3597
3598 /* Unsupported protection requested */
3599 @@ -1732,7 +1732,8 @@ scif_unregister(scif_epd_t epd, off_t offset, size_t len)
3600
3601 /* Offset is not page aligned or offset+len wraps around */
3602 if ((ALIGN(offset, PAGE_SIZE) != offset) ||
3603 - (offset + (off_t)len < offset))
3604 + (offset < 0) ||
3605 + (len > LONG_MAX - offset))
3606 return -EINVAL;
3607
3608 err = scif_verify_epd(ep);
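
Note: all three SCIF checks replace `offset + (off_t)len < offset`, which relies on signed overflow and is therefore undefined behaviour, with a form that cannot overflow. A standalone equivalent of the new test (range_ok is a hypothetical helper):

	#include <limits.h>
	#include <stdio.h>

	/* true when [offset, offset + len) stays within the off_t range */
	static int range_ok(long offset, unsigned long len)
	{
		if (offset < 0)
			return 0;
		/* no overflow possible: offset is non-negative here */
		return len <= (unsigned long)(LONG_MAX - offset);
	}

	int main(void)
	{
		printf("%d\n", range_ok(4096, 8192));		/* 1 */
		printf("%d\n", range_ok(LONG_MAX - 10, 100));	/* 0 */
		return 0;
	}
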
3609 diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
3610 index 5fbffdb6b854..c6f36f3ca5d2 100644
3611 --- a/drivers/mmc/card/block.c
3612 +++ b/drivers/mmc/card/block.c
3613 @@ -86,7 +86,6 @@ static int max_devices;
3614
3615 /* TODO: Replace these with struct ida */
3616 static DECLARE_BITMAP(dev_use, MAX_DEVICES);
3617 -static DECLARE_BITMAP(name_use, MAX_DEVICES);
3618
3619 /*
3620 * There is one mmc_blk_data per slot.
3621 @@ -105,7 +104,6 @@ struct mmc_blk_data {
3622 unsigned int usage;
3623 unsigned int read_only;
3624 unsigned int part_type;
3625 - unsigned int name_idx;
3626 unsigned int reset_done;
3627 #define MMC_BLK_READ BIT(0)
3628 #define MMC_BLK_WRITE BIT(1)
3629 @@ -2202,19 +2200,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
3630 goto out;
3631 }
3632
3633 - /*
3634 - * !subname implies we are creating main mmc_blk_data that will be
3635 - * associated with mmc_card with dev_set_drvdata. Due to device
3636 - * partitions, devidx will not coincide with a per-physical card
3637 - * index anymore so we keep track of a name index.
3638 - */
3639 - if (!subname) {
3640 - md->name_idx = find_first_zero_bit(name_use, max_devices);
3641 - __set_bit(md->name_idx, name_use);
3642 - } else
3643 - md->name_idx = ((struct mmc_blk_data *)
3644 - dev_to_disk(parent)->private_data)->name_idx;
3645 -
3646 md->area_type = area_type;
3647
3648 /*
3649 @@ -2264,7 +2249,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
3650 */
3651
3652 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
3653 - "mmcblk%u%s", md->name_idx, subname ? subname : "");
3654 + "mmcblk%u%s", card->host->index, subname ? subname : "");
3655
3656 if (mmc_card_mmc(card))
3657 blk_queue_logical_block_size(md->queue.queue,
3658 @@ -2418,7 +2403,6 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
3659 struct list_head *pos, *q;
3660 struct mmc_blk_data *part_md;
3661
3662 - __clear_bit(md->name_idx, name_use);
3663 list_for_each_safe(pos, q, &md->part) {
3664 part_md = list_entry(pos, struct mmc_blk_data, part);
3665 list_del(pos);
3666 diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
3667 index 1526b8a10b09..3b944fc70eec 100644
3668 --- a/drivers/mmc/host/Kconfig
3669 +++ b/drivers/mmc/host/Kconfig
3670 @@ -97,6 +97,7 @@ config MMC_RICOH_MMC
3671 config MMC_SDHCI_ACPI
3672 tristate "SDHCI support for ACPI enumerated SDHCI controllers"
3673 depends on MMC_SDHCI && ACPI
3674 + select IOSF_MBI if X86
3675 help
3676 This selects support for ACPI enumerated SDHCI controllers,
3677 identified by ACPI Compatibility ID PNP0D40 or specific
3678 diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
3679 index a5cda926d38e..975139f97498 100644
3680 --- a/drivers/mmc/host/sdhci-acpi.c
3681 +++ b/drivers/mmc/host/sdhci-acpi.c
3682 @@ -41,6 +41,11 @@
3683 #include <linux/mmc/pm.h>
3684 #include <linux/mmc/slot-gpio.h>
3685
3686 +#ifdef CONFIG_X86
3687 +#include <asm/cpu_device_id.h>
3688 +#include <asm/iosf_mbi.h>
3689 +#endif
3690 +
3691 #include "sdhci.h"
3692
3693 enum {
3694 @@ -146,6 +151,75 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
3695 .ops = &sdhci_acpi_ops_int,
3696 };
3697
3698 +#ifdef CONFIG_X86
3699 +
3700 +static bool sdhci_acpi_byt(void)
3701 +{
3702 + static const struct x86_cpu_id byt[] = {
3703 + { X86_VENDOR_INTEL, 6, 0x37 },
3704 + {}
3705 + };
3706 +
3707 + return x86_match_cpu(byt);
3708 +}
3709 +
3710 +#define BYT_IOSF_SCCEP 0x63
3711 +#define BYT_IOSF_OCP_NETCTRL0 0x1078
3712 +#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
3713 +
3714 +static void sdhci_acpi_byt_setting(struct device *dev)
3715 +{
3716 + u32 val = 0;
3717 +
3718 + if (!sdhci_acpi_byt())
3719 + return;
3720 +
3721 + if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
3722 + &val)) {
3723 + dev_err(dev, "%s read error\n", __func__);
3724 + return;
3725 + }
3726 +
3727 + if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
3728 + return;
3729 +
3730 + val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
3731 +
3732 + if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
3733 + val)) {
3734 + dev_err(dev, "%s write error\n", __func__);
3735 + return;
3736 + }
3737 +
3738 + dev_dbg(dev, "%s completed\n", __func__);
3739 +}
3740 +
3741 +static bool sdhci_acpi_byt_defer(struct device *dev)
3742 +{
3743 + if (!sdhci_acpi_byt())
3744 + return false;
3745 +
3746 + if (!iosf_mbi_available())
3747 + return true;
3748 +
3749 + sdhci_acpi_byt_setting(dev);
3750 +
3751 + return false;
3752 +}
3753 +
3754 +#else
3755 +
3756 +static inline void sdhci_acpi_byt_setting(struct device *dev)
3757 +{
3758 +}
3759 +
3760 +static inline bool sdhci_acpi_byt_defer(struct device *dev)
3761 +{
3762 + return false;
3763 +}
3764 +
3765 +#endif
3766 +
3767 static int bxt_get_cd(struct mmc_host *mmc)
3768 {
3769 int gpio_cd = mmc_gpio_get_cd(mmc);
3770 @@ -337,6 +411,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
3771 if (acpi_bus_get_status(device) || !device->status.present)
3772 return -ENODEV;
3773
3774 + if (sdhci_acpi_byt_defer(dev))
3775 + return -EPROBE_DEFER;
3776 +
3777 hid = acpi_device_hid(device);
3778 uid = device->pnp.unique_id;
3779
3780 @@ -460,6 +537,8 @@ static int sdhci_acpi_resume(struct device *dev)
3781 {
3782 struct sdhci_acpi_host *c = dev_get_drvdata(dev);
3783
3784 + sdhci_acpi_byt_setting(&c->pdev->dev);
3785 +
3786 return sdhci_resume_host(c->host);
3787 }
3788
3789 @@ -483,6 +562,8 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
3790 {
3791 struct sdhci_acpi_host *c = dev_get_drvdata(dev);
3792
3793 + sdhci_acpi_byt_setting(&c->pdev->dev);
3794 +
3795 return sdhci_runtime_resume_host(c->host);
3796 }
3797
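Note: sdhci_acpi_byt_defer() is the standard way to express a dependency on another driver: if the IOSF MBI side has not probed yet, the host returns -EPROBE_DEFER and the driver core retries the probe later. Schematically (example_probe is hypothetical):

	static int example_probe(struct platform_device *pdev)
	{
		/* dependency not ready: ask the core to re-probe later */
		if (!iosf_mbi_available())
			return -EPROBE_DEFER;

		/* ... normal setup; the OCP timeout fix is also redone in
		 * the resume paths, since firmware may restore the old
		 * value across suspend ... */
		return 0;
	}
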
3798 diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
3799 index 844fc07d22cd..f7009c1cb90c 100644
3800 --- a/drivers/mtd/nand/brcmnand/brcmnand.c
3801 +++ b/drivers/mtd/nand/brcmnand/brcmnand.c
3802 @@ -311,6 +311,36 @@ static const u16 brcmnand_regs_v60[] = {
3803 [BRCMNAND_FC_BASE] = 0x400,
3804 };
3805
3806 +/* BRCMNAND v7.1 */
3807 +static const u16 brcmnand_regs_v71[] = {
3808 + [BRCMNAND_CMD_START] = 0x04,
3809 + [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
3810 + [BRCMNAND_CMD_ADDRESS] = 0x0c,
3811 + [BRCMNAND_INTFC_STATUS] = 0x14,
3812 + [BRCMNAND_CS_SELECT] = 0x18,
3813 + [BRCMNAND_CS_XOR] = 0x1c,
3814 + [BRCMNAND_LL_OP] = 0x20,
3815 + [BRCMNAND_CS0_BASE] = 0x50,
3816 + [BRCMNAND_CS1_BASE] = 0,
3817 + [BRCMNAND_CORR_THRESHOLD] = 0xdc,
3818 + [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
3819 + [BRCMNAND_UNCORR_COUNT] = 0xfc,
3820 + [BRCMNAND_CORR_COUNT] = 0x100,
3821 + [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
3822 + [BRCMNAND_CORR_ADDR] = 0x110,
3823 + [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
3824 + [BRCMNAND_UNCORR_ADDR] = 0x118,
3825 + [BRCMNAND_SEMAPHORE] = 0x150,
3826 + [BRCMNAND_ID] = 0x194,
3827 + [BRCMNAND_ID_EXT] = 0x198,
3828 + [BRCMNAND_LL_RDATA] = 0x19c,
3829 + [BRCMNAND_OOB_READ_BASE] = 0x200,
3830 + [BRCMNAND_OOB_READ_10_BASE] = 0,
3831 + [BRCMNAND_OOB_WRITE_BASE] = 0x280,
3832 + [BRCMNAND_OOB_WRITE_10_BASE] = 0,
3833 + [BRCMNAND_FC_BASE] = 0x400,
3834 +};
3835 +
3836 enum brcmnand_cs_reg {
3837 BRCMNAND_CS_CFG_EXT = 0,
3838 BRCMNAND_CS_CFG,
3839 @@ -406,7 +436,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
3840 }
3841
3842 /* Register offsets */
3843 - if (ctrl->nand_version >= 0x0600)
3844 + if (ctrl->nand_version >= 0x0701)
3845 + ctrl->reg_offsets = brcmnand_regs_v71;
3846 + else if (ctrl->nand_version >= 0x0600)
3847 ctrl->reg_offsets = brcmnand_regs_v60;
3848 else if (ctrl->nand_version >= 0x0500)
3849 ctrl->reg_offsets = brcmnand_regs_v50;
3850 diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
3851 index f2c8ff398d6c..171d146645ba 100644
3852 --- a/drivers/mtd/nand/nand_base.c
3853 +++ b/drivers/mtd/nand/nand_base.c
3854 @@ -4021,7 +4021,6 @@ static int nand_dt_init(struct nand_chip *chip)
3855 * This is the first phase of the normal nand_scan() function. It reads the
3856 * flash ID and sets up MTD fields accordingly.
3857 *
3858 - * The mtd->owner field must be set to the module of the caller.
3859 */
3860 int nand_scan_ident(struct mtd_info *mtd, int maxchips,
3861 struct nand_flash_dev *table)
3862 @@ -4443,19 +4442,12 @@ EXPORT_SYMBOL(nand_scan_tail);
3863 *
3864 * This fills out all the uninitialized function pointers with the defaults.
3865 * The flash ID is read and the mtd/chip structures are filled with the
3866 - * appropriate values. The mtd->owner field must be set to the module of the
3867 - * caller.
3868 + * appropriate values.
3869 */
3870 int nand_scan(struct mtd_info *mtd, int maxchips)
3871 {
3872 int ret;
3873
3874 - /* Many callers got this wrong, so check for it for a while... */
3875 - if (!mtd->owner && caller_is_module()) {
3876 - pr_crit("%s called with NULL mtd->owner!\n", __func__);
3877 - BUG();
3878 - }
3879 -
3880 ret = nand_scan_ident(mtd, maxchips, NULL);
3881 if (!ret)
3882 ret = nand_scan_tail(mtd);
3883 diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
3884 index 86fc245dc71a..fd78644469fa 100644
3885 --- a/drivers/mtd/nand/pxa3xx_nand.c
3886 +++ b/drivers/mtd/nand/pxa3xx_nand.c
3887 @@ -1738,7 +1738,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
3888 if (ret < 0)
3889 return ret;
3890
3891 - if (use_dma) {
3892 + if (!np && use_dma) {
3893 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
3894 if (r == NULL) {
3895 dev_err(&pdev->dev,
3896 diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
3897 index ed0c19c558b5..3028c06547c1 100644
3898 --- a/drivers/mtd/spi-nor/spi-nor.c
3899 +++ b/drivers/mtd/spi-nor/spi-nor.c
3900 @@ -1100,45 +1100,6 @@ static int spansion_quad_enable(struct spi_nor *nor)
3901 return 0;
3902 }
3903
3904 -static int micron_quad_enable(struct spi_nor *nor)
3905 -{
3906 - int ret;
3907 - u8 val;
3908 -
3909 - ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
3910 - if (ret < 0) {
3911 - dev_err(nor->dev, "error %d reading EVCR\n", ret);
3912 - return ret;
3913 - }
3914 -
3915 - write_enable(nor);
3916 -
3917 - /* set EVCR, enable quad I/O */
3918 - nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
3919 - ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1);
3920 - if (ret < 0) {
3921 - dev_err(nor->dev, "error while writing EVCR register\n");
3922 - return ret;
3923 - }
3924 -
3925 - ret = spi_nor_wait_till_ready(nor);
3926 - if (ret)
3927 - return ret;
3928 -
3929 - /* read EVCR and check it */
3930 - ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
3931 - if (ret < 0) {
3932 - dev_err(nor->dev, "error %d reading EVCR\n", ret);
3933 - return ret;
3934 - }
3935 - if (val & EVCR_QUAD_EN_MICRON) {
3936 - dev_err(nor->dev, "Micron EVCR Quad bit not clear\n");
3937 - return -EINVAL;
3938 - }
3939 -
3940 - return 0;
3941 -}
3942 -
3943 static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
3944 {
3945 int status;
3946 @@ -1152,12 +1113,7 @@ static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
3947 }
3948 return status;
3949 case SNOR_MFR_MICRON:
3950 - status = micron_quad_enable(nor);
3951 - if (status) {
3952 - dev_err(nor->dev, "Micron quad-read not enabled\n");
3953 - return -EINVAL;
3954 - }
3955 - return status;
3956 + return 0;
3957 default:
3958 status = spansion_quad_enable(nor);
3959 if (status) {
3960 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
3961 index d70a1716f3e0..1486f33a743e 100644
3962 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
3963 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
3964 @@ -1143,6 +1143,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
3965 /* the fw is stopped, the aux sta is dead: clean up driver state */
3966 iwl_mvm_del_aux_sta(mvm);
3967
3968 + iwl_free_fw_paging(mvm);
3969 +
3970 /*
3971 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
3972 * won't be called in this case).
3973 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
3974 index e80be9a59520..89ea70deeb84 100644
3975 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
3976 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
3977 @@ -684,8 +684,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
3978 for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
3979 kfree(mvm->nvm_sections[i].data);
3980
3981 - iwl_free_fw_paging(mvm);
3982 -
3983 iwl_mvm_tof_clean(mvm);
3984
3985 ieee80211_free_hw(mvm->hw);
3986 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
3987 index 5a854c609477..1198caac35c8 100644
3988 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
3989 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
3990 @@ -731,8 +731,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
3991 */
3992 val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
3993 if (val & (BIT(1) | BIT(17))) {
3994 - IWL_INFO(trans,
3995 - "can't access the RSA semaphore it is write protected\n");
3996 + IWL_DEBUG_INFO(trans,
3997 + "can't access the RSA semaphore it is write protected\n");
3998 return 0;
3999 }
4000
4001 diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
4002 index ff3ee9dfbbd5..23bae87d4d3d 100644
4003 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
4004 +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
4005 @@ -607,11 +607,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
4006
4007 case EVENT_PS_AWAKE:
4008 mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
4009 - if (!adapter->pps_uapsd_mode && priv->port_open &&
4010 + if (!adapter->pps_uapsd_mode &&
4011 + (priv->port_open ||
4012 + (priv->bss_mode == NL80211_IFTYPE_ADHOC)) &&
4013 priv->media_connected && adapter->sleep_period.period) {
4014 - adapter->pps_uapsd_mode = true;
4015 - mwifiex_dbg(adapter, EVENT,
4016 - "event: PPS/UAPSD mode activated\n");
4017 + adapter->pps_uapsd_mode = true;
4018 + mwifiex_dbg(adapter, EVENT,
4019 + "event: PPS/UAPSD mode activated\n");
4020 }
4021 adapter->tx_lock_flag = false;
4022 if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
4023 diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
4024 index acccd6734e3b..499e5a741c62 100644
4025 --- a/drivers/net/wireless/marvell/mwifiex/wmm.c
4026 +++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
4027 @@ -475,7 +475,8 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
4028 priv = adapter->priv[i];
4029 if (!priv)
4030 continue;
4031 - if (!priv->port_open)
4032 + if (!priv->port_open &&
4033 + (priv->bss_mode != NL80211_IFTYPE_ADHOC))
4034 continue;
4035 if (adapter->if_ops.is_port_ready &&
4036 !adapter->if_ops.is_port_ready(priv))
4037 @@ -1099,7 +1100,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
4038
4039 priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
4040
4041 - if (!priv_tmp->port_open ||
4042 + if (((priv_tmp->bss_mode != NL80211_IFTYPE_ADHOC) &&
4043 + !priv_tmp->port_open) ||
4044 (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
4045 continue;
4046
4047 diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
4048 index 588803ad6847..6ccba0d862df 100644
4049 --- a/drivers/ntb/hw/amd/ntb_hw_amd.c
4050 +++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
4051 @@ -357,20 +357,6 @@ static int amd_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
4052 return 0;
4053 }
4054
4055 -static int amd_ntb_peer_db_addr(struct ntb_dev *ntb,
4056 - phys_addr_t *db_addr,
4057 - resource_size_t *db_size)
4058 -{
4059 - struct amd_ntb_dev *ndev = ntb_ndev(ntb);
4060 -
4061 - if (db_addr)
4062 - *db_addr = (phys_addr_t)(ndev->peer_mmio + AMD_DBREQ_OFFSET);
4063 - if (db_size)
4064 - *db_size = sizeof(u32);
4065 -
4066 - return 0;
4067 -}
4068 -
4069 static int amd_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
4070 {
4071 struct amd_ntb_dev *ndev = ntb_ndev(ntb);
4072 @@ -415,20 +401,6 @@ static int amd_ntb_spad_write(struct ntb_dev *ntb,
4073 return 0;
4074 }
4075
4076 -static int amd_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
4077 - phys_addr_t *spad_addr)
4078 -{
4079 - struct amd_ntb_dev *ndev = ntb_ndev(ntb);
4080 -
4081 - if (idx < 0 || idx >= ndev->spad_count)
4082 - return -EINVAL;
4083 -
4084 - if (spad_addr)
4085 - *spad_addr = (phys_addr_t)(ndev->self_mmio + AMD_SPAD_OFFSET +
4086 - ndev->peer_spad + (idx << 2));
4087 - return 0;
4088 -}
4089 -
4090 static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
4091 {
4092 struct amd_ntb_dev *ndev = ntb_ndev(ntb);
4093 @@ -472,12 +444,10 @@ static const struct ntb_dev_ops amd_ntb_ops = {
4094 .db_clear = amd_ntb_db_clear,
4095 .db_set_mask = amd_ntb_db_set_mask,
4096 .db_clear_mask = amd_ntb_db_clear_mask,
4097 - .peer_db_addr = amd_ntb_peer_db_addr,
4098 .peer_db_set = amd_ntb_peer_db_set,
4099 .spad_count = amd_ntb_spad_count,
4100 .spad_read = amd_ntb_spad_read,
4101 .spad_write = amd_ntb_spad_write,
4102 - .peer_spad_addr = amd_ntb_peer_spad_addr,
4103 .peer_spad_read = amd_ntb_peer_spad_read,
4104 .peer_spad_write = amd_ntb_peer_spad_write,
4105 };
4106 diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
4107 index c8a37ba4b4f9..6bdc1e7b7503 100644
4108 --- a/drivers/ntb/test/ntb_perf.c
4109 +++ b/drivers/ntb/test/ntb_perf.c
4110 @@ -178,7 +178,7 @@ static void perf_copy_callback(void *data)
4111 atomic_dec(&pctx->dma_sync);
4112 }
4113
4114 -static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
4115 +static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
4116 char *src, size_t size)
4117 {
4118 struct perf_ctx *perf = pctx->perf;
4119 @@ -189,7 +189,8 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
4120 dma_cookie_t cookie;
4121 size_t src_off, dst_off;
4122 struct perf_mw *mw = &perf->mw;
4123 - u64 vbase, dst_vaddr;
4124 + void __iomem *vbase;
4125 + void __iomem *dst_vaddr;
4126 dma_addr_t dst_phys;
4127 int retries = 0;
4128
4129 @@ -204,14 +205,14 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
4130 }
4131
4132 device = chan->device;
4133 - src_off = (size_t)src & ~PAGE_MASK;
4134 - dst_off = (size_t)dst & ~PAGE_MASK;
4135 + src_off = (uintptr_t)src & ~PAGE_MASK;
4136 + dst_off = (uintptr_t __force)dst & ~PAGE_MASK;
4137
4138 if (!is_dma_copy_aligned(device, src_off, dst_off, size))
4139 return -ENODEV;
4140
4141 - vbase = (u64)(u64 *)mw->vbase;
4142 - dst_vaddr = (u64)(u64 *)dst;
4143 + vbase = mw->vbase;
4144 + dst_vaddr = dst;
4145 dst_phys = mw->phys_addr + (dst_vaddr - vbase);
4146
4147 unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
4148 @@ -261,13 +262,13 @@ err_get_unmap:
4149 return 0;
4150 }
4151
4152 -static int perf_move_data(struct pthr_ctx *pctx, char *dst, char *src,
4153 +static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
4154 u64 buf_size, u64 win_size, u64 total)
4155 {
4156 int chunks, total_chunks, i;
4157 int copied_chunks = 0;
4158 u64 copied = 0, result;
4159 - char *tmp = dst;
4160 + char __iomem *tmp = dst;
4161 u64 perf, diff_us;
4162 ktime_t kstart, kstop, kdiff;
4163
4164 @@ -324,7 +325,7 @@ static int ntb_perf_thread(void *data)
4165 struct perf_ctx *perf = pctx->perf;
4166 struct pci_dev *pdev = perf->ntb->pdev;
4167 struct perf_mw *mw = &perf->mw;
4168 - char *dst;
4169 + char __iomem *dst;
4170 u64 win_size, buf_size, total;
4171 void *src;
4172 int rc, node, i;
4173 @@ -364,7 +365,7 @@ static int ntb_perf_thread(void *data)
4174 if (buf_size > MAX_TEST_SIZE)
4175 buf_size = MAX_TEST_SIZE;
4176
4177 - dst = (char *)mw->vbase;
4178 + dst = (char __iomem *)mw->vbase;
4179
4180 atomic_inc(&perf->tsync);
4181 while (atomic_read(&perf->tsync) != perf->perf_threads)
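
Note: the ntb_perf changes are annotation fixes: memory-window pointers are MMIO mappings, so they carry __iomem, and the old round-trip through u64 hid that from sparse. Kept in pointer form the offset arithmetic stays type-checked (sketch; __iomem is a sparse-only attribute):

	/* fine: both pointers live in the __iomem address space */
	static size_t mw_offset(char __iomem *vbase, char __iomem *dst)
	{
		return dst - vbase;
	}

A deliberate escape back to an integer needs a __force cast, as in the dst_off computation above; otherwise sparse flags the conversion.
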
4182 diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
4183 index fe600964fa50..88ccfeaa49c7 100644
4184 --- a/drivers/pci/host/pci-imx6.c
4185 +++ b/drivers/pci/host/pci-imx6.c
4186 @@ -32,7 +32,7 @@
4187 #define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
4188
4189 struct imx6_pcie {
4190 - struct gpio_desc *reset_gpio;
4191 + int reset_gpio;
4192 struct clk *pcie_bus;
4193 struct clk *pcie_phy;
4194 struct clk *pcie;
4195 @@ -287,10 +287,10 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
4196 usleep_range(200, 500);
4197
4198 /* Some boards don't have PCIe reset GPIO. */
4199 - if (imx6_pcie->reset_gpio) {
4200 - gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0);
4201 + if (gpio_is_valid(imx6_pcie->reset_gpio)) {
4202 + gpio_set_value_cansleep(imx6_pcie->reset_gpio, 0);
4203 msleep(100);
4204 - gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1);
4205 + gpio_set_value_cansleep(imx6_pcie->reset_gpio, 1);
4206 }
4207 return 0;
4208
4209 @@ -561,6 +561,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
4210 {
4211 struct imx6_pcie *imx6_pcie;
4212 struct pcie_port *pp;
4213 + struct device_node *np = pdev->dev.of_node;
4214 struct resource *dbi_base;
4215 int ret;
4216
4217 @@ -581,8 +582,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
4218 return PTR_ERR(pp->dbi_base);
4219
4220 /* Fetch GPIOs */
4221 - imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
4222 - GPIOD_OUT_LOW);
4223 + imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
4224 + if (gpio_is_valid(imx6_pcie->reset_gpio)) {
4225 + ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
4226 + GPIOF_OUT_INIT_LOW, "PCIe reset");
4227 + if (ret) {
4228 + dev_err(&pdev->dev, "unable to get reset gpio\n");
4229 + return ret;
4230 + }
4231 + }
4232
4233 /* Fetch clocks */
4234 imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
4235 diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
4236 index e96e86d2e745..3878d23ca7a8 100644
4237 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
4238 +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
4239 @@ -949,7 +949,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
4240 struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent);
4241 int eint_num, virq, eint_offset;
4242 unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
4243 - static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256};
4244 + static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
4245 + 128000, 256000};
4246 const struct mtk_desc_pin *pin;
4247 struct irq_data *d;
4248
4249 @@ -967,9 +968,9 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
4250 if (!mtk_eint_can_en_debounce(pctl, eint_num))
4251 return -ENOSYS;
4252
4253 - dbnc = ARRAY_SIZE(dbnc_arr);
4254 - for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) {
4255 - if (debounce <= dbnc_arr[i]) {
4256 + dbnc = ARRAY_SIZE(debounce_time);
4257 + for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
4258 + if (debounce <= debounce_time[i]) {
4259 dbnc = i;
4260 break;
4261 }
4262 diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
4263 index d24e5f1d1525..bd2e657163b8 100644
4264 --- a/drivers/pinctrl/pinctrl-single.c
4265 +++ b/drivers/pinctrl/pinctrl-single.c
4266 @@ -1273,9 +1273,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
4267
4268 /* Parse pins in each row from LSB */
4269 while (mask) {
4270 - bit_pos = ffs(mask);
4271 + bit_pos = __ffs(mask);
4272 pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
4273 - mask_pos = ((pcs->fmask) << (bit_pos - 1));
4274 + mask_pos = ((pcs->fmask) << bit_pos);
4275 val_pos = val & mask_pos;
4276 submask = mask & mask_pos;
4277
4278 @@ -1844,7 +1844,7 @@ static int pcs_probe(struct platform_device *pdev)
4279 ret = of_property_read_u32(np, "pinctrl-single,function-mask",
4280 &pcs->fmask);
4281 if (!ret) {
4282 - pcs->fshift = ffs(pcs->fmask) - 1;
4283 + pcs->fshift = __ffs(pcs->fmask);
4284 pcs->fmax = pcs->fmask >> pcs->fshift;
4285 } else {
4286 /* If mask property doesn't exist, function mux is invalid. */
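
Note: both pinctrl-single hunks fix the same off-by-one: ffs() is 1-based (returning 0 when no bit is set) while __ffs() is 0-based; the old code compensated in the shift (`bit_pos - 1`) but not in pin_num_from_lsb, misnumbering the pins. A runnable comparison (my__ffs stands in for the kernel's __ffs()):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	/* 0-based find-first-set, like __ffs(); undefined for 0 */
	static unsigned int my__ffs(unsigned int x)
	{
		return __builtin_ctz(x);
	}

	int main(void)
	{
		unsigned int mask = 0xff00;

		printf("ffs   = %d (1-based)\n", ffs(mask));	 /* 9 */
		printf("__ffs = %u (0-based)\n", my__ffs(mask)); /* 8 */
		return 0;
	}

With __ffs() every user of bit_pos sees the same 0-based index, so the compensation terms disappear.
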
4287 diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
4288 index 73833079bac8..d6baea6a7544 100644
4289 --- a/drivers/platform/x86/toshiba_acpi.c
4290 +++ b/drivers/platform/x86/toshiba_acpi.c
4291 @@ -133,7 +133,7 @@ MODULE_LICENSE("GPL");
4292 /* Field definitions */
4293 #define HCI_ACCEL_MASK 0x7fff
4294 #define HCI_HOTKEY_DISABLE 0x0b
4295 -#define HCI_HOTKEY_ENABLE 0x01
4296 +#define HCI_HOTKEY_ENABLE 0x09
4297 #define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
4298 #define HCI_LCD_BRIGHTNESS_BITS 3
4299 #define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
4300 diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
4301 index 423ce087cd9c..5d5adee16886 100644
4302 --- a/drivers/pwm/pwm-brcmstb.c
4303 +++ b/drivers/pwm/pwm-brcmstb.c
4304 @@ -274,8 +274,8 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
4305
4306 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4307 p->base = devm_ioremap_resource(&pdev->dev, res);
4308 - if (!p->base) {
4309 - ret = -ENOMEM;
4310 + if (IS_ERR(p->base)) {
4311 + ret = PTR_ERR(p->base);
4312 goto out_clk;
4313 }
4314
4315 diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
4316 index 58f5d3b8e981..27343e1c43ef 100644
4317 --- a/drivers/regulator/s5m8767.c
4318 +++ b/drivers/regulator/s5m8767.c
4319 @@ -202,9 +202,10 @@ static int s5m8767_get_register(struct s5m8767_info *s5m8767, int reg_id,
4320 }
4321 }
4322
4323 - if (i < s5m8767->num_regulators)
4324 - *enable_ctrl =
4325 - s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
4326 + if (i >= s5m8767->num_regulators)
4327 + return -EINVAL;
4328 +
4329 + *enable_ctrl = s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
4330
4331 return 0;
4332 }
4333 @@ -937,8 +938,12 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
4334 else
4335 regulators[id].vsel_mask = 0xff;
4336
4337 - s5m8767_get_register(s5m8767, id, &enable_reg,
4338 + ret = s5m8767_get_register(s5m8767, id, &enable_reg,
4339 &enable_val);
4340 + if (ret) {
4341 + dev_err(s5m8767->dev, "error reading registers\n");
4342 + return ret;
4343 + }
4344 regulators[id].enable_reg = enable_reg;
4345 regulators[id].enable_mask = S5M8767_ENCTRL_MASK;
4346 regulators[id].enable_val = enable_val;
4347 diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
4348 index 535050fc5e9f..08e0ff8c786a 100644
4349 --- a/drivers/rtc/rtc-ds1685.c
4350 +++ b/drivers/rtc/rtc-ds1685.c
4351 @@ -187,9 +187,9 @@ ds1685_rtc_end_data_access(struct ds1685_priv *rtc)
4352 * Only use this where you are certain another lock will not be held.
4353 */
4354 static inline void
4355 -ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long flags)
4356 +ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long *flags)
4357 {
4358 - spin_lock_irqsave(&rtc->lock, flags);
4359 + spin_lock_irqsave(&rtc->lock, *flags);
4360 ds1685_rtc_switch_to_bank1(rtc);
4361 }
4362
4363 @@ -1300,7 +1300,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
4364 {
4365 struct ds1685_priv *rtc = dev_get_drvdata(dev);
4366 u8 reg = 0, bit = 0, tmp;
4367 - unsigned long flags = 0;
4368 + unsigned long flags;
4369 long int val = 0;
4370 const struct ds1685_rtc_ctrl_regs *reg_info =
4371 ds1685_rtc_sysfs_ctrl_regs_lookup(attr->attr.name);
4372 @@ -1321,7 +1321,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
4373 bit = reg_info->bit;
4374
4375 /* Safe to spinlock during a write. */
4376 - ds1685_rtc_begin_ctrl_access(rtc, flags);
4377 + ds1685_rtc_begin_ctrl_access(rtc, &flags);
4378 tmp = rtc->read(rtc, reg);
4379 rtc->write(rtc, reg, (val ? (tmp | bit) : (tmp & ~(bit))));
4380 ds1685_rtc_end_ctrl_access(rtc, flags);
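
Note: this is a classic spin_lock_irqsave() pitfall: it is a macro that writes the saved interrupt state into its flags argument, so a wrapper taking flags by value stores the state in its own copy and the caller later restores an uninitialized value. Side by side (sketch mirroring the functions above):

	/* broken: the saved IRQ state lands in the local copy */
	static void begin_access_bad(struct ds1685_priv *rtc,
				     unsigned long flags)
	{
		spin_lock_irqsave(&rtc->lock, flags);
	}

	/* fixed: write through the pointer the caller handed in */
	static void begin_access_ok(struct ds1685_priv *rtc,
				    unsigned long *flags)
	{
		spin_lock_irqsave(&rtc->lock, *flags);
	}
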
4381 diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
4382 index 097325d96db5..b1b4746a0eab 100644
4383 --- a/drivers/rtc/rtc-hym8563.c
4384 +++ b/drivers/rtc/rtc-hym8563.c
4385 @@ -144,7 +144,7 @@ static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
4386 * it does not seem to carry it over a subsequent write/read.
4387 * So we'll limit ourself to 100 years, starting at 2000 for now.
4388 */
4389 - buf[6] = tm->tm_year - 100;
4390 + buf[6] = bin2bcd(tm->tm_year - 100);
4391
4392 /*
4393 * CTL1 only contains TEST-mode bits apart from stop,
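
Note: the hym8563 keeps the year as two packed BCD digits, and every other field written in set_time already goes through bin2bcd(); the year was the one raw binary value. A standalone demonstration (bin2bcd reproduced as in lib/bcd.c, valid for 0..99):

	#include <stdio.h>

	static unsigned char bin2bcd(unsigned char val)
	{
		return ((val / 10) << 4) | (val % 10);
	}

	int main(void)
	{
		int tm_year = 116;			/* 2016 as years since 1900 */
		unsigned char raw = tm_year - 100;	/* 16  == 0x10 */
		unsigned char bcd = bin2bcd(raw);	/* 0x16 */

		printf("raw 0x%02x vs bcd 0x%02x\n", raw, bcd);
		return 0;
	}

The chip expects the digits '1' and '6' in separate nibbles (0x16); writing the raw 0x10 stores the year as 10 instead of 16.
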
4394 diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
4395 index 7184a0eda793..725dccae24e7 100644
4396 --- a/drivers/rtc/rtc-max77686.c
4397 +++ b/drivers/rtc/rtc-max77686.c
4398 @@ -465,7 +465,7 @@ static int max77686_rtc_probe(struct platform_device *pdev)
4399
4400 info->virq = regmap_irq_get_virq(max77686->rtc_irq_data,
4401 MAX77686_RTCIRQ_RTCA1);
4402 - if (!info->virq) {
4403 + if (info->virq <= 0) {
4404 ret = -ENXIO;
4405 goto err_rtc;
4406 }
4407 diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
4408 index bd911bafb809..17341feadad1 100644
4409 --- a/drivers/rtc/rtc-rx8025.c
4410 +++ b/drivers/rtc/rtc-rx8025.c
4411 @@ -65,7 +65,6 @@
4412
4413 static const struct i2c_device_id rx8025_id[] = {
4414 { "rx8025", 0 },
4415 - { "rv8803", 1 },
4416 { }
4417 };
4418 MODULE_DEVICE_TABLE(i2c, rx8025_id);
4419 diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
4420 index f64c282275b3..e1b86bb01062 100644
4421 --- a/drivers/rtc/rtc-vr41xx.c
4422 +++ b/drivers/rtc/rtc-vr41xx.c
4423 @@ -272,12 +272,13 @@ static irqreturn_t rtclong1_interrupt(int irq, void *dev_id)
4424 }
4425
4426 static const struct rtc_class_ops vr41xx_rtc_ops = {
4427 - .release = vr41xx_rtc_release,
4428 - .ioctl = vr41xx_rtc_ioctl,
4429 - .read_time = vr41xx_rtc_read_time,
4430 - .set_time = vr41xx_rtc_set_time,
4431 - .read_alarm = vr41xx_rtc_read_alarm,
4432 - .set_alarm = vr41xx_rtc_set_alarm,
4433 + .release = vr41xx_rtc_release,
4434 + .ioctl = vr41xx_rtc_ioctl,
4435 + .read_time = vr41xx_rtc_read_time,
4436 + .set_time = vr41xx_rtc_set_time,
4437 + .read_alarm = vr41xx_rtc_read_alarm,
4438 + .set_alarm = vr41xx_rtc_set_alarm,
4439 + .alarm_irq_enable = vr41xx_rtc_alarm_irq_enable,
4440 };
4441
4442 static int rtc_probe(struct platform_device *pdev)
4443 diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
4444 index e5647d59224f..0b331c9c0a8f 100644
4445 --- a/drivers/scsi/device_handler/Kconfig
4446 +++ b/drivers/scsi/device_handler/Kconfig
4447 @@ -13,13 +13,13 @@ menuconfig SCSI_DH
4448
4449 config SCSI_DH_RDAC
4450 tristate "LSI RDAC Device Handler"
4451 - depends on SCSI_DH
4452 + depends on SCSI_DH && SCSI
4453 help
4454 If you have a LSI RDAC select y. Otherwise, say N.
4455
4456 config SCSI_DH_HP_SW
4457 tristate "HP/COMPAQ MSA Device Handler"
4458 - depends on SCSI_DH
4459 + depends on SCSI_DH && SCSI
4460 help
4461 If you have a HP/COMPAQ MSA device that requires START_STOP to
4462 be sent to start it and cannot upgrade the firmware then select y.
4463 @@ -27,13 +27,13 @@ config SCSI_DH_HP_SW
4464
4465 config SCSI_DH_EMC
4466 tristate "EMC CLARiiON Device Handler"
4467 - depends on SCSI_DH
4468 + depends on SCSI_DH && SCSI
4469 help
4470 If you have a EMC CLARiiON select y. Otherwise, say N.
4471
4472 config SCSI_DH_ALUA
4473 tristate "SPC-3 ALUA Device Handler"
4474 - depends on SCSI_DH
4475 + depends on SCSI_DH && SCSI
4476 help
4477 SCSI Device handler for generic SPC-3 Asymmetric Logical Unit
4478 Access (ALUA).
4479 diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
4480 index 97a1c1c33b05..00ce3e269a43 100644
4481 --- a/drivers/scsi/megaraid/megaraid_sas_base.c
4482 +++ b/drivers/scsi/megaraid/megaraid_sas_base.c
4483 @@ -6282,12 +6282,13 @@ out:
4484 }
4485
4486 for (i = 0; i < ioc->sge_count; i++) {
4487 - if (kbuff_arr[i])
4488 + if (kbuff_arr[i]) {
4489 dma_free_coherent(&instance->pdev->dev,
4490 le32_to_cpu(kern_sge32[i].length),
4491 kbuff_arr[i],
4492 le32_to_cpu(kern_sge32[i].phys_addr));
4493 kbuff_arr[i] = NULL;
4494 + }
4495 }
4496
4497 megasas_return_cmd(instance, cmd);
4498 diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
4499 index 7cb1b2d710c1..475fb44c1883 100644
4500 --- a/drivers/spi/spi-rockchip.c
4501 +++ b/drivers/spi/spi-rockchip.c
4502 @@ -265,7 +265,10 @@ static inline u32 rx_max(struct rockchip_spi *rs)
4503 static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
4504 {
4505 u32 ser;
4506 - struct rockchip_spi *rs = spi_master_get_devdata(spi->master);
4507 + struct spi_master *master = spi->master;
4508 + struct rockchip_spi *rs = spi_master_get_devdata(master);
4509 +
4510 + pm_runtime_get_sync(rs->dev);
4511
4512 ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
4513
4514 @@ -290,6 +293,8 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
4515 ser &= ~(1 << spi->chip_select);
4516
4517 writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
4518 +
4519 + pm_runtime_put_sync(rs->dev);
4520 }
4521
4522 static int rockchip_spi_prepare_message(struct spi_master *master,
4523 diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
4524 index 05de0dad8762..4c6f1d7d2eaf 100644
4525 --- a/drivers/staging/rdma/hfi1/TODO
4526 +++ b/drivers/staging/rdma/hfi1/TODO
4527 @@ -3,4 +3,4 @@ July, 2015
4528 - Remove unneeded file entries in sysfs
4529 - Remove software processing of IB protocol and place in library for use
4530 by qib, ipath (if still present), hfi1, and eventually soft-roce
4531 -
4532 +- Replace incorrect uAPI
4533 diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
4534 index d57d549052c8..29ae777556d2 100644
4535 --- a/drivers/staging/rdma/hfi1/file_ops.c
4536 +++ b/drivers/staging/rdma/hfi1/file_ops.c
4537 @@ -52,6 +52,8 @@
4538 #include <linux/vmalloc.h>
4539 #include <linux/io.h>
4540
4541 +#include <rdma/ib.h>
4542 +
4543 #include "hfi.h"
4544 #include "pio.h"
4545 #include "device.h"
4546 @@ -194,6 +196,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
4547 int uctxt_required = 1;
4548 int must_be_root = 0;
4549
4550 + /* FIXME: This interface cannot continue out of staging */
4551 + if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
4552 + return -EACCES;
4553 +
4554 if (count < sizeof(cmd)) {
4555 ret = -EINVAL;
4556 goto bail;
4557 diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
4558 index b58e3fb9b311..433085a97626 100644
4559 --- a/drivers/thermal/rockchip_thermal.c
4560 +++ b/drivers/thermal/rockchip_thermal.c
4561 @@ -693,15 +693,14 @@ static int rockchip_configure_from_dt(struct device *dev,
4562 thermal->chip->tshut_temp);
4563 thermal->tshut_temp = thermal->chip->tshut_temp;
4564 } else {
4565 + if (shut_temp > INT_MAX) {
4566 + dev_err(dev, "Invalid tshut temperature specified: %d\n",
4567 + shut_temp);
4568 + return -ERANGE;
4569 + }
4570 thermal->tshut_temp = shut_temp;
4571 }
4572
4573 - if (thermal->tshut_temp > INT_MAX) {
4574 - dev_err(dev, "Invalid tshut temperature specified: %d\n",
4575 - thermal->tshut_temp);
4576 - return -ERANGE;
4577 - }
4578 -
4579 if (of_property_read_u32(np, "rockchip,hw-tshut-mode", &tshut_mode)) {
4580 dev_warn(dev,
4581 "Missing tshut mode property, using default (%s)\n",
4582 diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
4583 index 9eb1cff28bd4..b8b580e5ae6e 100644
4584 --- a/drivers/usb/core/hcd-pci.c
4585 +++ b/drivers/usb/core/hcd-pci.c
4586 @@ -74,6 +74,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
4587 if (companion->bus != pdev->bus ||
4588 PCI_SLOT(companion->devfn) != slot)
4589 continue;
4590 +
4591 + /*
4592 +	 * Companion device should be either UHCI, OHCI or EHCI host
4593 + * controller, otherwise skip.
4594 + */
4595 + if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
4596 + companion->class != CL_EHCI)
4597 + continue;
4598 +
4599 companion_hcd = pci_get_drvdata(companion);
4600 if (!companion_hcd || !companion_hcd->self.root_hub)
4601 continue;
4602 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
4603 index cf43e9e18368..79d895c2dd71 100644
4604 --- a/drivers/usb/gadget/function/f_fs.c
4605 +++ b/drivers/usb/gadget/function/f_fs.c
4606 @@ -646,6 +646,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
4607 work);
4608 int ret = io_data->req->status ? io_data->req->status :
4609 io_data->req->actual;
4610 + bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
4611
4612 if (io_data->read && ret > 0) {
4613 use_mm(io_data->mm);
4614 @@ -657,13 +658,11 @@ static void ffs_user_copy_worker(struct work_struct *work)
4615
4616 io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
4617
4618 - if (io_data->ffs->ffs_eventfd &&
4619 - !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
4620 + if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
4621 eventfd_signal(io_data->ffs->ffs_eventfd, 1);
4622
4623 usb_ep_free_request(io_data->ep, io_data->req);
4624
4625 - io_data->kiocb->private = NULL;
4626 if (io_data->read)
4627 kfree(io_data->to_free);
4628 kfree(io_data->buf);
4629 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
4630 index 5cd080e0a685..743d9a20e248 100644
4631 --- a/drivers/usb/host/xhci-mem.c
4632 +++ b/drivers/usb/host/xhci-mem.c
4633 @@ -1873,6 +1873,12 @@ no_bw:
4634 kfree(xhci->rh_bw);
4635 kfree(xhci->ext_caps);
4636
4637 + xhci->usb2_ports = NULL;
4638 + xhci->usb3_ports = NULL;
4639 + xhci->port_array = NULL;
4640 + xhci->rh_bw = NULL;
4641 + xhci->ext_caps = NULL;
4642 +
4643 xhci->page_size = 0;
4644 xhci->page_shift = 0;
4645 xhci->bus_state[0].bus_suspended = 0;
4646 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
4647 index f0640b7a1c42..48672fac7ff3 100644
4648 --- a/drivers/usb/host/xhci-pci.c
4649 +++ b/drivers/usb/host/xhci-pci.c
4650 @@ -48,6 +48,7 @@
4651 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
4652 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
4653 #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
4654 +#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
4655
4656 static const char hcd_name[] = "xhci_hcd";
4657
4658 @@ -155,7 +156,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
4659 (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
4660 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
4661 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
4662 - pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
4663 + pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
4664 + pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
4665 xhci->quirks |= XHCI_PME_STUCK_QUIRK;
4666 }
4667 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
4668 @@ -302,6 +304,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
4669 struct xhci_hcd *xhci;
4670
4671 xhci = hcd_to_xhci(pci_get_drvdata(dev));
4672 + xhci->xhc_state |= XHCI_STATE_REMOVING;
4673 if (xhci->shared_hcd) {
4674 usb_remove_hcd(xhci->shared_hcd);
4675 usb_put_hcd(xhci->shared_hcd);
4676 diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
4677 index d39d6bf1d090..d4962208be30 100644
4678 --- a/drivers/usb/host/xhci-plat.c
4679 +++ b/drivers/usb/host/xhci-plat.c
4680 @@ -39,12 +39,25 @@ static const struct xhci_driver_overrides xhci_plat_overrides __initconst = {
4681
4682 static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
4683 {
4684 + struct usb_hcd *hcd = xhci_to_hcd(xhci);
4685 +
4686 /*
4687 * As of now platform drivers don't provide MSI support so we ensure
4688 * here that the generic code does not try to make a pci_dev from our
4689 * dev struct in order to setup MSI
4690 */
4691 xhci->quirks |= XHCI_PLAT;
4692 +
4693 + /*
4694 + * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
4695 + * to 1. However, these SoCs don't support 64-bit address memory
4696 + * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
4697 + * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
4698 + * xhci_gen_setup().
4699 + */
4700 + if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2) ||
4701 + xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3))
4702 + xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
4703 }
4704
4705 /* called during probe() after chip reset completes */
4706 diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
4707 index 5a2e2e3936c4..529c3c40f901 100644
4708 --- a/drivers/usb/host/xhci-plat.h
4709 +++ b/drivers/usb/host/xhci-plat.h
4710 @@ -14,7 +14,7 @@
4711 #include "xhci.h" /* for hcd_to_xhci() */
4712
4713 enum xhci_plat_type {
4714 - XHCI_PLAT_TYPE_MARVELL_ARMADA,
4715 + XHCI_PLAT_TYPE_MARVELL_ARMADA = 1,
4716 XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2,
4717 XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3,
4718 };
4719 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
4720 index 3915657e6078..a85a1c993d61 100644
4721 --- a/drivers/usb/host/xhci-ring.c
4722 +++ b/drivers/usb/host/xhci-ring.c
4723 @@ -4014,7 +4014,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
4724 int reserved_trbs = xhci->cmd_ring_reserved_trbs;
4725 int ret;
4726
4727 - if (xhci->xhc_state) {
4728 + if ((xhci->xhc_state & XHCI_STATE_DYING) ||
4729 + (xhci->xhc_state & XHCI_STATE_HALTED)) {
4730 xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
4731 return -ESHUTDOWN;
4732 }
4733 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
4734 index 0c8087d3c313..8e713cca58ed 100644
4735 --- a/drivers/usb/host/xhci.c
4736 +++ b/drivers/usb/host/xhci.c
4737 @@ -147,7 +147,8 @@ static int xhci_start(struct xhci_hcd *xhci)
4738 "waited %u microseconds.\n",
4739 XHCI_MAX_HALT_USEC);
4740 if (!ret)
4741 - xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
4742 + /* clear state flags, including dying, halted or removing */
4743 + xhci->xhc_state = 0;
4744
4745 return ret;
4746 }
4747 @@ -1108,8 +1109,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
4748 /* Resume root hubs only when have pending events. */
4749 status = readl(&xhci->op_regs->status);
4750 if (status & STS_EINT) {
4751 - usb_hcd_resume_root_hub(hcd);
4752 usb_hcd_resume_root_hub(xhci->shared_hcd);
4753 + usb_hcd_resume_root_hub(hcd);
4754 }
4755 }
4756
4757 @@ -1124,10 +1125,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
4758
4759 /* Re-enable port polling. */
4760 xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
4761 - set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
4762 - usb_hcd_poll_rh_status(hcd);
4763 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
4764 usb_hcd_poll_rh_status(xhci->shared_hcd);
4765 + set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
4766 + usb_hcd_poll_rh_status(hcd);
4767
4768 return retval;
4769 }
4770 @@ -2770,7 +2771,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
4771 if (ret <= 0)
4772 return ret;
4773 xhci = hcd_to_xhci(hcd);
4774 - if (xhci->xhc_state & XHCI_STATE_DYING)
4775 + if ((xhci->xhc_state & XHCI_STATE_DYING) ||
4776 + (xhci->xhc_state & XHCI_STATE_REMOVING))
4777 return -ENODEV;
4778
4779 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
4780 @@ -3817,7 +3819,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
4781
4782 mutex_lock(&xhci->mutex);
4783
4784 - if (xhci->xhc_state) /* dying or halted */
4785 + if (xhci->xhc_state) /* dying, removing or halted */
4786 goto out;
4787
4788 if (!udev->slot_id) {
4789 @@ -4944,6 +4946,16 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4790 return retval;
4791 xhci_dbg(xhci, "Reset complete\n");
4792
4793 + /*
4794 + * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
4795 + * of HCCPARAMS1 is set to 1. However, the xHCs don't actually
4796 + * support 64-bit address memory pointers. So, this driver clears the AC64
4797 + * bit of xhci->hcc_params to call dma_set_coherent_mask(dev,
4798 + * DMA_BIT_MASK(32)) in this xhci_gen_setup().
4799 + */
4800 + if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
4801 + xhci->hcc_params &= ~BIT(0);
4802 +
4803 /* Set dma_mask and coherent_dma_mask to 64-bits,
4804 * if xHC supports 64-bit addressing */
4805 if (HCC_64BIT_ADDR(xhci->hcc_params) &&
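The xhci-plat hunk and this one cooperate: the platform glue sets XHCI_NO_64BIT_SUPPORT for the R-Car types, and xhci_gen_setup() then masks off AC64 so the existing HCC_64BIT_ADDR() test falls through to the 32-bit DMA path. A condensed sketch of the resulting control flow (not the verbatim driver code):

	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
		xhci->hcc_params &= ~BIT(0);	/* pretend AC64 == 0 */

	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
	    !dma_set_mask(dev, DMA_BIT_MASK(64))) {
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	} else {
		/* quirky or genuinely 32-bit controllers end up here */
		dma_set_mask(dev, DMA_BIT_MASK(32));
		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	}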
4806 diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
4807 index cc651383ce5a..1cdea4a8c895 100644
4808 --- a/drivers/usb/host/xhci.h
4809 +++ b/drivers/usb/host/xhci.h
4810 @@ -1596,6 +1596,7 @@ struct xhci_hcd {
4811 */
4812 #define XHCI_STATE_DYING (1 << 0)
4813 #define XHCI_STATE_HALTED (1 << 1)
4814 +#define XHCI_STATE_REMOVING (1 << 2)
4815 /* Statistics */
4816 int error_bitmask;
4817 unsigned int quirks;
4818 @@ -1632,6 +1633,7 @@ struct xhci_hcd {
4819 #define XHCI_PME_STUCK_QUIRK (1 << 20)
4820 #define XHCI_MTK_HOST (1 << 21)
4821 #define XHCI_SSIC_PORT_UNUSED (1 << 22)
4822 +#define XHCI_NO_64BIT_SUPPORT (1 << 23)
4823 unsigned int num_active_eps;
4824 unsigned int limit_active_eps;
4825 /* There are two roothubs to keep track of bus suspend info for */
4826 diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
4827 index facaaf003f19..e40da7759a0e 100644
4828 --- a/drivers/usb/usbip/usbip_common.c
4829 +++ b/drivers/usb/usbip/usbip_common.c
4830 @@ -741,6 +741,17 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
4831 if (!(size > 0))
4832 return 0;
4833
4834 + if (size > urb->transfer_buffer_length) {
4835 + /* should not happen, probably malicious packet */
4836 + if (ud->side == USBIP_STUB) {
4837 + usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
4838 + return 0;
4839 + } else {
4840 + usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
4841 + return -EPIPE;
4842 + }
4843 + }
4844 +
4845 ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
4846 if (ret != size) {
4847 dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
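The added check caps the peer-supplied size against the URB's transfer buffer before anything is received into it. The same defensive rule, reduced to a plain-sockets illustration (names here are illustrative, not the usbip API):

	#include <sys/socket.h>
	#include <sys/types.h>

	/* Never trust a length field that arrived from the wire. */
	static ssize_t recv_payload(int fd, void *buf, size_t buf_len,
				    size_t claimed)
	{
		if (claimed > buf_len)
			return -1;	/* malformed or malicious packet */
		return recv(fd, buf, claimed, MSG_WAITALL);
	}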
4848 diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
4849 index 8ea45a5cd806..d889ef2048df 100644
4850 --- a/drivers/video/fbdev/Kconfig
4851 +++ b/drivers/video/fbdev/Kconfig
4852 @@ -2246,7 +2246,6 @@ config XEN_FBDEV_FRONTEND
4853 select FB_SYS_IMAGEBLIT
4854 select FB_SYS_FOPS
4855 select FB_DEFERRED_IO
4856 - select INPUT_XEN_KBDDEV_FRONTEND if INPUT_MISC
4857 select XEN_XENBUS_FRONTEND
4858 default y
4859 help
4860 diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
4861 index 9362424c2340..f9ef06d0cd48 100644
4862 --- a/drivers/video/fbdev/amba-clcd.c
4863 +++ b/drivers/video/fbdev/amba-clcd.c
4864 @@ -440,13 +440,14 @@ static int clcdfb_register(struct clcd_fb *fb)
4865 fb->off_ienb = CLCD_PL111_IENB;
4866 fb->off_cntl = CLCD_PL111_CNTL;
4867 } else {
4868 -#ifdef CONFIG_ARCH_VERSATILE
4869 - fb->off_ienb = CLCD_PL111_IENB;
4870 - fb->off_cntl = CLCD_PL111_CNTL;
4871 -#else
4872 - fb->off_ienb = CLCD_PL110_IENB;
4873 - fb->off_cntl = CLCD_PL110_CNTL;
4874 -#endif
4875 + if (of_machine_is_compatible("arm,versatile-ab") ||
4876 + of_machine_is_compatible("arm,versatile-pb")) {
4877 + fb->off_ienb = CLCD_PL111_IENB;
4878 + fb->off_cntl = CLCD_PL111_CNTL;
4879 + } else {
4880 + fb->off_ienb = CLCD_PL110_IENB;
4881 + fb->off_cntl = CLCD_PL110_CNTL;
4882 + }
4883 }
4884
4885 fb->clk = clk_get(&fb->dev->dev, NULL);
4886 diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
4887 index 6b2a06d09f2b..d8d583d32a37 100644
4888 --- a/drivers/video/fbdev/da8xx-fb.c
4889 +++ b/drivers/video/fbdev/da8xx-fb.c
4890 @@ -209,8 +209,7 @@ static struct fb_videomode known_lcd_panels[] = {
4891 .lower_margin = 2,
4892 .hsync_len = 0,
4893 .vsync_len = 0,
4894 - .sync = FB_SYNC_CLK_INVERT |
4895 - FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
4896 + .sync = FB_SYNC_CLK_INVERT,
4897 },
4898 /* Sharp LK043T1DG01 */
4899 [1] = {
4900 @@ -224,7 +223,7 @@ static struct fb_videomode known_lcd_panels[] = {
4901 .lower_margin = 2,
4902 .hsync_len = 41,
4903 .vsync_len = 10,
4904 - .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
4905 + .sync = 0,
4906 .flag = 0,
4907 },
4908 [2] = {
4909 @@ -239,7 +238,7 @@ static struct fb_videomode known_lcd_panels[] = {
4910 .lower_margin = 10,
4911 .hsync_len = 10,
4912 .vsync_len = 10,
4913 - .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
4914 + .sync = 0,
4915 .flag = 0,
4916 },
4917 [3] = {
4918 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
4919 index 4545e2e2ad45..d8d68af5aef0 100644
4920 --- a/fs/btrfs/disk-io.c
4921 +++ b/fs/btrfs/disk-io.c
4922 @@ -1830,7 +1830,7 @@ static int cleaner_kthread(void *arg)
4923 */
4924 btrfs_delete_unused_bgs(root->fs_info);
4925 sleep:
4926 - if (!try_to_freeze() && !again) {
4927 + if (!again) {
4928 set_current_state(TASK_INTERRUPTIBLE);
4929 if (!kthread_should_stop())
4930 schedule();
4931 diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
4932 index 0e1e61a7ec23..d39f714dabeb 100644
4933 --- a/fs/btrfs/tests/btrfs-tests.c
4934 +++ b/fs/btrfs/tests/btrfs-tests.c
4935 @@ -189,12 +189,6 @@ btrfs_alloc_dummy_block_group(unsigned long length)
4936 kfree(cache);
4937 return NULL;
4938 }
4939 - cache->fs_info = btrfs_alloc_dummy_fs_info();
4940 - if (!cache->fs_info) {
4941 - kfree(cache->free_space_ctl);
4942 - kfree(cache);
4943 - return NULL;
4944 - }
4945
4946 cache->key.objectid = 0;
4947 cache->key.offset = length;
4948 diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
4949 index d05fe1ab4808..7cea4462acd5 100644
4950 --- a/fs/btrfs/tests/free-space-tree-tests.c
4951 +++ b/fs/btrfs/tests/free-space-tree-tests.c
4952 @@ -485,6 +485,7 @@ static int run_test(test_func_t test_func, int bitmaps)
4953 cache->bitmap_low_thresh = 0;
4954 cache->bitmap_high_thresh = (u32)-1;
4955 cache->needs_free_space = 1;
4956 + cache->fs_info = root->fs_info;
4957
4958 btrfs_init_dummy_trans(&trans);
4959
4960 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
4961 index bece948b363d..8580831ed237 100644
4962 --- a/fs/debugfs/inode.c
4963 +++ b/fs/debugfs/inode.c
4964 @@ -457,7 +457,7 @@ struct dentry *debugfs_create_automount(const char *name,
4965 if (unlikely(!inode))
4966 return failed_creating(dentry);
4967
4968 - inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
4969 + make_empty_dir_inode(inode);
4970 inode->i_flags |= S_AUTOMOUNT;
4971 inode->i_private = data;
4972 dentry->d_fsdata = (void *)f;
4973 diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
4974 index ecb54394492a..25634c353191 100644
4975 --- a/fs/ext4/crypto.c
4976 +++ b/fs/ext4/crypto.c
4977 @@ -34,6 +34,7 @@
4978 #include <linux/random.h>
4979 #include <linux/scatterlist.h>
4980 #include <linux/spinlock_types.h>
4981 +#include <linux/namei.h>
4982
4983 #include "ext4_extents.h"
4984 #include "xattr.h"
4985 @@ -479,6 +480,9 @@ static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
4986 struct ext4_crypt_info *ci;
4987 int dir_has_key, cached_with_key;
4988
4989 + if (flags & LOOKUP_RCU)
4990 + return -ECHILD;
4991 +
4992 dir = dget_parent(dentry);
4993 if (!ext4_encrypted_inode(d_inode(dir))) {
4994 dput(dir);
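->d_revalidate() can be invoked in RCU-walk mode (LOOKUP_RCU), where blocking or taking references such as dget_parent() is forbidden; returning -ECHILD tells the VFS to retry the lookup in ref-walk mode. The skeleton every such helper follows (a sketch, not the full ext4 routine):

	static int example_d_revalidate(struct dentry *dentry, unsigned int flags)
	{
		struct dentry *dir;

		if (flags & LOOKUP_RCU)
			return -ECHILD;	/* restart in ref-walk mode */

		dir = dget_parent(dentry);	/* now safe to take references */
		/* ... validate against d_inode(dir) ... */
		dput(dir);
		return 1;	/* 1 = still valid, 0 = invalidate */
	}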
4995 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
4996 index aee960b1af34..e6218cbc8332 100644
4997 --- a/fs/ext4/inode.c
4998 +++ b/fs/ext4/inode.c
4999 @@ -5261,6 +5261,8 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
5000 might_sleep();
5001 trace_ext4_mark_inode_dirty(inode, _RET_IP_);
5002 err = ext4_reserve_inode_write(handle, inode, &iloc);
5003 + if (err)
5004 + return err;
5005 if (ext4_handle_valid(handle) &&
5006 EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
5007 !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
5008 @@ -5291,9 +5293,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
5009 }
5010 }
5011 }
5012 - if (!err)
5013 - err = ext4_mark_iloc_dirty(handle, inode, &iloc);
5014 - return err;
5015 + return ext4_mark_iloc_dirty(handle, inode, &iloc);
5016 }
5017
5018 /*
5019 diff --git a/fs/f2fs/crypto_policy.c b/fs/f2fs/crypto_policy.c
5020 index d4a96af513c2..596f02490f27 100644
5021 --- a/fs/f2fs/crypto_policy.c
5022 +++ b/fs/f2fs/crypto_policy.c
5023 @@ -192,7 +192,8 @@ int f2fs_inherit_context(struct inode *parent, struct inode *child,
5024 return res;
5025
5026 ci = F2FS_I(parent)->i_crypt_info;
5027 - BUG_ON(ci == NULL);
5028 + if (ci == NULL)
5029 + return -ENOKEY;
5030
5031 ctx.format = F2FS_ENCRYPTION_CONTEXT_FORMAT_V1;
5032
5033 diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
5034 index 5c06db17e41f..44802599fa67 100644
5035 --- a/fs/f2fs/data.c
5036 +++ b/fs/f2fs/data.c
5037 @@ -67,7 +67,6 @@ static void f2fs_write_end_io(struct bio *bio)
5038 f2fs_restore_and_release_control_page(&page);
5039
5040 if (unlikely(bio->bi_error)) {
5041 - set_page_dirty(page);
5042 set_bit(AS_EIO, &page->mapping->flags);
5043 f2fs_stop_checkpoint(sbi);
5044 }
5045 @@ -504,7 +503,7 @@ static int __allocate_data_blocks(struct inode *inode, loff_t offset,
5046 struct dnode_of_data dn;
5047 u64 start = F2FS_BYTES_TO_BLK(offset);
5048 u64 len = F2FS_BYTES_TO_BLK(count);
5049 - bool allocated;
5050 + bool allocated = false;
5051 u64 end_offset;
5052 int err = 0;
5053
5054 @@ -546,7 +545,7 @@ static int __allocate_data_blocks(struct inode *inode, loff_t offset,
5055 f2fs_put_dnode(&dn);
5056 f2fs_unlock_op(sbi);
5057
5058 - f2fs_balance_fs(sbi, dn.node_changed);
5059 + f2fs_balance_fs(sbi, allocated);
5060 }
5061 return err;
5062
5063 @@ -556,7 +555,7 @@ sync_out:
5064 f2fs_put_dnode(&dn);
5065 out:
5066 f2fs_unlock_op(sbi);
5067 - f2fs_balance_fs(sbi, dn.node_changed);
5068 + f2fs_balance_fs(sbi, allocated);
5069 return err;
5070 }
5071
5072 @@ -650,14 +649,14 @@ get_next:
5073 if (dn.ofs_in_node >= end_offset) {
5074 if (allocated)
5075 sync_inode_page(&dn);
5076 - allocated = false;
5077 f2fs_put_dnode(&dn);
5078
5079 if (create) {
5080 f2fs_unlock_op(sbi);
5081 - f2fs_balance_fs(sbi, dn.node_changed);
5082 + f2fs_balance_fs(sbi, allocated);
5083 f2fs_lock_op(sbi);
5084 }
5085 + allocated = false;
5086
5087 set_new_dnode(&dn, inode, NULL, NULL, 0);
5088 err = get_dnode_of_data(&dn, pgofs, mode);
5089 @@ -715,7 +714,7 @@ put_out:
5090 unlock_out:
5091 if (create) {
5092 f2fs_unlock_op(sbi);
5093 - f2fs_balance_fs(sbi, dn.node_changed);
5094 + f2fs_balance_fs(sbi, allocated);
5095 }
5096 out:
5097 trace_f2fs_map_blocks(inode, map, err);
5098 diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
5099 index faa7495e2d7e..30e6b6563494 100644
5100 --- a/fs/f2fs/dir.c
5101 +++ b/fs/f2fs/dir.c
5102 @@ -892,11 +892,19 @@ out:
5103 return err;
5104 }
5105
5106 +static int f2fs_dir_open(struct inode *inode, struct file *filp)
5107 +{
5108 + if (f2fs_encrypted_inode(inode))
5109 + return f2fs_get_encryption_info(inode) ? -EACCES : 0;
5110 + return 0;
5111 +}
5112 +
5113 const struct file_operations f2fs_dir_operations = {
5114 .llseek = generic_file_llseek,
5115 .read = generic_read_dir,
5116 .iterate = f2fs_readdir,
5117 .fsync = f2fs_sync_file,
5118 + .open = f2fs_dir_open,
5119 .unlocked_ioctl = f2fs_ioctl,
5120 #ifdef CONFIG_COMPAT
5121 .compat_ioctl = f2fs_compat_ioctl,
5122 diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
5123 index ea272be62677..5a322bc00ac4 100644
5124 --- a/fs/f2fs/file.c
5125 +++ b/fs/f2fs/file.c
5126 @@ -425,6 +425,8 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
5127 err = f2fs_get_encryption_info(inode);
5128 if (err)
5129 return 0;
5130 + if (!f2fs_encrypted_inode(inode))
5131 + return -ENOKEY;
5132 }
5133
5134 /* we don't need to use inline_data strictly */
5135 @@ -444,7 +446,9 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
5136 if (!ret && f2fs_encrypted_inode(inode)) {
5137 ret = f2fs_get_encryption_info(inode);
5138 if (ret)
5139 - ret = -EACCES;
5140 + return -EACCES;
5141 + if (!f2fs_encrypted_inode(inode))
5142 + return -ENOKEY;
5143 }
5144 return ret;
5145 }
5146 diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
5147 index 6f944e5eb76e..7e9e38769660 100644
5148 --- a/fs/f2fs/namei.c
5149 +++ b/fs/f2fs/namei.c
5150 @@ -980,12 +980,6 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
5151 }
5152 memcpy(cstr.name, sd->encrypted_path, cstr.len);
5153
5154 - /* this is broken symlink case */
5155 - if (unlikely(cstr.name[0] == 0)) {
5156 - res = -ENOENT;
5157 - goto errout;
5158 - }
5159 -
5160 if ((cstr.len + sizeof(struct f2fs_encrypted_symlink_data) - 1) >
5161 max_size) {
5162 /* Symlink data on the disk is corrupted */
5163 @@ -1002,6 +996,12 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
5164
5165 kfree(cstr.name);
5166
5167 + /* this is broken symlink case */
5168 + if (unlikely(pstr.name[0] == 0)) {
5169 + res = -ENOENT;
5170 + goto errout;
5171 + }
5172 +
5173 paddr = pstr.name;
5174
5175 /* Null-terminate the name */
5176 diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
5177 index 6134832baaaf..013a62b2f8ca 100644
5178 --- a/fs/f2fs/super.c
5179 +++ b/fs/f2fs/super.c
5180 @@ -926,9 +926,25 @@ static loff_t max_file_blocks(void)
5181 return result;
5182 }
5183
5184 +static int __f2fs_commit_super(struct buffer_head *bh,
5185 + struct f2fs_super_block *super)
5186 +{
5187 + lock_buffer(bh);
5188 + if (super)
5189 + memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
5190 + set_buffer_uptodate(bh);
5191 + set_buffer_dirty(bh);
5192 + unlock_buffer(bh);
5193 +
5194 + /* it's a rare case; we can do FUA all the time */
5195 + return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
5196 +}
5197 +
5198 static inline bool sanity_check_area_boundary(struct super_block *sb,
5199 - struct f2fs_super_block *raw_super)
5200 + struct buffer_head *bh)
5201 {
5202 + struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
5203 + (bh->b_data + F2FS_SUPER_OFFSET);
5204 u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
5205 u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
5206 u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
5207 @@ -942,6 +958,10 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
5208 u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
5209 u32 segment_count = le32_to_cpu(raw_super->segment_count);
5210 u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
5211 + u64 main_end_blkaddr = main_blkaddr +
5212 + (segment_count_main << log_blocks_per_seg);
5213 + u64 seg_end_blkaddr = segment0_blkaddr +
5214 + (segment_count << log_blocks_per_seg);
5215
5216 if (segment0_blkaddr != cp_blkaddr) {
5217 f2fs_msg(sb, KERN_INFO,
5218 @@ -986,22 +1006,45 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
5219 return true;
5220 }
5221
5222 - if (main_blkaddr + (segment_count_main << log_blocks_per_seg) !=
5223 - segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
5224 + if (main_end_blkaddr > seg_end_blkaddr) {
5225 f2fs_msg(sb, KERN_INFO,
5226 - "Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)",
5227 + "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
5228 main_blkaddr,
5229 - segment0_blkaddr + (segment_count << log_blocks_per_seg),
5230 + segment0_blkaddr +
5231 + (segment_count << log_blocks_per_seg),
5232 segment_count_main << log_blocks_per_seg);
5233 return true;
5234 + } else if (main_end_blkaddr < seg_end_blkaddr) {
5235 + int err = 0;
5236 + char *res;
5237 +
5238 + /* fix in-memory information all the time */
5239 + raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
5240 + segment0_blkaddr) >> log_blocks_per_seg);
5241 +
5242 + if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
5243 + res = "internally";
5244 + } else {
5245 + err = __f2fs_commit_super(bh, NULL);
5246 + res = err ? "failed" : "done";
5247 + }
5248 + f2fs_msg(sb, KERN_INFO,
5249 + "Fix alignment : %s, start(%u) end(%u) block(%u)",
5250 + res, main_blkaddr,
5251 + segment0_blkaddr +
5252 + (segment_count << log_blocks_per_seg),
5253 + segment_count_main << log_blocks_per_seg);
5254 + if (err)
5255 + return true;
5256 }
5257 -
5258 return false;
5259 }
5260
5261 static int sanity_check_raw_super(struct super_block *sb,
5262 - struct f2fs_super_block *raw_super)
5263 + struct buffer_head *bh)
5264 {
5265 + struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
5266 + (bh->b_data + F2FS_SUPER_OFFSET);
5267 unsigned int blocksize;
5268
5269 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
5270 @@ -1068,7 +1111,7 @@ static int sanity_check_raw_super(struct super_block *sb,
5271 }
5272
5273 /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
5274 - if (sanity_check_area_boundary(sb, raw_super))
5275 + if (sanity_check_area_boundary(sb, bh))
5276 return 1;
5277
5278 return 0;
5279 @@ -1134,103 +1177,87 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
5280
5281 /*
5282 * Read f2fs raw super block.
5283 - * Because we have two copies of super block, so read the first one at first,
5284 - * if the first one is invalid, move to read the second one.
5285 + * Because we have two copies of the super block, read both of them
5286 + * to get the first valid one. If either of them is broken, we pass
5287 + * the recovery flag back to the caller.
5288 */
5289 static int read_raw_super_block(struct super_block *sb,
5290 struct f2fs_super_block **raw_super,
5291 int *valid_super_block, int *recovery)
5292 {
5293 - int block = 0;
5294 + int block;
5295 struct buffer_head *bh;
5296 - struct f2fs_super_block *super, *buf;
5297 + struct f2fs_super_block *super;
5298 int err = 0;
5299
5300 super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
5301 if (!super)
5302 return -ENOMEM;
5303 -retry:
5304 - bh = sb_bread(sb, block);
5305 - if (!bh) {
5306 - *recovery = 1;
5307 - f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
5308 +
5309 + for (block = 0; block < 2; block++) {
5310 + bh = sb_bread(sb, block);
5311 + if (!bh) {
5312 + f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
5313 block + 1);
5314 - err = -EIO;
5315 - goto next;
5316 - }
5317 + err = -EIO;
5318 + continue;
5319 + }
5320
5321 - buf = (struct f2fs_super_block *)(bh->b_data + F2FS_SUPER_OFFSET);
5322 + /* sanity checking of raw super */
5323 + if (sanity_check_raw_super(sb, bh)) {
5324 + f2fs_msg(sb, KERN_ERR,
5325 + "Can't find valid F2FS filesystem in %dth superblock",
5326 + block + 1);
5327 + err = -EINVAL;
5328 + brelse(bh);
5329 + continue;
5330 + }
5331
5332 - /* sanity checking of raw super */
5333 - if (sanity_check_raw_super(sb, buf)) {
5334 + if (!*raw_super) {
5335 + memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
5336 + sizeof(*super));
5337 + *valid_super_block = block;
5338 + *raw_super = super;
5339 + }
5340 brelse(bh);
5341 - *recovery = 1;
5342 - f2fs_msg(sb, KERN_ERR,
5343 - "Can't find valid F2FS filesystem in %dth superblock",
5344 - block + 1);
5345 - err = -EINVAL;
5346 - goto next;
5347 }
5348
5349 - if (!*raw_super) {
5350 - memcpy(super, buf, sizeof(*super));
5351 - *valid_super_block = block;
5352 - *raw_super = super;
5353 - }
5354 - brelse(bh);
5355 -
5356 -next:
5357 - /* check the validity of the second superblock */
5358 - if (block == 0) {
5359 - block++;
5360 - goto retry;
5361 - }
5362 + /* Failed to read any one of the superblocks */
5363 + if (err < 0)
5364 + *recovery = 1;
5365
5366 /* No valid superblock */
5367 - if (!*raw_super) {
5368 + if (!*raw_super)
5369 kfree(super);
5370 - return err;
5371 - }
5372 + else
5373 + err = 0;
5374
5375 - return 0;
5376 + return err;
5377 }
5378
5379 -static int __f2fs_commit_super(struct f2fs_sb_info *sbi, int block)
5380 +int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
5381 {
5382 - struct f2fs_super_block *super = F2FS_RAW_SUPER(sbi);
5383 struct buffer_head *bh;
5384 int err;
5385
5386 - bh = sb_getblk(sbi->sb, block);
5387 + /* write back-up superblock first */
5388 + bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0 : 1);
5389 if (!bh)
5390 return -EIO;
5391 -
5392 - lock_buffer(bh);
5393 - memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
5394 - set_buffer_uptodate(bh);
5395 - set_buffer_dirty(bh);
5396 - unlock_buffer(bh);
5397 -
5398 - /* it's rare case, we can do fua all the time */
5399 - err = __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
5400 + err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
5401 brelse(bh);
5402
5403 - return err;
5404 -}
5405 -
5406 -int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
5407 -{
5408 - int err;
5409 -
5410 - /* write back-up superblock first */
5411 - err = __f2fs_commit_super(sbi, sbi->valid_super_block ? 0 : 1);
5412 -
5413 /* if we are in recovery path, skip writing valid superblock */
5414 if (recover || err)
5415 return err;
5416
5417 /* write current valid superblock */
5418 - return __f2fs_commit_super(sbi, sbi->valid_super_block);
5419 + bh = sb_getblk(sbi->sb, sbi->valid_super_block);
5420 + if (!bh)
5421 + return -EIO;
5422 + err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
5423 + brelse(bh);
5424 + return err;
5425 }
5426
5427 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
5428 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
5429 index fa95ab2d3674..9d2f3e0a6360 100644
5430 --- a/fs/proc/task_mmu.c
5431 +++ b/fs/proc/task_mmu.c
5432 @@ -1504,6 +1504,32 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
5433 return page;
5434 }
5435
5436 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5437 +static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
5438 + struct vm_area_struct *vma,
5439 + unsigned long addr)
5440 +{
5441 + struct page *page;
5442 + int nid;
5443 +
5444 + if (!pmd_present(pmd))
5445 + return NULL;
5446 +
5447 + page = vm_normal_page_pmd(vma, addr, pmd);
5448 + if (!page)
5449 + return NULL;
5450 +
5451 + if (PageReserved(page))
5452 + return NULL;
5453 +
5454 + nid = page_to_nid(page);
5455 + if (!node_isset(nid, node_states[N_MEMORY]))
5456 + return NULL;
5457 +
5458 + return page;
5459 +}
5460 +#endif
5461 +
5462 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
5463 unsigned long end, struct mm_walk *walk)
5464 {
5465 @@ -1513,14 +1539,14 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
5466 pte_t *orig_pte;
5467 pte_t *pte;
5468
5469 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5470 ptl = pmd_trans_huge_lock(pmd, vma);
5471 if (ptl) {
5472 - pte_t huge_pte = *(pte_t *)pmd;
5473 struct page *page;
5474
5475 - page = can_gather_numa_stats(huge_pte, vma, addr);
5476 + page = can_gather_numa_stats_pmd(*pmd, vma, addr);
5477 if (page)
5478 - gather_stats(page, md, pte_dirty(huge_pte),
5479 + gather_stats(page, md, pmd_dirty(*pmd),
5480 HPAGE_PMD_SIZE/PAGE_SIZE);
5481 spin_unlock(ptl);
5482 return 0;
5483 @@ -1528,6 +1554,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
5484
5485 if (pmd_trans_unstable(pmd))
5486 return 0;
5487 +#endif
5488 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
5489 do {
5490 struct page *page = can_gather_numa_stats(*pte, vma, addr);
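gather_pte_stats() now follows the standard two-level page-table walk: try the PMD as a whole under pmd_trans_huge_lock(), and only fall through to the PTE loop when the PMD does not map a huge page. The shape of that idiom with the NUMA bookkeeping stripped out (handle_huge and handle_pte are hypothetical helpers):

	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		ptl = pmd_trans_huge_lock(pmd, vma);
		if (ptl) {
			/* one huge page covers the whole PMD range */
			handle_huge(*pmd, vma, addr);
			spin_unlock(ptl);
			return 0;
		}
		if (pmd_trans_unstable(pmd))
			return 0;	/* PMD is being split; bail out */
	#endif
		pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		for (; addr != end; pte++, addr += PAGE_SIZE)
			handle_pte(*pte, vma, addr);
		pte_unmap_unlock(pte - 1, ptl);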
5491 diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
5492 index e56272c919b5..bf2d34c9d804 100644
5493 --- a/include/asm-generic/futex.h
5494 +++ b/include/asm-generic/futex.h
5495 @@ -108,11 +108,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
5496 u32 val;
5497
5498 preempt_disable();
5499 - if (unlikely(get_user(val, uaddr) != 0))
5500 + if (unlikely(get_user(val, uaddr) != 0)) {
5501 + preempt_enable();
5502 return -EFAULT;
5503 + }
5504
5505 - if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
5506 + if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
5507 + preempt_enable();
5508 return -EFAULT;
5509 + }
5510
5511 *uval = val;
5512 preempt_enable();
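The futex fix above illustrates a general rule: every early return between preempt_disable() and preempt_enable() must re-enable preemption itself, otherwise the task leaks a preempt count. A minimal sketch of the balanced pattern:

	preempt_disable();

	if (unlikely(get_user(val, uaddr) != 0)) {
		preempt_enable();	/* balance before the early return */
		return -EFAULT;
	}

	/* ... further work that may also fail early ... */

	preempt_enable();
	return 0;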
5513 diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
5514 index 461a0558bca4..cebecff536a3 100644
5515 --- a/include/drm/drm_cache.h
5516 +++ b/include/drm/drm_cache.h
5517 @@ -39,6 +39,8 @@ static inline bool drm_arch_can_wc_memory(void)
5518 {
5519 #if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
5520 return false;
5521 +#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
5522 + return false;
5523 #else
5524 return true;
5525 #endif
5526 diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h
5527 index 42cf2d991bf4..4ea7e55f20b0 100644
5528 --- a/include/keys/trusted-type.h
5529 +++ b/include/keys/trusted-type.h
5530 @@ -38,7 +38,7 @@ struct trusted_key_options {
5531 unsigned char pcrinfo[MAX_PCRINFO_SIZE];
5532 int pcrlock;
5533 uint32_t hash;
5534 - uint32_t digest_len;
5535 + uint32_t policydigest_len;
5536 unsigned char policydigest[MAX_DIGEST_SIZE];
5537 uint32_t policyhandle;
5538 };
5539 diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
5540 index 89d944b25d87..7fc7cb7872e3 100644
5541 --- a/include/linux/cgroup-defs.h
5542 +++ b/include/linux/cgroup-defs.h
5543 @@ -442,6 +442,7 @@ struct cgroup_subsys {
5544 int (*can_attach)(struct cgroup_taskset *tset);
5545 void (*cancel_attach)(struct cgroup_taskset *tset);
5546 void (*attach)(struct cgroup_taskset *tset);
5547 + void (*post_attach)(void);
5548 int (*can_fork)(struct task_struct *task);
5549 void (*cancel_fork)(struct task_struct *task);
5550 void (*fork)(struct task_struct *task);
5551 diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
5552 index fea160ee5803..85a868ccb493 100644
5553 --- a/include/linux/cpuset.h
5554 +++ b/include/linux/cpuset.h
5555 @@ -137,8 +137,6 @@ static inline void set_mems_allowed(nodemask_t nodemask)
5556 task_unlock(current);
5557 }
5558
5559 -extern void cpuset_post_attach_flush(void);
5560 -
5561 #else /* !CONFIG_CPUSETS */
5562
5563 static inline bool cpusets_enabled(void) { return false; }
5564 @@ -245,10 +243,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
5565 return false;
5566 }
5567
5568 -static inline void cpuset_post_attach_flush(void)
5569 -{
5570 -}
5571 -
5572 #endif /* !CONFIG_CPUSETS */
5573
5574 #endif /* _LINUX_CPUSET_H */
5575 diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
5576 index 987764afa65c..f8b83792939b 100644
5577 --- a/include/linux/mlx5/device.h
5578 +++ b/include/linux/mlx5/device.h
5579 @@ -363,6 +363,17 @@ enum {
5580 MLX5_CAP_OFF_CMDIF_CSUM = 46,
5581 };
5582
5583 +enum {
5584 + /*
5585 + * Max wqe size for rdma read is 512 bytes, so this
5586 + * limits our max_sge_rd as the wqe needs to fit:
5587 + * - ctrl segment (16 bytes)
5588 + * - rdma segment (16 bytes)
5589 + * - scatter elements (16 bytes each)
5590 + */
5591 + MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
5592 +};
5593 +
5594 struct mlx5_inbox_hdr {
5595 __be16 opcode;
5596 u8 rsvd[4];
5597 diff --git a/include/linux/mm.h b/include/linux/mm.h
5598 index 516e14944339..a6c240e885c0 100644
5599 --- a/include/linux/mm.h
5600 +++ b/include/linux/mm.h
5601 @@ -1010,6 +1010,8 @@ static inline bool page_mapped(struct page *page)
5602 page = compound_head(page);
5603 if (atomic_read(compound_mapcount_ptr(page)) >= 0)
5604 return true;
5605 + if (PageHuge(page))
5606 + return false;
5607 for (i = 0; i < hpage_nr_pages(page); i++) {
5608 if (atomic_read(&page[i]._mapcount) >= 0)
5609 return true;
5610 @@ -1117,6 +1119,8 @@ struct zap_details {
5611
5612 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
5613 pte_t pte);
5614 +struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
5615 + pmd_t pmd);
5616
5617 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
5618 unsigned long size);
5619 diff --git a/include/linux/platform_data/mmp_dma.h b/include/linux/platform_data/mmp_dma.h
5620 index 2a330ec9e2af..d1397c8ed94e 100644
5621 --- a/include/linux/platform_data/mmp_dma.h
5622 +++ b/include/linux/platform_data/mmp_dma.h
5623 @@ -14,6 +14,7 @@
5624
5625 struct mmp_dma_platdata {
5626 int dma_channels;
5627 + int nb_requestors;
5628 };
5629
5630 #endif /* MMP_DMA_H */
5631 diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
5632 index 8a0f55b6c2ba..88e3ab496e8f 100644
5633 --- a/include/media/videobuf2-core.h
5634 +++ b/include/media/videobuf2-core.h
5635 @@ -375,6 +375,9 @@ struct vb2_ops {
5636 /**
5637 * struct vb2_ops - driver-specific callbacks
5638 *
5639 + * @verify_planes_array: Verify that a given user space structure contains
5640 + * enough planes for the buffer. This is called
5641 + * for each dequeued buffer.
5642 * @fill_user_buffer: given a vb2_buffer fill in the userspace structure.
5643 * For V4L2 this is a struct v4l2_buffer.
5644 * @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer.
5645 @@ -384,6 +387,7 @@ struct vb2_ops {
5646 * the vb2_buffer struct.
5647 */
5648 struct vb2_buf_ops {
5649 + int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
5650 void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
5651 int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
5652 struct vb2_plane *planes);
5653 @@ -400,6 +404,9 @@ struct vb2_buf_ops {
5654 * @fileio_read_once: report EOF after reading the first buffer
5655 * @fileio_write_immediately: queue buffer after each write() call
5656 * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver
5657 + * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR at poll when QBUF
5658 + * has not been called. This is a vb1 idiom that has also been
5659 + * adopted by vb2.
5660 * @lock: pointer to a mutex that protects the vb2_queue struct. The
5661 * driver can set this to a mutex to let the v4l2 core serialize
5662 * the queuing ioctls. If the driver wants to handle locking
5663 @@ -463,6 +470,7 @@ struct vb2_queue {
5664 unsigned fileio_read_once:1;
5665 unsigned fileio_write_immediately:1;
5666 unsigned allow_zero_bytesused:1;
5667 + unsigned quirk_poll_must_check_waiting_for_buffers:1;
5668
5669 struct mutex *lock;
5670 void *owner;
5671 diff --git a/include/rdma/ib.h b/include/rdma/ib.h
5672 index cf8f9e700e48..a6b93706b0fc 100644
5673 --- a/include/rdma/ib.h
5674 +++ b/include/rdma/ib.h
5675 @@ -34,6 +34,7 @@
5676 #define _RDMA_IB_H
5677
5678 #include <linux/types.h>
5679 +#include <linux/sched.h>
5680
5681 struct ib_addr {
5682 union {
5683 @@ -86,4 +87,19 @@ struct sockaddr_ib {
5684 __u64 sib_scope_id;
5685 };
5686
5687 +/*
5688 + * The IB interfaces that use write() as bi-directional ioctl() are
5689 + * fundamentally unsafe, since there are lots of ways to trigger "write()"
5690 + * calls from various contexts with elevated privileges. That includes the
5691 + * traditional suid executable error message writes, but also various kernel
5692 + * interfaces that can write to file descriptors.
5693 + *
5694 + * This function provides protection for the legacy API by restricting the
5695 + * calling context.
5696 + */
5697 +static inline bool ib_safe_file_access(struct file *filp)
5698 +{
5699 + return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
5700 +}
5701 +
5702 #endif /* _RDMA_IB_H */
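Callers in the legacy write()-as-ioctl paths are expected to gate on this helper before parsing any command from the buffer; a representative (sketched) use in a file's write handler:

	static ssize_t example_write(struct file *filp, const char __user *buf,
				     size_t count, loff_t *pos)
	{
		if (!ib_safe_file_access(filp))
			return -EACCES;	/* write() came from an unsafe context */

		/* ... safe to treat the write as a bi-directional command ... */
		return count;
	}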
5703 diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
5704 index fa341fcb5829..f5842bcd9c94 100644
5705 --- a/include/sound/hda_i915.h
5706 +++ b/include/sound/hda_i915.h
5707 @@ -9,7 +9,7 @@
5708 #ifdef CONFIG_SND_HDA_I915
5709 int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
5710 int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
5711 -int snd_hdac_get_display_clk(struct hdac_bus *bus);
5712 +void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
5713 int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate);
5714 int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid,
5715 bool *audio_enabled, char *buffer, int max_bytes);
5716 @@ -25,9 +25,8 @@ static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
5717 {
5718 return 0;
5719 }
5720 -static inline int snd_hdac_get_display_clk(struct hdac_bus *bus)
5721 +static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
5722 {
5723 - return 0;
5724 }
5725 static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid,
5726 int rate)
5727 diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
5728 index c039f1d68a09..086168e18ca8 100644
5729 --- a/include/uapi/linux/v4l2-dv-timings.h
5730 +++ b/include/uapi/linux/v4l2-dv-timings.h
5731 @@ -183,7 +183,8 @@
5732
5733 #define V4L2_DV_BT_CEA_3840X2160P24 { \
5734 .type = V4L2_DV_BT_656_1120, \
5735 - V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
5736 + V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
5737 + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
5738 297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
5739 V4L2_DV_BT_STD_CEA861, \
5740 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
5741 @@ -191,14 +192,16 @@
5742
5743 #define V4L2_DV_BT_CEA_3840X2160P25 { \
5744 .type = V4L2_DV_BT_656_1120, \
5745 - V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
5746 + V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
5747 + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
5748 297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
5749 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
5750 }
5751
5752 #define V4L2_DV_BT_CEA_3840X2160P30 { \
5753 .type = V4L2_DV_BT_656_1120, \
5754 - V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
5755 + V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
5756 + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
5757 297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
5758 V4L2_DV_BT_STD_CEA861, \
5759 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
5760 @@ -206,14 +209,16 @@
5761
5762 #define V4L2_DV_BT_CEA_3840X2160P50 { \
5763 .type = V4L2_DV_BT_656_1120, \
5764 - V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
5765 + V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
5766 + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
5767 594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
5768 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
5769 }
5770
5771 #define V4L2_DV_BT_CEA_3840X2160P60 { \
5772 .type = V4L2_DV_BT_656_1120, \
5773 - V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
5774 + V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
5775 + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
5776 594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
5777 V4L2_DV_BT_STD_CEA861, \
5778 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
5779 @@ -221,7 +226,8 @@
5780
5781 #define V4L2_DV_BT_CEA_4096X2160P24 { \
5782 .type = V4L2_DV_BT_656_1120, \
5783 - V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
5784 + V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
5785 + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
5786 297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
5787 V4L2_DV_BT_STD_CEA861, \
5788 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
5789 @@ -229,14 +235,16 @@
5790
5791 #define V4L2_DV_BT_CEA_4096X2160P25 { \
5792 .type = V4L2_DV_BT_656_1120, \
5793 - V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
5794 + V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
5795 + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
5796 297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
5797 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
5798 }
5799
5800 #define V4L2_DV_BT_CEA_4096X2160P30 { \
5801 .type = V4L2_DV_BT_656_1120, \
5802 - V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
5803 + V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
5804 + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
5805 297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
5806 V4L2_DV_BT_STD_CEA861, \
5807 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
5808 @@ -244,14 +252,16 @@
5809
5810 #define V4L2_DV_BT_CEA_4096X2160P50 { \
5811 .type = V4L2_DV_BT_656_1120, \
5812 - V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
5813 + V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
5814 + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
5815 594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
5816 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
5817 }
5818
5819 #define V4L2_DV_BT_CEA_4096X2160P60 { \
5820 .type = V4L2_DV_BT_656_1120, \
5821 - V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
5822 + V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
5823 + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
5824 594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
5825 V4L2_DV_BT_STD_CEA861, \
5826 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
5827 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
5828 index 6a498daf2eec..355cd5f2b416 100644
5829 --- a/kernel/cgroup.c
5830 +++ b/kernel/cgroup.c
5831 @@ -2697,9 +2697,10 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
5832 size_t nbytes, loff_t off, bool threadgroup)
5833 {
5834 struct task_struct *tsk;
5835 + struct cgroup_subsys *ss;
5836 struct cgroup *cgrp;
5837 pid_t pid;
5838 - int ret;
5839 + int ssid, ret;
5840
5841 if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
5842 return -EINVAL;
5843 @@ -2747,8 +2748,10 @@ out_unlock_rcu:
5844 rcu_read_unlock();
5845 out_unlock_threadgroup:
5846 percpu_up_write(&cgroup_threadgroup_rwsem);
5847 + for_each_subsys(ss, ssid)
5848 + if (ss->post_attach)
5849 + ss->post_attach();
5850 cgroup_kn_unlock(of->kn);
5851 - cpuset_post_attach_flush();
5852 return ret ?: nbytes;
5853 }
5854
5855 diff --git a/kernel/cpuset.c b/kernel/cpuset.c
5856 index 41989ab4db57..df16d0c9349f 100644
5857 --- a/kernel/cpuset.c
5858 +++ b/kernel/cpuset.c
5859 @@ -58,7 +58,6 @@
5860 #include <asm/uaccess.h>
5861 #include <linux/atomic.h>
5862 #include <linux/mutex.h>
5863 -#include <linux/workqueue.h>
5864 #include <linux/cgroup.h>
5865 #include <linux/wait.h>
5866
5867 @@ -1016,7 +1015,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
5868 }
5869 }
5870
5871 -void cpuset_post_attach_flush(void)
5872 +static void cpuset_post_attach(void)
5873 {
5874 flush_workqueue(cpuset_migrate_mm_wq);
5875 }
5876 @@ -2087,6 +2086,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
5877 .can_attach = cpuset_can_attach,
5878 .cancel_attach = cpuset_cancel_attach,
5879 .attach = cpuset_attach,
5880 + .post_attach = cpuset_post_attach,
5881 .bind = cpuset_bind,
5882 .legacy_cftypes = files,
5883 .early_init = 1,
5884 diff --git a/kernel/events/core.c b/kernel/events/core.c
5885 index f0b4b328d8f5..a0ef98b258d7 100644
5886 --- a/kernel/events/core.c
5887 +++ b/kernel/events/core.c
5888 @@ -2402,14 +2402,24 @@ static void ctx_sched_out(struct perf_event_context *ctx,
5889 cpuctx->task_ctx = NULL;
5890 }
5891
5892 - is_active ^= ctx->is_active; /* changed bits */
5893 -
5894 + /*
5895 + * Always update time if it was set; not only when it changes.
5896 + * Otherwise we can 'forget' to update time for any but the last
5897 + * context we sched out. For example:
5898 + *
5899 + * ctx_sched_out(.event_type = EVENT_FLEXIBLE)
5900 + * ctx_sched_out(.event_type = EVENT_PINNED)
5901 + *
5902 + * would only update time for the pinned events.
5903 + */
5904 if (is_active & EVENT_TIME) {
5905 /* update (and stop) ctx time */
5906 update_context_time(ctx);
5907 update_cgrp_time_from_cpuctx(cpuctx);
5908 }
5909
5910 + is_active ^= ctx->is_active; /* changed bits */
5911 +
5912 if (!ctx->nr_active || !(is_active & EVENT_ALL))
5913 return;
5914
5915 @@ -8479,6 +8489,7 @@ SYSCALL_DEFINE5(perf_event_open,
5916 f_flags);
5917 if (IS_ERR(event_file)) {
5918 err = PTR_ERR(event_file);
5919 + event_file = NULL;
5920 goto err_context;
5921 }
5922
5923 diff --git a/kernel/futex.c b/kernel/futex.c
5924 index 5d6ce6413ef1..11b502159f3a 100644
5925 --- a/kernel/futex.c
5926 +++ b/kernel/futex.c
5927 @@ -1212,10 +1212,20 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
5928 if (unlikely(should_fail_futex(true)))
5929 ret = -EFAULT;
5930
5931 - if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
5932 + if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
5933 ret = -EFAULT;
5934 - else if (curval != uval)
5935 - ret = -EINVAL;
5936 + } else if (curval != uval) {
5937 + /*
5938 + * If an unconditional UNLOCK_PI operation (user space did not
5939 + * try the TID->0 transition) raced with a waiter setting the
5940 + * FUTEX_WAITERS flag between get_user() and locking the hash
5941 + * bucket lock, retry the operation.
5942 + */
5943 + if ((FUTEX_TID_MASK & curval) == uval)
5944 + ret = -EAGAIN;
5945 + else
5946 + ret = -EINVAL;
5947 + }
5948 if (ret) {
5949 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
5950 return ret;
5951 @@ -1442,8 +1452,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
5952 if (likely(&hb1->chain != &hb2->chain)) {
5953 plist_del(&q->list, &hb1->chain);
5954 hb_waiters_dec(hb1);
5955 - plist_add(&q->list, &hb2->chain);
5956 hb_waiters_inc(hb2);
5957 + plist_add(&q->list, &hb2->chain);
5958 q->lock_ptr = &hb2->lock;
5959 }
5960 get_futex_key_refs(key2);
5961 @@ -2536,6 +2546,15 @@ retry:
5962 if (ret == -EFAULT)
5963 goto pi_faulted;
5964 /*
5965 + * An unconditional UNLOCK_PI op raced against a waiter
5966 + * setting the FUTEX_WAITERS bit. Try again.
5967 + */
5968 + if (ret == -EAGAIN) {
5969 + spin_unlock(&hb->lock);
5970 + put_futex_key(&key);
5971 + goto retry;
5972 + }
5973 + /*
5974 * wake_futex_pi has detected invalid state. Tell user
5975 * space.
5976 */
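Note the symmetry between the two futex hunks: wake_futex_pi() now reports the benign race (TID still matches but FUTEX_WAITERS appeared) as -EAGAIN, and the syscall path converts that into a full retry after dropping the hash-bucket lock and the key reference. Condensed to its skeleton (a sketch of the flow above, not additional code):

	retry:
		/* ... look up the key, lock hb, find the top waiter ... */
		ret = wake_futex_pi(uaddr, uval, this, hb);
		if (ret == -EAGAIN) {
			spin_unlock(&hb->lock);
			put_futex_key(&key);
			goto retry;	/* raced with a new waiter: redo */
		}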
5977 diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
5978 index 5b9102a47ea5..c835270f0c2f 100644
5979 --- a/kernel/locking/mcs_spinlock.h
5980 +++ b/kernel/locking/mcs_spinlock.h
5981 @@ -67,7 +67,13 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
5982 node->locked = 0;
5983 node->next = NULL;
5984
5985 - prev = xchg_acquire(lock, node);
5986 + /*
5987 + * We rely on the full barrier with global transitivity implied by the
5988 + * below xchg() to order the initialization stores above against any
5989 + * observation of @node. And to provide the ACQUIRE ordering associated
5990 + * with a LOCK primitive.
5991 + */
5992 + prev = xchg(lock, node);
5993 if (likely(prev == NULL)) {
5994 /*
5995 * Lock acquired, don't need to set node->locked to 1. Threads
5996 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
5997 index a74073f8c08c..1c1d2a00ad95 100644
5998 --- a/kernel/sched/core.c
5999 +++ b/kernel/sched/core.c
6000 @@ -7802,7 +7802,7 @@ void set_curr_task(int cpu, struct task_struct *p)
6001 /* task_group_lock serializes the addition/removal of task groups */
6002 static DEFINE_SPINLOCK(task_group_lock);
6003
6004 -static void free_sched_group(struct task_group *tg)
6005 +static void sched_free_group(struct task_group *tg)
6006 {
6007 free_fair_sched_group(tg);
6008 free_rt_sched_group(tg);
6009 @@ -7828,7 +7828,7 @@ struct task_group *sched_create_group(struct task_group *parent)
6010 return tg;
6011
6012 err:
6013 - free_sched_group(tg);
6014 + sched_free_group(tg);
6015 return ERR_PTR(-ENOMEM);
6016 }
6017
6018 @@ -7848,17 +7848,16 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
6019 }
6020
6021 /* rcu callback to free various structures associated with a task group */
6022 -static void free_sched_group_rcu(struct rcu_head *rhp)
6023 +static void sched_free_group_rcu(struct rcu_head *rhp)
6024 {
6025 /* now it should be safe to free those cfs_rqs */
6026 - free_sched_group(container_of(rhp, struct task_group, rcu));
6027 + sched_free_group(container_of(rhp, struct task_group, rcu));
6028 }
6029
6030 -/* Destroy runqueue etc associated with a task group */
6031 void sched_destroy_group(struct task_group *tg)
6032 {
6033 /* wait for possible concurrent references to cfs_rqs complete */
6034 - call_rcu(&tg->rcu, free_sched_group_rcu);
6035 + call_rcu(&tg->rcu, sched_free_group_rcu);
6036 }
6037
6038 void sched_offline_group(struct task_group *tg)
6039 @@ -8319,31 +8318,26 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
6040 if (IS_ERR(tg))
6041 return ERR_PTR(-ENOMEM);
6042
6043 + sched_online_group(tg, parent);
6044 +
6045 return &tg->css;
6046 }
6047
6048 -static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
6049 +static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
6050 {
6051 struct task_group *tg = css_tg(css);
6052 - struct task_group *parent = css_tg(css->parent);
6053
6054 - if (parent)
6055 - sched_online_group(tg, parent);
6056 - return 0;
6057 + sched_offline_group(tg);
6058 }
6059
6060 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
6061 {
6062 struct task_group *tg = css_tg(css);
6063
6064 - sched_destroy_group(tg);
6065 -}
6066 -
6067 -static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
6068 -{
6069 - struct task_group *tg = css_tg(css);
6070 -
6071 - sched_offline_group(tg);
6072 + /*
6073 + * Relies on the RCU grace period between css_released() and this.
6074 + */
6075 + sched_free_group(tg);
6076 }
6077
6078 static void cpu_cgroup_fork(struct task_struct *task)
6079 @@ -8703,9 +8697,8 @@ static struct cftype cpu_files[] = {
6080
6081 struct cgroup_subsys cpu_cgrp_subsys = {
6082 .css_alloc = cpu_cgroup_css_alloc,
6083 + .css_released = cpu_cgroup_css_released,
6084 .css_free = cpu_cgroup_css_free,
6085 - .css_online = cpu_cgroup_css_online,
6086 - .css_offline = cpu_cgroup_css_offline,
6087 .fork = cpu_cgroup_fork,
6088 .can_attach = cpu_cgroup_can_attach,
6089 .attach = cpu_cgroup_attach,
6090 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
6091 index 7ff5dc7d2ac5..9e82d0450fad 100644
6092 --- a/kernel/workqueue.c
6093 +++ b/kernel/workqueue.c
6094 @@ -667,6 +667,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
6095 */
6096 smp_wmb();
6097 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
6098 + /*
6099 + * The following mb guarantees that previous clear of a PENDING bit
6100 + * will not be reordered with any speculative LOADS or STORES from
6101 + * work->current_func, which is executed afterwards. This possible
6102 + * reordering can lead to a missed execution on an attempt to queue
6103 + * the same @work. E.g. consider this case:
6104 + *
6105 + * CPU#0 CPU#1
6106 + * ---------------------------- --------------------------------
6107 + *
6108 + * 1 STORE event_indicated
6109 + * 2 queue_work_on() {
6110 + * 3 test_and_set_bit(PENDING)
6111 + * 4 } set_..._and_clear_pending() {
6112 + * 5 set_work_data() # clear bit
6113 + * 6 smp_mb()
6114 + * 7 work->current_func() {
6115 + * 8 LOAD event_indicated
6116 + * }
6117 + *
6118 + * Without an explicit full barrier, the speculative LOAD on line 8
6119 + * can be executed before CPU#0 does the STORE on line 1. If that
6120 + * happens, CPU#0 observes that the PENDING bit is still set and a
6121 + * new execution of the @work is not queued, in the hope that CPU#1
6122 + * will eventually finish the queued @work. Meanwhile CPU#1 does not
6123 + * see that event_indicated is set, because the speculative LOAD was
6124 + * executed before the actual STORE.
6125 + */
6126 + smp_mb();
6127 }
6128
6129 static void clear_work_data(struct work_struct *work)
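The race in the comment is the classic store-buffering pattern and can be modelled with C11 atomics: the seq_cst fence plays the role of smp_mb(), and the seq_cst exchange stands in for test_and_set_bit()'s implied full barrier. An illustrative model, not kernel code:

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool pending;
	static atomic_bool event_indicated;

	static void try_queue(void)			/* CPU#0 */
	{
		atomic_store_explicit(&event_indicated, true,
				      memory_order_relaxed);	/* line 1 */
		if (atomic_exchange(&pending, true))		/* line 3 */
			return;	/* PENDING set: trust the running worker */
		/* ... actually enqueue the work ... */
	}

	static void finish_work(void)			/* CPU#1 */
	{
		atomic_store_explicit(&pending, false,
				      memory_order_relaxed);	/* line 5 */
		atomic_thread_fence(memory_order_seq_cst);	/* line 6 */
		if (atomic_load_explicit(&event_indicated,
					 memory_order_relaxed))	/* line 8 */
			/* handle the event */ ;
	}

Without the fence on line 6, the load on line 8 may be satisfied before the store on line 5 becomes visible to CPU#0, and both sides can draw the wrong conclusion at once.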
6130 diff --git a/lib/assoc_array.c b/lib/assoc_array.c
6131 index 03dd576e6773..59fd7c0b119c 100644
6132 --- a/lib/assoc_array.c
6133 +++ b/lib/assoc_array.c
6134 @@ -524,7 +524,9 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
6135 free_slot = i;
6136 continue;
6137 }
6138 - if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
6139 + if (assoc_array_ptr_is_leaf(ptr) &&
6140 + ops->compare_object(assoc_array_ptr_to_leaf(ptr),
6141 + index_key)) {
6142 pr_devel("replace in slot %d\n", i);
6143 edit->leaf_p = &node->slots[i];
6144 edit->dead_leaf = node->slots[i];
6145 diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
6146 index abcecdc2d0f2..0710a62ad2f6 100644
6147 --- a/lib/lz4/lz4defs.h
6148 +++ b/lib/lz4/lz4defs.h
6149 @@ -11,8 +11,7 @@
6150 /*
6151 * Detects 64 bits mode
6152 */
6153 -#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
6154 - || defined(__ppc64__) || defined(__LP64__))
6155 +#if defined(CONFIG_64BIT)
6156 #define LZ4_ARCH64 1
6157 #else
6158 #define LZ4_ARCH64 0
6159 @@ -35,6 +34,10 @@ typedef struct _U64_S { u64 v; } U64_S;
6160
6161 #define PUT4(s, d) (A32(d) = A32(s))
6162 #define PUT8(s, d) (A64(d) = A64(s))
6163 +
6164 +#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
6165 + (d = s - A16(p))
6166 +
6167 #define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
6168 do { \
6169 A16(p) = v; \
6170 @@ -51,10 +54,13 @@ typedef struct _U64_S { u64 v; } U64_S;
6171 #define PUT8(s, d) \
6172 put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
6173
6174 -#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
6175 - do { \
6176 - put_unaligned(v, (u16 *)(p)); \
6177 - p += 2; \
6178 +#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
6179 + (d = s - get_unaligned_le16(p))
6180 +
6181 +#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
6182 + do { \
6183 + put_unaligned_le16(v, (u16 *)(p)); \
6184 + p += 2; \
6185 } while (0)
6186 #endif
6187
6188 @@ -140,9 +146,6 @@ typedef struct _U64_S { u64 v; } U64_S;
6189
6190 #endif
6191
6192 -#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
6193 - (d = s - get_unaligned_le16(p))
6194 -
6195 #define LZ4_WILDCOPY(s, d, e) \
6196 do { \
6197 LZ4_COPYPACKET(s, d); \
6198 diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
6199 index ec533a6c77b5..eb15e7dc7b65 100644
6200 --- a/lib/mpi/mpicoder.c
6201 +++ b/lib/mpi/mpicoder.c
6202 @@ -128,6 +128,23 @@ leave:
6203 }
6204 EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
6205
6206 +static int count_lzeros(MPI a)
6207 +{
6208 + mpi_limb_t alimb;
6209 + int i, lzeros = 0;
6210 +
6211 + for (i = a->nlimbs - 1; i >= 0; i--) {
6212 + alimb = a->d[i];
6213 + if (alimb == 0) {
6214 + lzeros += sizeof(mpi_limb_t);
6215 + } else {
6216 + lzeros += count_leading_zeros(alimb) / 8;
6217 + break;
6218 + }
6219 + }
6220 + return lzeros;
6221 +}
6222 +
6223 /**
6224 * mpi_read_buffer() - read MPI to a bufer provided by user (msb first)
6225 *
6226 @@ -148,7 +165,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
6227 uint8_t *p;
6228 mpi_limb_t alimb;
6229 unsigned int n = mpi_get_size(a);
6230 - int i, lzeros = 0;
6231 + int i, lzeros;
6232
6233 if (!buf || !nbytes)
6234 return -EINVAL;
6235 @@ -156,14 +173,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
6236 if (sign)
6237 *sign = a->sign;
6238
6239 - p = (void *)&a->d[a->nlimbs] - 1;
6240 -
6241 - for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
6242 - if (!*p)
6243 - lzeros++;
6244 - else
6245 - break;
6246 - }
6247 + lzeros = count_lzeros(a);
6248
6249 if (buf_len < n - lzeros) {
6250 *nbytes = n - lzeros;
6251 @@ -351,7 +361,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
6252 u8 *p, *p2;
6253 mpi_limb_t alimb, alimb2;
6254 unsigned int n = mpi_get_size(a);
6255 - int i, x, y = 0, lzeros = 0, buf_len;
6256 + int i, x, y = 0, lzeros, buf_len;
6257
6258 if (!nbytes)
6259 return -EINVAL;
6260 @@ -359,14 +369,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
6261 if (sign)
6262 *sign = a->sign;
6263
6264 - p = (void *)&a->d[a->nlimbs] - 1;
6265 -
6266 - for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
6267 - if (!*p)
6268 - lzeros++;
6269 - else
6270 - break;
6271 - }
6272 + lzeros = count_lzeros(a);
6273
6274 if (*nbytes < n - lzeros) {
6275 *nbytes = n - lzeros;
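count_lzeros() replaces the old byte-at-a-time scan: fully zero limbs contribute sizeof(mpi_limb_t) bytes each, and the first non-zero limb contributes count_leading_zeros()/8. For example, a 64-bit limb of 0x0000000000ffffff has 40 leading zero bits, i.e. 5 leading zero bytes. The same arithmetic in portable userspace C, using the GCC/Clang builtin instead of the kernel helper:

	#include <stdint.h>

	/* leading zero bytes of one non-zero 64-bit limb */
	static int limb_lzero_bytes(uint64_t limb)
	{
		return __builtin_clzll(limb) / 8;	/* limb must be non-zero */
	}
	/* limb_lzero_bytes(0x0000000000ffffffULL) == 5 */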
6276 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
6277 index e10a4fee88d2..a7db0a2db1ab 100644
6278 --- a/mm/huge_memory.c
6279 +++ b/mm/huge_memory.c
6280 @@ -1919,10 +1919,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
6281 * page fault if needed.
6282 */
6283 return 0;
6284 - if (vma->vm_ops)
6285 + if (vma->vm_ops || (vm_flags & VM_NO_THP))
6286 /* khugepaged not yet working on file or special mappings */
6287 return 0;
6288 - VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
6289 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
6290 hend = vma->vm_end & HPAGE_PMD_MASK;
6291 if (hstart < hend)
6292 @@ -2310,8 +2309,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
6293 return false;
6294 if (is_vma_temporary_stack(vma))
6295 return false;
6296 - VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
6297 - return true;
6298 + return !(vma->vm_flags & VM_NO_THP);
6299 }
6300
6301 static void collapse_huge_page(struct mm_struct *mm,
6302 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
6303 index caf3bf73b533..a65ad1d59232 100644
6304 --- a/mm/memcontrol.c
6305 +++ b/mm/memcontrol.c
6306 @@ -207,6 +207,7 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
6307 /* "mc" and its members are protected by cgroup_mutex */
6308 static struct move_charge_struct {
6309 spinlock_t lock; /* for from, to */
6310 + struct mm_struct *mm;
6311 struct mem_cgroup *from;
6312 struct mem_cgroup *to;
6313 unsigned long flags;
6314 @@ -4730,6 +4731,8 @@ static void __mem_cgroup_clear_mc(void)
6315
6316 static void mem_cgroup_clear_mc(void)
6317 {
6318 + struct mm_struct *mm = mc.mm;
6319 +
6320 /*
6321 * we must clear moving_task before waking up waiters at the end of
6322 * task migration.
6323 @@ -4739,7 +4742,10 @@ static void mem_cgroup_clear_mc(void)
6324 spin_lock(&mc.lock);
6325 mc.from = NULL;
6326 mc.to = NULL;
6327 + mc.mm = NULL;
6328 spin_unlock(&mc.lock);
6329 +
6330 + mmput(mm);
6331 }
6332
6333 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6334 @@ -4796,6 +4802,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6335 VM_BUG_ON(mc.moved_swap);
6336
6337 spin_lock(&mc.lock);
6338 + mc.mm = mm;
6339 mc.from = from;
6340 mc.to = memcg;
6341 mc.flags = move_flags;
6342 @@ -4805,8 +4812,9 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6343 ret = mem_cgroup_precharge_mc(mm);
6344 if (ret)
6345 mem_cgroup_clear_mc();
6346 + } else {
6347 + mmput(mm);
6348 }
6349 - mmput(mm);
6350 return ret;
6351 }
6352
6353 @@ -4915,11 +4923,11 @@ put: /* get_mctgt_type() gets the page */
6354 return ret;
6355 }
6356
6357 -static void mem_cgroup_move_charge(struct mm_struct *mm)
6358 +static void mem_cgroup_move_charge(void)
6359 {
6360 struct mm_walk mem_cgroup_move_charge_walk = {
6361 .pmd_entry = mem_cgroup_move_charge_pte_range,
6362 - .mm = mm,
6363 + .mm = mc.mm,
6364 };
6365
6366 lru_add_drain_all();
6367 @@ -4931,7 +4939,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
6368 atomic_inc(&mc.from->moving_account);
6369 synchronize_rcu();
6370 retry:
6371 - if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
6372 + if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
6373 /*
6374 * Someone who is holding the mmap_sem might be waiting in
6375 * waitq. So we cancel all extra charges, wake up all waiters,
6376 @@ -4948,23 +4956,16 @@ retry:
6377 * additional charge, the page walk just aborts.
6378 */
6379 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
6380 - up_read(&mm->mmap_sem);
6381 + up_read(&mc.mm->mmap_sem);
6382 atomic_dec(&mc.from->moving_account);
6383 }
6384
6385 -static void mem_cgroup_move_task(struct cgroup_taskset *tset)
6386 +static void mem_cgroup_move_task(void)
6387 {
6388 - struct cgroup_subsys_state *css;
6389 - struct task_struct *p = cgroup_taskset_first(tset, &css);
6390 - struct mm_struct *mm = get_task_mm(p);
6391 -
6392 - if (mm) {
6393 - if (mc.to)
6394 - mem_cgroup_move_charge(mm);
6395 - mmput(mm);
6396 - }
6397 - if (mc.to)
6398 + if (mc.to) {
6399 + mem_cgroup_move_charge();
6400 mem_cgroup_clear_mc();
6401 + }
6402 }
6403 #else /* !CONFIG_MMU */
6404 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6405 @@ -4974,7 +4975,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6406 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6407 {
6408 }
6409 -static void mem_cgroup_move_task(struct cgroup_taskset *tset)
6410 +static void mem_cgroup_move_task(void)
6411 {
6412 }
6413 #endif
6414 @@ -5246,7 +5247,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
6415 .css_reset = mem_cgroup_css_reset,
6416 .can_attach = mem_cgroup_can_attach,
6417 .cancel_attach = mem_cgroup_cancel_attach,
6418 - .attach = mem_cgroup_move_task,
6419 + .post_attach = mem_cgroup_move_task,
6420 .bind = mem_cgroup_bind,
6421 .dfl_cftypes = memory_files,
6422 .legacy_cftypes = mem_cgroup_legacy_files,
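
The memcontrol.c change parks the target mm in the shared move_charge_struct: mem_cgroup_can_attach() keeps the reference it took with get_task_mm() alive in mc.mm, the charge transfer now runs from the cgroup post_attach hook, and mem_cgroup_clear_mc() is the one place that drops the reference, so the deferred work can never race with the caller's mmput(). A minimal userspace sketch of that ownership handoff; the names mirror the patch and all locking is elided:

#include <stdio.h>
#include <stdlib.h>

struct mm { int refs; };

static struct mm *mm_get(struct mm *m) { m->refs++; return m; }
static void mm_put(struct mm *m) { if (--m->refs == 0) free(m); }

static struct { struct mm *mm; } mc;   /* move context, as in the patch */

static void can_attach(struct mm *m)
{
    mc.mm = mm_get(m);          /* stash our own reference */
}

static void clear_mc(void)
{
    struct mm *m = mc.mm;

    mc.mm = NULL;
    mm_put(m);                  /* the only place the ref is dropped */
}

static void post_attach(void)
{
    printf("moving charges, mm refs=%d\n", mc.mm->refs);   /* 1 */
    clear_mc();
}

int main(void)
{
    struct mm *m = malloc(sizeof(*m));

    m->refs = 1;
    can_attach(m);
    mm_put(m);      /* caller drops its ref; mc.mm still pins the mm */
    post_attach();  /* deferred work runs safely, then releases */
    return 0;
}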
6423 diff --git a/mm/memory.c b/mm/memory.c
6424 index 8132787ae4d5..3345dcf862cf 100644
6425 --- a/mm/memory.c
6426 +++ b/mm/memory.c
6427 @@ -792,6 +792,46 @@ out:
6428 return pfn_to_page(pfn);
6429 }
6430
6431 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6432 +struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
6433 + pmd_t pmd)
6434 +{
6435 + unsigned long pfn = pmd_pfn(pmd);
6436 +
6437 + /*
6438 + * There is no pmd_special() but there may be special pmds, e.g.
6439 + * in a direct-access (dax) mapping, so let's just replicate the
6440 + * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
6441 + */
6442 + if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
6443 + if (vma->vm_flags & VM_MIXEDMAP) {
6444 + if (!pfn_valid(pfn))
6445 + return NULL;
6446 + goto out;
6447 + } else {
6448 + unsigned long off;
6449 + off = (addr - vma->vm_start) >> PAGE_SHIFT;
6450 + if (pfn == vma->vm_pgoff + off)
6451 + return NULL;
6452 + if (!is_cow_mapping(vma->vm_flags))
6453 + return NULL;
6454 + }
6455 + }
6456 +
6457 + if (is_zero_pfn(pfn))
6458 + return NULL;
6459 + if (unlikely(pfn > highest_memmap_pfn))
6460 + return NULL;
6461 +
6462 + /*
6463 + * NOTE! We still have PageReserved() pages in the page tables.
6464 + * eg. VDSO mappings can cause them to exist.
6465 + */
6466 +out:
6467 + return pfn_to_page(pfn);
6468 +}
6469 +#endif
6470 +
6471 /*
6472 * copy one vm_area from one task to the other. Assumes the page tables
6473 * already present in the new task to be cleared in the whole range
6474 diff --git a/mm/migrate.c b/mm/migrate.c
6475 index 3ad0fea5c438..625741faa068 100644
6476 --- a/mm/migrate.c
6477 +++ b/mm/migrate.c
6478 @@ -967,7 +967,13 @@ out:
6479 dec_zone_page_state(page, NR_ISOLATED_ANON +
6480 page_is_file_cache(page));
6481 /* Soft-offlined page shouldn't go through lru cache list */
6482 - if (reason == MR_MEMORY_FAILURE) {
6483 + if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
6484 + /*
6485 + * With this release, we free the successfully migrated
6486 + * page and set PG_HWPoison on the just-freed page
6487 + * intentionally. Although it's rather weird, it's how
6488 + * the HWPoison flag works at the moment.
6489 + */
6490 put_page(page);
6491 if (!test_set_page_hwpoison(page))
6492 num_poisoned_pages_inc();
6493 diff --git a/mm/slub.c b/mm/slub.c
6494 index d8fbd4a6ed59..2a722e141958 100644
6495 --- a/mm/slub.c
6496 +++ b/mm/slub.c
6497 @@ -2815,6 +2815,7 @@ struct detached_freelist {
6498 void *tail;
6499 void *freelist;
6500 int cnt;
6501 + struct kmem_cache *s;
6502 };
6503
6504 /*
6505 @@ -2829,8 +2830,9 @@ struct detached_freelist {
6506 * synchronization primitive. Look ahead in the array is limited due
6507 * to performance reasons.
6508 */
6509 -static int build_detached_freelist(struct kmem_cache *s, size_t size,
6510 - void **p, struct detached_freelist *df)
6511 +static inline
6512 +int build_detached_freelist(struct kmem_cache *s, size_t size,
6513 + void **p, struct detached_freelist *df)
6514 {
6515 size_t first_skipped_index = 0;
6516 int lookahead = 3;
6517 @@ -2846,8 +2848,11 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
6518 if (!object)
6519 return 0;
6520
6521 + /* Support for memcg, compiler can optimize this out */
6522 + df->s = cache_from_obj(s, object);
6523 +
6524 /* Start new detached freelist */
6525 - set_freepointer(s, object, NULL);
6526 + set_freepointer(df->s, object, NULL);
6527 df->page = virt_to_head_page(object);
6528 df->tail = object;
6529 df->freelist = object;
6530 @@ -2862,7 +2867,7 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
6531 /* df->page is always set at this point */
6532 if (df->page == virt_to_head_page(object)) {
6533 /* Opportunity build freelist */
6534 - set_freepointer(s, object, df->freelist);
6535 + set_freepointer(df->s, object, df->freelist);
6536 df->freelist = object;
6537 df->cnt++;
6538 p[size] = NULL; /* mark object processed */
6539 @@ -2881,25 +2886,20 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
6540 return first_skipped_index;
6541 }
6542
6543 -
6544 /* Note that interrupts must be enabled when calling this function. */
6545 -void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
6546 +void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
6547 {
6548 if (WARN_ON(!size))
6549 return;
6550
6551 do {
6552 struct detached_freelist df;
6553 - struct kmem_cache *s;
6554 -
6555 - /* Support for memcg */
6556 - s = cache_from_obj(orig_s, p[size - 1]);
6557
6558 size = build_detached_freelist(s, size, p, &df);
6559 if (unlikely(!df.page))
6560 continue;
6561
6562 - slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
6563 + slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
6564 } while (likely(size));
6565 }
6566 EXPORT_SYMBOL(kmem_cache_free_bulk);
6567 diff --git a/mm/vmscan.c b/mm/vmscan.c
6568 index 71b1c29948db..c712b016e0ab 100644
6569 --- a/mm/vmscan.c
6570 +++ b/mm/vmscan.c
6571 @@ -2561,7 +2561,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
6572 sc->gfp_mask |= __GFP_HIGHMEM;
6573
6574 for_each_zone_zonelist_nodemask(zone, z, zonelist,
6575 - requested_highidx, sc->nodemask) {
6576 + gfp_zone(sc->gfp_mask), sc->nodemask) {
6577 enum zone_type classzone_idx;
6578
6579 if (!populated_zone(zone))
6580 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
6581 index f1ffb34e253f..d2bc03f0b4d7 100644
6582 --- a/net/netlink/af_netlink.c
6583 +++ b/net/netlink/af_netlink.c
6584 @@ -1305,7 +1305,7 @@ static int netlink_release(struct socket *sock)
6585
6586 skb_queue_purge(&sk->sk_write_queue);
6587
6588 - if (nlk->portid) {
6589 + if (nlk->portid && nlk->bound) {
6590 struct netlink_notify n = {
6591 .net = sock_net(sk),
6592 .protocol = sk->sk_protocol,
6593 diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
6594 index 273bc3a35425..008c25d1b9f9 100644
6595 --- a/net/sunrpc/cache.c
6596 +++ b/net/sunrpc/cache.c
6597 @@ -1182,14 +1182,14 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
6598 }
6599
6600 crq->q.reader = 0;
6601 - crq->item = cache_get(h);
6602 crq->buf = buf;
6603 crq->len = 0;
6604 crq->readers = 0;
6605 spin_lock(&queue_lock);
6606 - if (test_bit(CACHE_PENDING, &h->flags))
6607 + if (test_bit(CACHE_PENDING, &h->flags)) {
6608 + crq->item = cache_get(h);
6609 list_add_tail(&crq->q.list, &detail->queue);
6610 - else
6611 + } else
6612 /* Lost a race, no longer PENDING, so don't enqueue */
6613 ret = -EAGAIN;
6614 spin_unlock(&queue_lock);
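
The sunrpc/cache.c fix takes the cache_get() reference inside the queue_lock section, together with the CACHE_PENDING test and the enqueue; taking it before the lock meant that losing the race returned -EAGAIN with a reference nothing would ever put. A compact sketch of the corrected ordering, with a pthread mutex standing in for queue_lock and illustrative names throughout:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct cache_head { int refs; bool pending; };

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static int upcall(struct cache_head *h)
{
    int ret = 0;

    pthread_mutex_lock(&queue_lock);
    if (h->pending) {
        h->refs++;      /* cache_get() paired with the enqueue */
        /* list_add_tail(&crq->q.list, &detail->queue) goes here */
    } else {
        ret = -11;      /* -EAGAIN: lost the race, no ref taken */
    }
    pthread_mutex_unlock(&queue_lock);
    return ret;
}

int main(void)
{
    struct cache_head h = { .refs = 1, .pending = true };
    int ret;

    ret = upcall(&h);
    printf("ret=%d refs=%d\n", ret, h.refs);   /* ret=0 refs=2 */
    h.pending = false;
    ret = upcall(&h);
    printf("ret=%d refs=%d\n", ret, h.refs);   /* ret=-11 refs=2 */
    return 0;
}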
6615 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
6616 index 711cb7ad6ae0..ab62d305b48b 100644
6617 --- a/net/wireless/nl80211.c
6618 +++ b/net/wireless/nl80211.c
6619 @@ -13201,7 +13201,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
6620 struct wireless_dev *wdev;
6621 struct cfg80211_beacon_registration *reg, *tmp;
6622
6623 - if (state != NETLINK_URELEASE)
6624 + if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
6625 return NOTIFY_DONE;
6626
6627 rcu_read_lock();
6628 diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
6629 index 0b7dc2fd7bac..dd243d2abd87 100644
6630 --- a/scripts/kconfig/confdata.c
6631 +++ b/scripts/kconfig/confdata.c
6632 @@ -267,10 +267,8 @@ int conf_read_simple(const char *name, int def)
6633 if (in)
6634 goto load;
6635 sym_add_change_count(1);
6636 - if (!sym_defconfig_list) {
6637 - sym_calc_value(modules_sym);
6638 + if (!sym_defconfig_list)
6639 return 1;
6640 - }
6641
6642 for_all_defaults(sym_defconfig_list, prop) {
6643 if (expr_calc_value(prop->visible.expr) == no ||
6644 @@ -403,7 +401,6 @@ setsym:
6645 }
6646 free(line);
6647 fclose(in);
6648 - sym_calc_value(modules_sym);
6649 return 0;
6650 }
6651
6652 @@ -414,8 +411,12 @@ int conf_read(const char *name)
6653
6654 sym_set_change_count(0);
6655
6656 - if (conf_read_simple(name, S_DEF_USER))
6657 + if (conf_read_simple(name, S_DEF_USER)) {
6658 + sym_calc_value(modules_sym);
6659 return 1;
6660 + }
6661 +
6662 + sym_calc_value(modules_sym);
6663
6664 for_all_symbols(i, sym) {
6665 sym_calc_value(sym);
6666 @@ -846,6 +847,7 @@ static int conf_split_config(void)
6667
6668 name = conf_get_autoconfig_name();
6669 conf_read_simple(name, S_DEF_AUTO);
6670 + sym_calc_value(modules_sym);
6671
6672 if (chdir("include/config"))
6673 return 1;
6674 diff --git a/security/keys/trusted.c b/security/keys/trusted.c
6675 index 0dcab20cdacd..90d61751ff12 100644
6676 --- a/security/keys/trusted.c
6677 +++ b/security/keys/trusted.c
6678 @@ -744,6 +744,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
6679 unsigned long handle;
6680 unsigned long lock;
6681 unsigned long token_mask = 0;
6682 + unsigned int digest_len;
6683 int i;
6684 int tpm2;
6685
6686 @@ -752,7 +753,6 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
6687 return tpm2;
6688
6689 opt->hash = tpm2 ? HASH_ALGO_SHA256 : HASH_ALGO_SHA1;
6690 - opt->digest_len = hash_digest_size[opt->hash];
6691
6692 while ((p = strsep(&c, " \t"))) {
6693 if (*p == '\0' || *p == ' ' || *p == '\t')
6694 @@ -812,8 +812,6 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
6695 for (i = 0; i < HASH_ALGO__LAST; i++) {
6696 if (!strcmp(args[0].from, hash_algo_name[i])) {
6697 opt->hash = i;
6698 - opt->digest_len =
6699 - hash_digest_size[opt->hash];
6700 break;
6701 }
6702 }
6703 @@ -825,13 +823,14 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
6704 }
6705 break;
6706 case Opt_policydigest:
6707 - if (!tpm2 ||
6708 - strlen(args[0].from) != (2 * opt->digest_len))
6709 + digest_len = hash_digest_size[opt->hash];
6710 + if (!tpm2 || strlen(args[0].from) != (2 * digest_len))
6711 return -EINVAL;
6712 res = hex2bin(opt->policydigest, args[0].from,
6713 - opt->digest_len);
6714 + digest_len);
6715 if (res < 0)
6716 return -EINVAL;
6717 + opt->policydigest_len = digest_len;
6718 break;
6719 case Opt_policyhandle:
6720 if (!tpm2)
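
The trusted.c change stops caching opt->digest_len before the options are parsed; a hash= token later in the string could otherwise leave a stale length. Instead, hash_digest_size[opt->hash] is read at the moment Opt_policydigest is handled, so it matches whatever hash was selected earlier on the line, and the result is recorded in opt->policydigest_len. A small sketch of the length check, with the usual SHA-1/SHA-256 digest sizes hard-coded for illustration:

#include <stdio.h>
#include <string.h>

enum { SHA1_LEN = 20, SHA256_LEN = 32 };

/* A hex-encoded policy digest must be exactly two characters per
 * digest byte, for the digest size of the currently selected hash. */
static int check_policydigest(const char *hex, unsigned int digest_len)
{
    return strlen(hex) == 2 * digest_len ? 0 : -22;   /* -EINVAL */
}

int main(void)
{
    const char *sha1_hex =
        "0123456789abcdef0123456789abcdef01234567";   /* 40 chars */

    printf("%d\n", check_policydigest(sha1_hex, SHA1_LEN));   /* 0 */
    printf("%d\n", check_policydigest(sha1_hex, SHA256_LEN)); /* -22 */
    return 0;
}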
6721 diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
6722 index f6854dbd7d8d..69ead7150a5c 100644
6723 --- a/sound/hda/hdac_i915.c
6724 +++ b/sound/hda/hdac_i915.c
6725 @@ -20,6 +20,7 @@
6726 #include <sound/core.h>
6727 #include <sound/hdaudio.h>
6728 #include <sound/hda_i915.h>
6729 +#include <sound/hda_register.h>
6730
6731 static struct i915_audio_component *hdac_acomp;
6732
6733 @@ -97,26 +98,65 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
6734 }
6735 EXPORT_SYMBOL_GPL(snd_hdac_display_power);
6736
6737 +#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
6738 + ((pci)->device == 0x0c0c) || \
6739 + ((pci)->device == 0x0d0c) || \
6740 + ((pci)->device == 0x160c))
6741 +
6742 /**
6743 - * snd_hdac_get_display_clk - Get CDCLK in kHz
6744 + * snd_hdac_i915_set_bclk - Reprogram BCLK for HSW/BDW
6745 * @bus: HDA core bus
6746 *
6747 - * This function is supposed to be used only by a HD-audio controller
6748 - * driver that needs the interaction with i915 graphics.
6749 + * The Intel HSW/BDW display HDA controller is in the GPU. Both its power and
6750 + * link BCLK depend on the GPU. Two Extended Mode registers EM4 (M value) and EM5 (N value)
6751 + * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
6752 + * BCLK = CDCLK * M / N
6753 + * The values will be lost when the display power well is disabled and need to
6754 + * be restored to avoid abnormal playback speed.
6755 *
6756 - * This function queries CDCLK value in kHz from the graphics driver and
6757 - * returns the value. A negative code is returned in error.
6758 + * Call this function when initializing or changing the power well, as well
6759 + * as from the ELD notifier on hotplug.
6760 */
6761 -int snd_hdac_get_display_clk(struct hdac_bus *bus)
6762 +void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
6763 {
6764 struct i915_audio_component *acomp = bus->audio_component;
6765 + struct pci_dev *pci = to_pci_dev(bus->dev);
6766 + int cdclk_freq;
6767 + unsigned int bclk_m, bclk_n;
6768 +
6769 + if (!acomp || !acomp->ops || !acomp->ops->get_cdclk_freq)
6770 + return; /* only for i915 binding */
6771 + if (!CONTROLLER_IN_GPU(pci))
6772 + return; /* only HSW/BDW */
6773 +
6774 + cdclk_freq = acomp->ops->get_cdclk_freq(acomp->dev);
6775 + switch (cdclk_freq) {
6776 + case 337500:
6777 + bclk_m = 16;
6778 + bclk_n = 225;
6779 + break;
6780 +
6781 + case 450000:
6782 + default: /* default CDCLK 450MHz */
6783 + bclk_m = 4;
6784 + bclk_n = 75;
6785 + break;
6786 +
6787 + case 540000:
6788 + bclk_m = 4;
6789 + bclk_n = 90;
6790 + break;
6791 +
6792 + case 675000:
6793 + bclk_m = 8;
6794 + bclk_n = 225;
6795 + break;
6796 + }
6797
6798 - if (!acomp || !acomp->ops)
6799 - return -ENODEV;
6800 -
6801 - return acomp->ops->get_cdclk_freq(acomp->dev);
6802 + snd_hdac_chip_writew(bus, HSW_EM4, bclk_m);
6803 + snd_hdac_chip_writew(bus, HSW_EM5, bclk_n);
6804 }
6805 -EXPORT_SYMBOL_GPL(snd_hdac_get_display_clk);
6806 +EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk);
6807
6808 /* There is a fixed mapping between audio pin node and display port
6809 * on current Intel platforms:
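
The helper keeps the same M/N table it had as haswell_set_bclk() in hda_intel.c; every row is just the CDCLK rate expressed against the fixed 24 MHz BCLK, so BCLK = CDCLK * M / N lands on 24000 kHz in every case. A quick arithmetic check of the table:

#include <stdio.h>

int main(void)
{
    static const struct { int cdclk_khz, m, n; } tab[] = {
        { 337500, 16, 225 },
        { 450000,  4,  75 },
        { 540000,  4,  90 },
        { 675000,  8, 225 },
    };

    for (unsigned int i = 0; i < sizeof(tab) / sizeof(tab[0]); i++)
        printf("%6d kHz * %2d / %3d = %d kHz\n", tab[i].cdclk_khz,
               tab[i].m, tab[i].n,
               tab[i].cdclk_khz * tab[i].m / tab[i].n);
    return 0;   /* every row prints 24000 kHz */
}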
6810 diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
6811 index 7ca5b89f088a..dfaf1a93fb8a 100644
6812 --- a/sound/pci/hda/hda_generic.c
6813 +++ b/sound/pci/hda/hda_generic.c
6814 @@ -826,7 +826,7 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
6815 bool allow_powerdown)
6816 {
6817 hda_nid_t nid, changed = 0;
6818 - int i, state;
6819 + int i, state, power;
6820
6821 for (i = 0; i < path->depth; i++) {
6822 nid = path->path[i];
6823 @@ -838,7 +838,9 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
6824 state = AC_PWRST_D0;
6825 else
6826 state = AC_PWRST_D3;
6827 - if (!snd_hda_check_power_state(codec, nid, state)) {
6828 + power = snd_hda_codec_read(codec, nid, 0,
6829 + AC_VERB_GET_POWER_STATE, 0);
6830 + if (power != (state | (state << 4))) {
6831 snd_hda_codec_write(codec, nid, 0,
6832 AC_VERB_SET_POWER_STATE, state);
6833 changed = nid;
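
snd_hda_check_power_state() compares only the actual-state nibble of the GET_POWER_STATE response, so a widget whose actual state matched while its programmed state did not was left alone. The fix reads the raw response, in which the low nibble is the programmed (set) state and bits 4-7 the actual state, and re-sends SET_POWER_STATE unless both nibbles already equal the target. The comparison in miniature:

#include <stdio.h>

/* Re-issue the verb unless both the set and the actual nibble of the
 * response already hold the wanted state. */
static int needs_update(unsigned int response, unsigned int state)
{
    return response != (state | (state << 4));
}

int main(void)
{
    /* want D3 (3): actual D3 but still programmed D0 -> rewrite */
    printf("%d\n", needs_update(0x30, 0x3));   /* 1 */
    printf("%d\n", needs_update(0x33, 0x3));   /* 0 */
    return 0;
}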
6834 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
6835 index e5240cb3749f..c0b772bb49af 100644
6836 --- a/sound/pci/hda/hda_intel.c
6837 +++ b/sound/pci/hda/hda_intel.c
6838 @@ -857,50 +857,6 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
6839 #define azx_del_card_list(chip) /* NOP */
6840 #endif /* CONFIG_PM */
6841
6842 -/* Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
6843 - * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
6844 - * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
6845 - * BCLK = CDCLK * M / N
6846 - * The values will be lost when the display power well is disabled and need to
6847 - * be restored to avoid abnormal playback speed.
6848 - */
6849 -static void haswell_set_bclk(struct hda_intel *hda)
6850 -{
6851 - struct azx *chip = &hda->chip;
6852 - int cdclk_freq;
6853 - unsigned int bclk_m, bclk_n;
6854 -
6855 - if (!hda->need_i915_power)
6856 - return;
6857 -
6858 - cdclk_freq = snd_hdac_get_display_clk(azx_bus(chip));
6859 - switch (cdclk_freq) {
6860 - case 337500:
6861 - bclk_m = 16;
6862 - bclk_n = 225;
6863 - break;
6864 -
6865 - case 450000:
6866 - default: /* default CDCLK 450MHz */
6867 - bclk_m = 4;
6868 - bclk_n = 75;
6869 - break;
6870 -
6871 - case 540000:
6872 - bclk_m = 4;
6873 - bclk_n = 90;
6874 - break;
6875 -
6876 - case 675000:
6877 - bclk_m = 8;
6878 - bclk_n = 225;
6879 - break;
6880 - }
6881 -
6882 - azx_writew(chip, HSW_EM4, bclk_m);
6883 - azx_writew(chip, HSW_EM5, bclk_n);
6884 -}
6885 -
6886 #if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO)
6887 /*
6888 * power management
6889 @@ -958,7 +914,7 @@ static int azx_resume(struct device *dev)
6890 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
6891 && hda->need_i915_power) {
6892 snd_hdac_display_power(azx_bus(chip), true);
6893 - haswell_set_bclk(hda);
6894 + snd_hdac_i915_set_bclk(azx_bus(chip));
6895 }
6896 if (chip->msi)
6897 if (pci_enable_msi(pci) < 0)
6898 @@ -1058,7 +1014,7 @@ static int azx_runtime_resume(struct device *dev)
6899 bus = azx_bus(chip);
6900 if (hda->need_i915_power) {
6901 snd_hdac_display_power(bus, true);
6902 - haswell_set_bclk(hda);
6903 + snd_hdac_i915_set_bclk(bus);
6904 } else {
6905 /* toggle codec wakeup bit for STATESTS read */
6906 snd_hdac_set_codec_wakeup(bus, true);
6907 @@ -1796,12 +1752,8 @@ static int azx_first_init(struct azx *chip)
6908 /* initialize chip */
6909 azx_init_pci(chip);
6910
6911 - if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
6912 - struct hda_intel *hda;
6913 -
6914 - hda = container_of(chip, struct hda_intel, chip);
6915 - haswell_set_bclk(hda);
6916 - }
6917 + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
6918 + snd_hdac_i915_set_bclk(bus);
6919
6920 hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0);
6921
6922 @@ -2232,6 +2184,9 @@ static const struct pci_device_id azx_ids[] = {
6923 /* Broxton-P(Apollolake) */
6924 { PCI_DEVICE(0x8086, 0x5a98),
6925 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
6926 + /* Broxton-T */
6927 + { PCI_DEVICE(0x8086, 0x1a98),
6928 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
6929 /* Haswell */
6930 { PCI_DEVICE(0x8086, 0x0a0c),
6931 .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
6932 diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
6933 index a47e8ae0eb30..80bbadc83721 100644
6934 --- a/sound/pci/hda/patch_cirrus.c
6935 +++ b/sound/pci/hda/patch_cirrus.c
6936 @@ -361,6 +361,7 @@ static int cs_parse_auto_config(struct hda_codec *codec)
6937 {
6938 struct cs_spec *spec = codec->spec;
6939 int err;
6940 + int i;
6941
6942 err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0);
6943 if (err < 0)
6944 @@ -370,6 +371,19 @@ static int cs_parse_auto_config(struct hda_codec *codec)
6945 if (err < 0)
6946 return err;
6947
6948 + /* keep the ADCs powered up when it's dynamically switchable */
6949 + if (spec->gen.dyn_adc_switch) {
6950 + unsigned int done = 0;
6951 + for (i = 0; i < spec->gen.input_mux.num_items; i++) {
6952 + int idx = spec->gen.dyn_adc_idx[i];
6953 + if (done & (1 << idx))
6954 + continue;
6955 + snd_hda_gen_fix_pin_power(codec,
6956 + spec->gen.adc_nids[idx]);
6957 + done |= 1 << idx;
6958 + }
6959 + }
6960 +
6961 return 0;
6962 }
6963
6964 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
6965 index 0c9585602bf3..c98e404afbe0 100644
6966 --- a/sound/pci/hda/patch_hdmi.c
6967 +++ b/sound/pci/hda/patch_hdmi.c
6968 @@ -2452,6 +2452,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
6969 if (atomic_read(&(codec)->core.in_pm))
6970 return;
6971
6972 + snd_hdac_i915_set_bclk(&codec->bus->core);
6973 check_presence_and_report(codec, pin_nid);
6974 }
6975
6976 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
6977 index 1402ba954b3d..ac4490a96863 100644
6978 --- a/sound/pci/hda/patch_realtek.c
6979 +++ b/sound/pci/hda/patch_realtek.c
6980 @@ -5449,6 +5449,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6981 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6982 SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6983 SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
6984 + SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
6985 SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
6986 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
6987 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6988 @@ -5583,6 +5584,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6989 SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
6990 SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
6991 SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
6992 + SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
6993 SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
6994 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
6995 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
6996 diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
6997 index c5194f5b150a..d7e71f309299 100644
6998 --- a/sound/pci/pcxhr/pcxhr_core.c
6999 +++ b/sound/pci/pcxhr/pcxhr_core.c
7000 @@ -1341,5 +1341,6 @@ irqreturn_t pcxhr_threaded_irq(int irq, void *dev_id)
7001 }
7002
7003 pcxhr_msg_thread(mgr);
7004 + mutex_unlock(&mgr->lock);
7005 return IRQ_HANDLED;
7006 }
7007 diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
7008 index 11d032cdc658..48dbb2fdeb09 100644
7009 --- a/sound/soc/codecs/rt5640.c
7010 +++ b/sound/soc/codecs/rt5640.c
7011 @@ -359,7 +359,7 @@ static const DECLARE_TLV_DB_RANGE(bst_tlv,
7012
7013 /* Interface data select */
7014 static const char * const rt5640_data_select[] = {
7015 - "Normal", "left copy to right", "right copy to left", "Swap"};
7016 + "Normal", "Swap", "left copy to right", "right copy to left"};
7017
7018 static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
7019 RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
7020 diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
7021 index 83a7150ddc24..f84231e7d1dd 100644
7022 --- a/sound/soc/codecs/rt5640.h
7023 +++ b/sound/soc/codecs/rt5640.h
7024 @@ -442,39 +442,39 @@
7025 #define RT5640_IF1_DAC_SEL_MASK (0x3 << 14)
7026 #define RT5640_IF1_DAC_SEL_SFT 14
7027 #define RT5640_IF1_DAC_SEL_NOR (0x0 << 14)
7028 -#define RT5640_IF1_DAC_SEL_L2R (0x1 << 14)
7029 -#define RT5640_IF1_DAC_SEL_R2L (0x2 << 14)
7030 -#define RT5640_IF1_DAC_SEL_SWAP (0x3 << 14)
7031 +#define RT5640_IF1_DAC_SEL_SWAP (0x1 << 14)
7032 +#define RT5640_IF1_DAC_SEL_L2R (0x2 << 14)
7033 +#define RT5640_IF1_DAC_SEL_R2L (0x3 << 14)
7034 #define RT5640_IF1_ADC_SEL_MASK (0x3 << 12)
7035 #define RT5640_IF1_ADC_SEL_SFT 12
7036 #define RT5640_IF1_ADC_SEL_NOR (0x0 << 12)
7037 -#define RT5640_IF1_ADC_SEL_L2R (0x1 << 12)
7038 -#define RT5640_IF1_ADC_SEL_R2L (0x2 << 12)
7039 -#define RT5640_IF1_ADC_SEL_SWAP (0x3 << 12)
7040 +#define RT5640_IF1_ADC_SEL_SWAP (0x1 << 12)
7041 +#define RT5640_IF1_ADC_SEL_L2R (0x2 << 12)
7042 +#define RT5640_IF1_ADC_SEL_R2L (0x3 << 12)
7043 #define RT5640_IF2_DAC_SEL_MASK (0x3 << 10)
7044 #define RT5640_IF2_DAC_SEL_SFT 10
7045 #define RT5640_IF2_DAC_SEL_NOR (0x0 << 10)
7046 -#define RT5640_IF2_DAC_SEL_L2R (0x1 << 10)
7047 -#define RT5640_IF2_DAC_SEL_R2L (0x2 << 10)
7048 -#define RT5640_IF2_DAC_SEL_SWAP (0x3 << 10)
7049 +#define RT5640_IF2_DAC_SEL_SWAP (0x1 << 10)
7050 +#define RT5640_IF2_DAC_SEL_L2R (0x2 << 10)
7051 +#define RT5640_IF2_DAC_SEL_R2L (0x3 << 10)
7052 #define RT5640_IF2_ADC_SEL_MASK (0x3 << 8)
7053 #define RT5640_IF2_ADC_SEL_SFT 8
7054 #define RT5640_IF2_ADC_SEL_NOR (0x0 << 8)
7055 -#define RT5640_IF2_ADC_SEL_L2R (0x1 << 8)
7056 -#define RT5640_IF2_ADC_SEL_R2L (0x2 << 8)
7057 -#define RT5640_IF2_ADC_SEL_SWAP (0x3 << 8)
7058 +#define RT5640_IF2_ADC_SEL_SWAP (0x1 << 8)
7059 +#define RT5640_IF2_ADC_SEL_L2R (0x2 << 8)
7060 +#define RT5640_IF2_ADC_SEL_R2L (0x3 << 8)
7061 #define RT5640_IF3_DAC_SEL_MASK (0x3 << 6)
7062 #define RT5640_IF3_DAC_SEL_SFT 6
7063 #define RT5640_IF3_DAC_SEL_NOR (0x0 << 6)
7064 -#define RT5640_IF3_DAC_SEL_L2R (0x1 << 6)
7065 -#define RT5640_IF3_DAC_SEL_R2L (0x2 << 6)
7066 -#define RT5640_IF3_DAC_SEL_SWAP (0x3 << 6)
7067 +#define RT5640_IF3_DAC_SEL_SWAP (0x1 << 6)
7068 +#define RT5640_IF3_DAC_SEL_L2R (0x2 << 6)
7069 +#define RT5640_IF3_DAC_SEL_R2L (0x3 << 6)
7070 #define RT5640_IF3_ADC_SEL_MASK (0x3 << 4)
7071 #define RT5640_IF3_ADC_SEL_SFT 4
7072 #define RT5640_IF3_ADC_SEL_NOR (0x0 << 4)
7073 -#define RT5640_IF3_ADC_SEL_L2R (0x1 << 4)
7074 -#define RT5640_IF3_ADC_SEL_R2L (0x2 << 4)
7075 -#define RT5640_IF3_ADC_SEL_SWAP (0x3 << 4)
7076 +#define RT5640_IF3_ADC_SEL_SWAP (0x1 << 4)
7077 +#define RT5640_IF3_ADC_SEL_L2R (0x2 << 4)
7078 +#define RT5640_IF3_ADC_SEL_R2L (0x3 << 4)
7079
7080 /* REC Left Mixer Control 1 (0x3b) */
7081 #define RT5640_G_HP_L_RM_L_MASK (0x7 << 13)
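
For an ALSA enum control, an item's position in the text array is the raw value written into the register field, so the rt5640 strings must follow the hardware encoding; the header hunk renumbers the *_SEL_* macros to the same corrected order (0 normal, 1 swap, 2 left copied to right, 3 right copied to left). A trivial dump of the corrected mapping:

#include <stdio.h>

static const char * const rt5640_data_select[] = {
    "Normal", "Swap", "left copy to right", "right copy to left"
};

int main(void)
{
    /* index written to the IFx DAC/ADC select field == register value */
    for (int v = 0; v < 4; v++)
        printf("reg value %d -> %s\n", v, rt5640_data_select[v]);
    return 0;
}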
7082 diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
7083 index e619d5651b09..080c78e88e10 100644
7084 --- a/sound/soc/codecs/ssm4567.c
7085 +++ b/sound/soc/codecs/ssm4567.c
7086 @@ -352,6 +352,11 @@ static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
7087 regcache_cache_only(ssm4567->regmap, !enable);
7088
7089 if (enable) {
7090 + ret = regmap_write(ssm4567->regmap, SSM4567_REG_SOFT_RESET,
7091 + 0x00);
7092 + if (ret)
7093 + return ret;
7094 +
7095 ret = regmap_update_bits(ssm4567->regmap,
7096 SSM4567_REG_POWER_CTRL,
7097 SSM4567_POWER_SPWDN, 0x00);
7098 diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
7099 index df65c5b494b1..b6ab3fc5789e 100644
7100 --- a/sound/soc/samsung/s3c-i2s-v2.c
7101 +++ b/sound/soc/samsung/s3c-i2s-v2.c
7102 @@ -709,7 +709,7 @@ static int s3c2412_i2s_resume(struct snd_soc_dai *dai)
7103 #endif
7104
7105 int s3c_i2sv2_register_component(struct device *dev, int id,
7106 - struct snd_soc_component_driver *cmp_drv,
7107 + const struct snd_soc_component_driver *cmp_drv,
7108 struct snd_soc_dai_driver *dai_drv)
7109 {
7110 struct snd_soc_dai_ops *ops = (struct snd_soc_dai_ops *)dai_drv->ops;
7111 diff --git a/sound/soc/samsung/s3c-i2s-v2.h b/sound/soc/samsung/s3c-i2s-v2.h
7112 index 90abab364b49..d0684145ed1f 100644
7113 --- a/sound/soc/samsung/s3c-i2s-v2.h
7114 +++ b/sound/soc/samsung/s3c-i2s-v2.h
7115 @@ -101,7 +101,7 @@ extern int s3c_i2sv2_probe(struct snd_soc_dai *dai,
7116 * soc core.
7117 */
7118 extern int s3c_i2sv2_register_component(struct device *dev, int id,
7119 - struct snd_soc_component_driver *cmp_drv,
7120 + const struct snd_soc_component_driver *cmp_drv,
7121 struct snd_soc_dai_driver *dai_drv);
7122
7123 #endif /* __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H */
7124 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
7125 index 581175a51ecf..5e811dc02fb9 100644
7126 --- a/sound/soc/soc-dapm.c
7127 +++ b/sound/soc/soc-dapm.c
7128 @@ -2188,6 +2188,13 @@ static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
7129 int count = 0;
7130 char *state = "not set";
7131
7132 + /* The card won't be set for the dummy component; as a spot fix
7133 + * we check for that case specifically here, but in future we
7134 + * will ensure that the dummy component looks like the others.
7135 + */
7136 + if (!cmpnt->card)
7137 + return 0;
7138 +
7139 list_for_each_entry(w, &cmpnt->card->widgets, list) {
7140 if (w->dapm != dapm)
7141 continue;
7142 diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
7143 index 52ef7a9d50aa..14d9e8ffaff7 100644
7144 --- a/tools/perf/Documentation/perf-stat.txt
7145 +++ b/tools/perf/Documentation/perf-stat.txt
7146 @@ -69,6 +69,14 @@ report::
7147 --scale::
7148 scale/normalize counter values
7149
7150 +-d::
7151 +--detailed::
7152 + print more detailed statistics; can be specified up to 3 times
7153 +
7154 + -d: detailed events, L1 and LLC data cache
7155 + -d -d: more detailed events, dTLB and iTLB events
7156 + -d -d -d: very detailed events, adding prefetch events
7157 +
7158 -r::
7159 --repeat=<n>::
7160 repeat command and print average + stddev (max: 100). 0 means forever.
7161 diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
7162 index 08c09ad755d2..7bb47424bc49 100644
7163 --- a/tools/perf/ui/browsers/hists.c
7164 +++ b/tools/perf/ui/browsers/hists.c
7165 @@ -302,7 +302,7 @@ static void callchain_node__init_have_children(struct callchain_node *node,
7166 chain = list_entry(node->val.next, struct callchain_list, list);
7167 chain->has_children = has_sibling;
7168
7169 - if (node->val.next != node->val.prev) {
7170 + if (!list_empty(&node->val)) {
7171 chain = list_entry(node->val.prev, struct callchain_list, list);
7172 chain->has_children = !RB_EMPTY_ROOT(&node->rb_root);
7173 }
7174 @@ -844,7 +844,7 @@ next:
7175 return row - first_row;
7176 }
7177
7178 -static int hist_browser__show_callchain(struct hist_browser *browser,
7179 +static int hist_browser__show_callchain_graph(struct hist_browser *browser,
7180 struct rb_root *root, int level,
7181 unsigned short row, u64 total,
7182 print_callchain_entry_fn print,
7183 @@ -898,7 +898,7 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
7184 else
7185 new_total = total;
7186
7187 - row += hist_browser__show_callchain(browser, &child->rb_root,
7188 + row += hist_browser__show_callchain_graph(browser, &child->rb_root,
7189 new_level, row, new_total,
7190 print, arg, is_output_full);
7191 }
7192 @@ -910,6 +910,43 @@ out:
7193 return row - first_row;
7194 }
7195
7196 +static int hist_browser__show_callchain(struct hist_browser *browser,
7197 + struct hist_entry *entry, int level,
7198 + unsigned short row,
7199 + print_callchain_entry_fn print,
7200 + struct callchain_print_arg *arg,
7201 + check_output_full_fn is_output_full)
7202 +{
7203 + u64 total = hists__total_period(entry->hists);
7204 + int printed;
7205 +
7206 + if (callchain_param.mode == CHAIN_GRAPH_REL) {
7207 + if (symbol_conf.cumulate_callchain)
7208 + total = entry->stat_acc->period;
7209 + else
7210 + total = entry->stat.period;
7211 + }
7212 +
7213 + if (callchain_param.mode == CHAIN_FLAT) {
7214 + printed = hist_browser__show_callchain_flat(browser,
7215 + &entry->sorted_chain, row, total,
7216 + print, arg, is_output_full);
7217 + } else if (callchain_param.mode == CHAIN_FOLDED) {
7218 + printed = hist_browser__show_callchain_folded(browser,
7219 + &entry->sorted_chain, row, total,
7220 + print, arg, is_output_full);
7221 + } else {
7222 + printed = hist_browser__show_callchain_graph(browser,
7223 + &entry->sorted_chain, level, row, total,
7224 + print, arg, is_output_full);
7225 + }
7226 +
7227 + if (arg->is_current_entry)
7228 + browser->he_selection = entry;
7229 +
7230 + return printed;
7231 +}
7232 +
7233 struct hpp_arg {
7234 struct ui_browser *b;
7235 char folded_sign;
7236 @@ -1084,38 +1121,14 @@ static int hist_browser__show_entry(struct hist_browser *browser,
7237 --row_offset;
7238
7239 if (folded_sign == '-' && row != browser->b.rows) {
7240 - u64 total = hists__total_period(entry->hists);
7241 struct callchain_print_arg arg = {
7242 .row_offset = row_offset,
7243 .is_current_entry = current_entry,
7244 };
7245
7246 - if (callchain_param.mode == CHAIN_GRAPH_REL) {
7247 - if (symbol_conf.cumulate_callchain)
7248 - total = entry->stat_acc->period;
7249 - else
7250 - total = entry->stat.period;
7251 - }
7252 -
7253 - if (callchain_param.mode == CHAIN_FLAT) {
7254 - printed += hist_browser__show_callchain_flat(browser,
7255 - &entry->sorted_chain, row, total,
7256 + printed += hist_browser__show_callchain(browser, entry, 1, row,
7257 hist_browser__show_callchain_entry, &arg,
7258 hist_browser__check_output_full);
7259 - } else if (callchain_param.mode == CHAIN_FOLDED) {
7260 - printed += hist_browser__show_callchain_folded(browser,
7261 - &entry->sorted_chain, row, total,
7262 - hist_browser__show_callchain_entry, &arg,
7263 - hist_browser__check_output_full);
7264 - } else {
7265 - printed += hist_browser__show_callchain(browser,
7266 - &entry->sorted_chain, 1, row, total,
7267 - hist_browser__show_callchain_entry, &arg,
7268 - hist_browser__check_output_full);
7269 - }
7270 -
7271 - if (arg.is_current_entry)
7272 - browser->he_selection = entry;
7273 }
7274
7275 return printed;
7276 @@ -1380,15 +1393,11 @@ do_offset:
7277 static int hist_browser__fprintf_callchain(struct hist_browser *browser,
7278 struct hist_entry *he, FILE *fp)
7279 {
7280 - u64 total = hists__total_period(he->hists);
7281 struct callchain_print_arg arg = {
7282 .fp = fp,
7283 };
7284
7285 - if (symbol_conf.cumulate_callchain)
7286 - total = he->stat_acc->period;
7287 -
7288 - hist_browser__show_callchain(browser, &he->sorted_chain, 1, 0, total,
7289 + hist_browser__show_callchain(browser, he, 1, 0,
7290 hist_browser__fprintf_callchain_entry, &arg,
7291 hist_browser__check_dump_full);
7292 return arg.printed;
7293 @@ -2320,10 +2329,12 @@ skip_annotation:
7294 *
7295 * See hist_browser__show_entry.
7296 */
7297 - nr_options += add_script_opt(browser,
7298 - &actions[nr_options],
7299 - &options[nr_options],
7300 - NULL, browser->selection->sym);
7301 + if (sort__has_sym && browser->selection->sym) {
7302 + nr_options += add_script_opt(browser,
7303 + &actions[nr_options],
7304 + &options[nr_options],
7305 + NULL, browser->selection->sym);
7306 + }
7307 }
7308 nr_options += add_script_opt(browser, &actions[nr_options],
7309 &options[nr_options], NULL, NULL);
7310 diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
7311 index 85155e91b61b..7bad5c3fa7b7 100644
7312 --- a/tools/perf/util/event.c
7313 +++ b/tools/perf/util/event.c
7314 @@ -282,7 +282,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
7315 strcpy(execname, "");
7316
7317 /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
7318 - n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
7319 + n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
7320 &event->mmap2.start, &event->mmap2.len, prot,
7321 &event->mmap2.pgoff, &event->mmap2.maj,
7322 &event->mmap2.min,
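
In the /proc/<pid>/maps parser, %s stops at the first blank, so a mapping whose file name contains spaces lost everything past the first word; %[^\n] consumes the rest of the line. A cut-down demonstration with a simplified maps line carrying only the fields that matter here:

#include <stdio.h>

int main(void)
{
    const char *line = "00400000-0040c000 r-xp /tmp/my lib.so";
    unsigned long start, end;
    char old_way[64], new_way[64];

    sscanf(line, "%lx-%lx %*s %63s", &start, &end, old_way);
    sscanf(line, "%lx-%lx %*s %63[^\n]", &start, &end, new_way);
    printf("%%s     -> '%s'\n", old_way);   /* '/tmp/my'        */
    printf("%%[^\\n] -> '%s'\n", new_way);  /* '/tmp/my lib.so' */
    return 0;
}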
7323 diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
7324 index d81f13de2476..a7eb0eae9938 100644
7325 --- a/tools/perf/util/evlist.c
7326 +++ b/tools/perf/util/evlist.c
7327 @@ -1181,12 +1181,12 @@ void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
7328 */
7329 if (cpus != evlist->cpus) {
7330 cpu_map__put(evlist->cpus);
7331 - evlist->cpus = cpus;
7332 + evlist->cpus = cpu_map__get(cpus);
7333 }
7334
7335 if (threads != evlist->threads) {
7336 thread_map__put(evlist->threads);
7337 - evlist->threads = threads;
7338 + evlist->threads = thread_map__get(threads);
7339 }
7340
7341 perf_evlist__propagate_maps(evlist);
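
perf_evlist__set_maps() used to store the caller's cpu/thread map pointers without taking references of its own, so the caller's subsequent cpu_map__put() or thread_map__put() could free a map the evlist still pointed at. The fix has the setter grab its own reference on store, paired with the put it already did on the old value. The idiom in a minimal refcount sketch, with illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct cpu_map { int refs; };

static struct cpu_map *map_get(struct cpu_map *m)
{
    if (m)
        m->refs++;
    return m;
}

static void map_put(struct cpu_map *m)
{
    if (m && --m->refs == 0)
        free(m);
}

struct evlist { struct cpu_map *cpus; };

static void evlist_set_maps(struct evlist *ev, struct cpu_map *cpus)
{
    if (cpus != ev->cpus) {
        map_put(ev->cpus);
        ev->cpus = map_get(cpus);   /* the fix: take a ref, don't steal */
    }
}

int main(void)
{
    struct cpu_map *m = calloc(1, sizeof(*m));
    struct evlist ev = { 0 };

    m->refs = 1;
    evlist_set_maps(&ev, m);
    map_put(m);                     /* caller drops its reference */
    printf("map refs: %d\n", ev.cpus->refs);   /* 1: still alive */
    map_put(ev.cpus);
    return 0;
}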
7342 diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
7343 index 8e75434bd01c..4d8037a3d8a4 100644
7344 --- a/tools/perf/util/evsel.h
7345 +++ b/tools/perf/util/evsel.h
7346 @@ -93,10 +93,8 @@ struct perf_evsel {
7347 const char *unit;
7348 struct event_format *tp_format;
7349 off_t id_offset;
7350 - union {
7351 - void *priv;
7352 - u64 db_id;
7353 - };
7354 + void *priv;
7355 + u64 db_id;
7356 struct cgroup_sel *cgrp;
7357 void *handler;
7358 struct cpu_map *cpus;
7359 diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
7360 index 05d815851be1..4e1590ba8902 100644
7361 --- a/tools/perf/util/intel-pt.c
7362 +++ b/tools/perf/util/intel-pt.c
7363 @@ -1127,7 +1127,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
7364 pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
7365 ret);
7366
7367 - if (pt->synth_opts.callchain)
7368 + if (pt->synth_opts.last_branch)
7369 intel_pt_reset_last_branch_rb(ptq);
7370
7371 return ret;
7372 diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
7373 index ea6064696fe4..a7b9022b5c8f 100644
7374 --- a/virt/kvm/arm/arch_timer.c
7375 +++ b/virt/kvm/arm/arch_timer.c
7376 @@ -86,6 +86,8 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
7377 vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
7378 vcpu->arch.timer_cpu.armed = false;
7379
7380 + WARN_ON(!kvm_timer_should_fire(vcpu));
7381 +
7382 /*
7383 * If the vcpu is blocked we want to wake it up so that it will see
7384 * the timer has expired when entering the guest.
7385 @@ -93,10 +95,46 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
7386 kvm_vcpu_kick(vcpu);
7387 }
7388
7389 +static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
7390 +{
7391 + cycle_t cval, now;
7392 +
7393 + cval = vcpu->arch.timer_cpu.cntv_cval;
7394 + now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
7395 +
7396 + if (now < cval) {
7397 + u64 ns;
7398 +
7399 + ns = cyclecounter_cyc2ns(timecounter->cc,
7400 + cval - now,
7401 + timecounter->mask,
7402 + &timecounter->frac);
7403 + return ns;
7404 + }
7405 +
7406 + return 0;
7407 +}
7408 +
7409 static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
7410 {
7411 struct arch_timer_cpu *timer;
7412 + struct kvm_vcpu *vcpu;
7413 + u64 ns;
7414 +
7415 timer = container_of(hrt, struct arch_timer_cpu, timer);
7416 + vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
7417 +
7418 + /*
7419 + * Check that the timer has really expired from the guest's
7420 + * PoV (NTP on the host may have forced it to expire
7421 + * early). If we should have slept longer, restart it.
7422 + */
7423 + ns = kvm_timer_compute_delta(vcpu);
7424 + if (unlikely(ns)) {
7425 + hrtimer_forward_now(hrt, ns_to_ktime(ns));
7426 + return HRTIMER_RESTART;
7427 + }
7428 +
7429 queue_work(wqueue, &timer->expired);
7430 return HRTIMER_NORESTART;
7431 }
7432 @@ -170,8 +208,6 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
7433 void kvm_timer_schedule(struct kvm_vcpu *vcpu)
7434 {
7435 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
7436 - u64 ns;
7437 - cycle_t cval, now;
7438
7439 BUG_ON(timer_is_armed(timer));
7440
7441 @@ -191,14 +227,7 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
7442 return;
7443
7444 /* The timer has not yet expired, schedule a background timer */
7445 - cval = timer->cntv_cval;
7446 - now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
7447 -
7448 - ns = cyclecounter_cyc2ns(timecounter->cc,
7449 - cval - now,
7450 - timecounter->mask,
7451 - &timecounter->frac);
7452 - timer_arm(timer, ns);
7453 + timer_arm(timer, kvm_timer_compute_delta(vcpu));
7454 }
7455
7456 void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
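
kvm_timer_compute_delta() centralizes the cval-versus-now arithmetic: subtract the per-VM cntvoff from the physical counter, and if the compare value is still ahead, convert the remaining cycles to nanoseconds; otherwise the timer has fired and the delta is 0. The expiry handler reuses it to catch a soft timer that fired early (NTP can slew the host clock): a nonzero remainder means hrtimer_forward_now() pushes the expiry out by that delta and HRTIMER_RESTART re-arms the timer instead of kicking the vcpu. A standalone sketch of the delta computation, with a made-up mult/shift pair in place of the timecounter's:

#include <stdint.h>
#include <stdio.h>

/* cyclecounter_cyc2ns() boils down to (cycles * mult) >> shift. */
static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
    return (cycles * mult) >> shift;
}

static uint64_t compute_delta(uint64_t cval, uint64_t phys_now,
                              uint64_t cntvoff, uint32_t mult, uint32_t shift)
{
    uint64_t now = phys_now - cntvoff;   /* guest view of the counter */

    return now < cval ? cyc2ns(cval - now, mult, shift) : 0;
}

int main(void)
{
    /* hypothetical 62.5 MHz counter: 1 cycle = 16 ns (mult 16, shift 0) */
    printf("%llu ns\n", (unsigned long long)
           compute_delta(2000, 1500, 500, 16, 0));   /* (2000-1000)*16 */
    printf("%llu ns\n", (unsigned long long)
           compute_delta(900, 1500, 500, 16, 0));    /* already expired: 0 */
    return 0;
}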