Contents of /trunk/kernel-magellan/patches-4.8/0105-4.8.6-all-fixes.patch
Parent Directory | Revision Log
Revision 2845 -
(show annotations)
(download)
Tue Nov 22 13:19:35 2016 UTC (7 years, 10 months ago) by niro
File size: 174633 byte(s)
-linux-4.8.6
1 | diff --git a/Makefile b/Makefile |
2 | index daa3a01d2525..b249529204cd 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 4 |
7 | PATCHLEVEL = 8 |
8 | -SUBLEVEL = 5 |
9 | +SUBLEVEL = 6 |
10 | EXTRAVERSION = |
11 | NAME = Psychotic Stoned Sheep |
12 | |
13 | diff --git a/arch/arm/boot/dts/arm-realview-eb.dtsi b/arch/arm/boot/dts/arm-realview-eb.dtsi |
14 | index 1c6a040218e3..e2e9599596e2 100644 |
15 | --- a/arch/arm/boot/dts/arm-realview-eb.dtsi |
16 | +++ b/arch/arm/boot/dts/arm-realview-eb.dtsi |
17 | @@ -51,14 +51,6 @@ |
18 | regulator-boot-on; |
19 | }; |
20 | |
21 | - veth: fixedregulator@0 { |
22 | - compatible = "regulator-fixed"; |
23 | - regulator-name = "veth"; |
24 | - regulator-min-microvolt = <3300000>; |
25 | - regulator-max-microvolt = <3300000>; |
26 | - regulator-boot-on; |
27 | - }; |
28 | - |
29 | xtal24mhz: xtal24mhz@24M { |
30 | #clock-cells = <0>; |
31 | compatible = "fixed-clock"; |
32 | @@ -134,16 +126,15 @@ |
33 | bank-width = <4>; |
34 | }; |
35 | |
36 | - /* SMSC 9118 ethernet with PHY and EEPROM */ |
37 | + /* SMSC LAN91C111 ethernet with PHY and EEPROM */ |
38 | ethernet: ethernet@4e000000 { |
39 | - compatible = "smsc,lan9118", "smsc,lan9115"; |
40 | + compatible = "smsc,lan91c111"; |
41 | reg = <0x4e000000 0x10000>; |
42 | - phy-mode = "mii"; |
43 | - reg-io-width = <4>; |
44 | - smsc,irq-active-high; |
45 | - smsc,irq-push-pull; |
46 | - vdd33a-supply = <&veth>; |
47 | - vddvario-supply = <&veth>; |
48 | + /* |
49 | + * This means the adapter can be accessed with 8, 16 or |
50 | + * 32 bit reads/writes. |
51 | + */ |
52 | + reg-io-width = <7>; |
53 | }; |
54 | |
55 | usb: usb@4f000000 { |
56 | diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts |
57 | index 03b8bbeb694f..652418aa2700 100644 |
58 | --- a/arch/arm/boot/dts/bcm958625hr.dts |
59 | +++ b/arch/arm/boot/dts/bcm958625hr.dts |
60 | @@ -47,7 +47,8 @@ |
61 | }; |
62 | |
63 | memory { |
64 | - reg = <0x60000000 0x20000000>; |
65 | + device_type = "memory"; |
66 | + reg = <0x60000000 0x80000000>; |
67 | }; |
68 | }; |
69 | |
70 | diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi |
71 | index ca86da68220c..854117dc0b77 100644 |
72 | --- a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi |
73 | +++ b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi |
74 | @@ -119,7 +119,7 @@ |
75 | pinctrl-names = "default"; |
76 | pinctrl-0 = <&mcspi1_pins>; |
77 | |
78 | - lcd0: display { |
79 | + lcd0: display@1 { |
80 | compatible = "lgphilips,lb035q02"; |
81 | label = "lcd35"; |
82 | |
83 | diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi |
84 | index f68b3242b33a..3f528a379288 100644 |
85 | --- a/arch/arm/boot/dts/sun9i-a80.dtsi |
86 | +++ b/arch/arm/boot/dts/sun9i-a80.dtsi |
87 | @@ -899,8 +899,7 @@ |
88 | resets = <&apbs_rst 0>; |
89 | gpio-controller; |
90 | interrupt-controller; |
91 | - #address-cells = <1>; |
92 | - #size-cells = <0>; |
93 | + #interrupt-cells = <3>; |
94 | #gpio-cells = <3>; |
95 | |
96 | r_ir_pins: r_ir { |
97 | diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c |
98 | index 1568cb5cd870..b88364aa149a 100644 |
99 | --- a/arch/arm/crypto/ghash-ce-glue.c |
100 | +++ b/arch/arm/crypto/ghash-ce-glue.c |
101 | @@ -220,6 +220,27 @@ static int ghash_async_digest(struct ahash_request *req) |
102 | } |
103 | } |
104 | |
105 | +static int ghash_async_import(struct ahash_request *req, const void *in) |
106 | +{ |
107 | + struct ahash_request *cryptd_req = ahash_request_ctx(req); |
108 | + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
109 | + struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); |
110 | + struct shash_desc *desc = cryptd_shash_desc(cryptd_req); |
111 | + |
112 | + desc->tfm = cryptd_ahash_child(ctx->cryptd_tfm); |
113 | + desc->flags = req->base.flags; |
114 | + |
115 | + return crypto_shash_import(desc, in); |
116 | +} |
117 | + |
118 | +static int ghash_async_export(struct ahash_request *req, void *out) |
119 | +{ |
120 | + struct ahash_request *cryptd_req = ahash_request_ctx(req); |
121 | + struct shash_desc *desc = cryptd_shash_desc(cryptd_req); |
122 | + |
123 | + return crypto_shash_export(desc, out); |
124 | +} |
125 | + |
126 | static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key, |
127 | unsigned int keylen) |
128 | { |
129 | @@ -268,7 +289,10 @@ static struct ahash_alg ghash_async_alg = { |
130 | .final = ghash_async_final, |
131 | .setkey = ghash_async_setkey, |
132 | .digest = ghash_async_digest, |
133 | + .import = ghash_async_import, |
134 | + .export = ghash_async_export, |
135 | .halg.digestsize = GHASH_DIGEST_SIZE, |
136 | + .halg.statesize = sizeof(struct ghash_desc_ctx), |
137 | .halg.base = { |
138 | .cra_name = "ghash", |
139 | .cra_driver_name = "ghash-ce", |
140 | diff --git a/arch/arm/mach-pxa/corgi_pm.c b/arch/arm/mach-pxa/corgi_pm.c |
141 | index d9206811be9b..c71c483f410e 100644 |
142 | --- a/arch/arm/mach-pxa/corgi_pm.c |
143 | +++ b/arch/arm/mach-pxa/corgi_pm.c |
144 | @@ -131,16 +131,11 @@ static int corgi_should_wakeup(unsigned int resume_on_alarm) |
145 | return is_resume; |
146 | } |
147 | |
148 | -static unsigned long corgi_charger_wakeup(void) |
149 | +static bool corgi_charger_wakeup(void) |
150 | { |
151 | - unsigned long ret; |
152 | - |
153 | - ret = (!gpio_get_value(CORGI_GPIO_AC_IN) << GPIO_bit(CORGI_GPIO_AC_IN)) |
154 | - | (!gpio_get_value(CORGI_GPIO_KEY_INT) |
155 | - << GPIO_bit(CORGI_GPIO_KEY_INT)) |
156 | - | (!gpio_get_value(CORGI_GPIO_WAKEUP) |
157 | - << GPIO_bit(CORGI_GPIO_WAKEUP)); |
158 | - return ret; |
159 | + return !gpio_get_value(CORGI_GPIO_AC_IN) || |
160 | + !gpio_get_value(CORGI_GPIO_KEY_INT) || |
161 | + !gpio_get_value(CORGI_GPIO_WAKEUP); |
162 | } |
163 | |
164 | unsigned long corgipm_read_devdata(int type) |
165 | diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c |
166 | index 2385052b0ce1..e362f865fcd2 100644 |
167 | --- a/arch/arm/mach-pxa/pxa_cplds_irqs.c |
168 | +++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c |
169 | @@ -41,30 +41,35 @@ static irqreturn_t cplds_irq_handler(int in_irq, void *d) |
170 | unsigned long pending; |
171 | unsigned int bit; |
172 | |
173 | - pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask; |
174 | - for_each_set_bit(bit, &pending, CPLDS_NB_IRQ) |
175 | - generic_handle_irq(irq_find_mapping(fpga->irqdomain, bit)); |
176 | + do { |
177 | + pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask; |
178 | + for_each_set_bit(bit, &pending, CPLDS_NB_IRQ) { |
179 | + generic_handle_irq(irq_find_mapping(fpga->irqdomain, |
180 | + bit)); |
181 | + } |
182 | + } while (pending); |
183 | |
184 | return IRQ_HANDLED; |
185 | } |
186 | |
187 | -static void cplds_irq_mask_ack(struct irq_data *d) |
188 | +static void cplds_irq_mask(struct irq_data *d) |
189 | { |
190 | struct cplds *fpga = irq_data_get_irq_chip_data(d); |
191 | unsigned int cplds_irq = irqd_to_hwirq(d); |
192 | - unsigned int set, bit = BIT(cplds_irq); |
193 | + unsigned int bit = BIT(cplds_irq); |
194 | |
195 | fpga->irq_mask &= ~bit; |
196 | writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN); |
197 | - set = readl(fpga->base + FPGA_IRQ_SET_CLR); |
198 | - writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR); |
199 | } |
200 | |
201 | static void cplds_irq_unmask(struct irq_data *d) |
202 | { |
203 | struct cplds *fpga = irq_data_get_irq_chip_data(d); |
204 | unsigned int cplds_irq = irqd_to_hwirq(d); |
205 | - unsigned int bit = BIT(cplds_irq); |
206 | + unsigned int set, bit = BIT(cplds_irq); |
207 | + |
208 | + set = readl(fpga->base + FPGA_IRQ_SET_CLR); |
209 | + writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR); |
210 | |
211 | fpga->irq_mask |= bit; |
212 | writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN); |
213 | @@ -72,7 +77,8 @@ static void cplds_irq_unmask(struct irq_data *d) |
214 | |
215 | static struct irq_chip cplds_irq_chip = { |
216 | .name = "pxa_cplds", |
217 | - .irq_mask_ack = cplds_irq_mask_ack, |
218 | + .irq_ack = cplds_irq_mask, |
219 | + .irq_mask = cplds_irq_mask, |
220 | .irq_unmask = cplds_irq_unmask, |
221 | .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE, |
222 | }; |
223 | diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c |
224 | index b80eab9993c5..249b7bd5fbc4 100644 |
225 | --- a/arch/arm/mach-pxa/sharpsl_pm.c |
226 | +++ b/arch/arm/mach-pxa/sharpsl_pm.c |
227 | @@ -744,7 +744,7 @@ static int sharpsl_off_charge_battery(void) |
228 | time = RCNR; |
229 | while (1) { |
230 | /* Check if any wakeup event had occurred */ |
231 | - if (sharpsl_pm.machinfo->charger_wakeup() != 0) |
232 | + if (sharpsl_pm.machinfo->charger_wakeup()) |
233 | return 0; |
234 | /* Check for timeout */ |
235 | if ((RCNR - time) > SHARPSL_WAIT_CO_TIME) |
236 | diff --git a/arch/arm/mach-pxa/sharpsl_pm.h b/arch/arm/mach-pxa/sharpsl_pm.h |
237 | index 905be6755f04..fa75b6df8134 100644 |
238 | --- a/arch/arm/mach-pxa/sharpsl_pm.h |
239 | +++ b/arch/arm/mach-pxa/sharpsl_pm.h |
240 | @@ -34,7 +34,7 @@ struct sharpsl_charger_machinfo { |
241 | #define SHARPSL_STATUS_LOCK 5 |
242 | #define SHARPSL_STATUS_CHRGFULL 6 |
243 | #define SHARPSL_STATUS_FATAL 7 |
244 | - unsigned long (*charger_wakeup)(void); |
245 | + bool (*charger_wakeup)(void); |
246 | int (*should_wakeup)(unsigned int resume_on_alarm); |
247 | void (*backlight_limit)(int); |
248 | int (*backlight_get_status) (void); |
249 | diff --git a/arch/arm/mach-pxa/spitz_pm.c b/arch/arm/mach-pxa/spitz_pm.c |
250 | index ea9f9034cb54..4e64a140252e 100644 |
251 | --- a/arch/arm/mach-pxa/spitz_pm.c |
252 | +++ b/arch/arm/mach-pxa/spitz_pm.c |
253 | @@ -165,13 +165,10 @@ static int spitz_should_wakeup(unsigned int resume_on_alarm) |
254 | return is_resume; |
255 | } |
256 | |
257 | -static unsigned long spitz_charger_wakeup(void) |
258 | +static bool spitz_charger_wakeup(void) |
259 | { |
260 | - unsigned long ret; |
261 | - ret = ((!gpio_get_value(SPITZ_GPIO_KEY_INT) |
262 | - << GPIO_bit(SPITZ_GPIO_KEY_INT)) |
263 | - | gpio_get_value(SPITZ_GPIO_SYNC)); |
264 | - return ret; |
265 | + return !gpio_get_value(SPITZ_GPIO_KEY_INT) || |
266 | + gpio_get_value(SPITZ_GPIO_SYNC); |
267 | } |
268 | |
269 | unsigned long spitzpm_read_devdata(int type) |
270 | diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h |
271 | index 263bf39ced40..9bd84ba06ec4 100644 |
272 | --- a/arch/powerpc/include/asm/book3s/64/pgtable.h |
273 | +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h |
274 | @@ -6,6 +6,8 @@ |
275 | */ |
276 | #define _PAGE_BIT_SWAP_TYPE 0 |
277 | |
278 | +#define _PAGE_RO 0 |
279 | + |
280 | #define _PAGE_EXEC 0x00001 /* execute permission */ |
281 | #define _PAGE_WRITE 0x00002 /* write access allowed */ |
282 | #define _PAGE_READ 0x00004 /* read access allowed */ |
283 | diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c |
284 | index 64174bf95611..05a0a913ec38 100644 |
285 | --- a/arch/powerpc/kernel/nvram_64.c |
286 | +++ b/arch/powerpc/kernel/nvram_64.c |
287 | @@ -956,7 +956,7 @@ int __init nvram_remove_partition(const char *name, int sig, |
288 | |
289 | /* Make partition a free partition */ |
290 | part->header.signature = NVRAM_SIG_FREE; |
291 | - strncpy(part->header.name, "wwwwwwwwwwww", 12); |
292 | + memset(part->header.name, 'w', 12); |
293 | part->header.checksum = nvram_checksum(&part->header); |
294 | rc = nvram_write_header(part); |
295 | if (rc <= 0) { |
296 | @@ -974,8 +974,8 @@ int __init nvram_remove_partition(const char *name, int sig, |
297 | } |
298 | if (prev) { |
299 | prev->header.length += part->header.length; |
300 | - prev->header.checksum = nvram_checksum(&part->header); |
301 | - rc = nvram_write_header(part); |
302 | + prev->header.checksum = nvram_checksum(&prev->header); |
303 | + rc = nvram_write_header(prev); |
304 | if (rc <= 0) { |
305 | printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc); |
306 | return rc; |
307 | diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c |
308 | index 9ee2623e0f67..ad37aa175f59 100644 |
309 | --- a/arch/powerpc/kernel/process.c |
310 | +++ b/arch/powerpc/kernel/process.c |
311 | @@ -88,7 +88,13 @@ static void check_if_tm_restore_required(struct task_struct *tsk) |
312 | set_thread_flag(TIF_RESTORE_TM); |
313 | } |
314 | } |
315 | + |
316 | +static inline bool msr_tm_active(unsigned long msr) |
317 | +{ |
318 | + return MSR_TM_ACTIVE(msr); |
319 | +} |
320 | #else |
321 | +static inline bool msr_tm_active(unsigned long msr) { return false; } |
322 | static inline void check_if_tm_restore_required(struct task_struct *tsk) { } |
323 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ |
324 | |
325 | @@ -208,7 +214,7 @@ void enable_kernel_fp(void) |
326 | EXPORT_SYMBOL(enable_kernel_fp); |
327 | |
328 | static int restore_fp(struct task_struct *tsk) { |
329 | - if (tsk->thread.load_fp) { |
330 | + if (tsk->thread.load_fp || msr_tm_active(tsk->thread.regs->msr)) { |
331 | load_fp_state(¤t->thread.fp_state); |
332 | current->thread.load_fp++; |
333 | return 1; |
334 | @@ -278,7 +284,8 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread); |
335 | |
336 | static int restore_altivec(struct task_struct *tsk) |
337 | { |
338 | - if (cpu_has_feature(CPU_FTR_ALTIVEC) && tsk->thread.load_vec) { |
339 | + if (cpu_has_feature(CPU_FTR_ALTIVEC) && |
340 | + (tsk->thread.load_vec || msr_tm_active(tsk->thread.regs->msr))) { |
341 | load_vr_state(&tsk->thread.vr_state); |
342 | tsk->thread.used_vr = 1; |
343 | tsk->thread.load_vec++; |
344 | @@ -438,6 +445,7 @@ void giveup_all(struct task_struct *tsk) |
345 | return; |
346 | |
347 | msr_check_and_set(msr_all_available); |
348 | + check_if_tm_restore_required(tsk); |
349 | |
350 | #ifdef CONFIG_PPC_FPU |
351 | if (usermsr & MSR_FP) |
352 | @@ -464,7 +472,8 @@ void restore_math(struct pt_regs *regs) |
353 | { |
354 | unsigned long msr; |
355 | |
356 | - if (!current->thread.load_fp && !loadvec(current->thread)) |
357 | + if (!msr_tm_active(regs->msr) && |
358 | + !current->thread.load_fp && !loadvec(current->thread)) |
359 | return; |
360 | |
361 | msr = regs->msr; |
362 | @@ -983,6 +992,13 @@ void restore_tm_state(struct pt_regs *regs) |
363 | msr_diff = current->thread.ckpt_regs.msr & ~regs->msr; |
364 | msr_diff &= MSR_FP | MSR_VEC | MSR_VSX; |
365 | |
366 | + /* Ensure that restore_math() will restore */ |
367 | + if (msr_diff & MSR_FP) |
368 | + current->thread.load_fp = 1; |
369 | +#ifdef CONFIG_ALTIVEC |
370 | + if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC) |
371 | + current->thread.load_vec = 1; |
372 | +#endif |
373 | restore_math(regs); |
374 | |
375 | regs->msr |= msr_diff; |
376 | diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c |
377 | index 7372ee13eb1e..a5d3ecdabc44 100644 |
378 | --- a/arch/powerpc/mm/hugetlbpage.c |
379 | +++ b/arch/powerpc/mm/hugetlbpage.c |
380 | @@ -1019,8 +1019,15 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, |
381 | |
382 | pte = READ_ONCE(*ptep); |
383 | mask = _PAGE_PRESENT | _PAGE_READ; |
384 | + |
385 | + /* |
386 | + * On some CPUs like the 8xx, _PAGE_RW hence _PAGE_WRITE is defined |
387 | + * as 0 and _PAGE_RO has to be set when a page is not writable |
388 | + */ |
389 | if (write) |
390 | mask |= _PAGE_WRITE; |
391 | + else |
392 | + mask |= _PAGE_RO; |
393 | |
394 | if ((pte_val(pte) & mask) != mask) |
395 | return 0; |
396 | diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c |
397 | index de7501edb21c..8b8852bc2f4a 100644 |
398 | --- a/arch/x86/kernel/early-quirks.c |
399 | +++ b/arch/x86/kernel/early-quirks.c |
400 | @@ -317,16 +317,11 @@ static phys_addr_t __init i85x_stolen_base(int num, int slot, int func, |
401 | static phys_addr_t __init i865_stolen_base(int num, int slot, int func, |
402 | size_t stolen_size) |
403 | { |
404 | - u16 toud; |
405 | + u16 toud = 0; |
406 | |
407 | - /* |
408 | - * FIXME is the graphics stolen memory region |
409 | - * always at TOUD? Ie. is it always the last |
410 | - * one to be allocated by the BIOS? |
411 | - */ |
412 | toud = read_pci_config_16(0, 0, 0, I865_TOUD); |
413 | |
414 | - return (phys_addr_t)toud << 16; |
415 | + return (phys_addr_t)(toud << 16) + i845_tseg_size(); |
416 | } |
417 | |
418 | static phys_addr_t __init gen3_stolen_base(int num, int slot, int func, |
419 | diff --git a/crypto/gcm.c b/crypto/gcm.c |
420 | index 70a892e87ccb..f624ac98c94e 100644 |
421 | --- a/crypto/gcm.c |
422 | +++ b/crypto/gcm.c |
423 | @@ -117,7 +117,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, |
424 | struct crypto_skcipher *ctr = ctx->ctr; |
425 | struct { |
426 | be128 hash; |
427 | - u8 iv[8]; |
428 | + u8 iv[16]; |
429 | |
430 | struct crypto_gcm_setkey_result result; |
431 | |
432 | diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c |
433 | index 01d4be2c354b..f5c26a5f6875 100644 |
434 | --- a/drivers/char/hw_random/omap-rng.c |
435 | +++ b/drivers/char/hw_random/omap-rng.c |
436 | @@ -385,7 +385,7 @@ static int omap_rng_probe(struct platform_device *pdev) |
437 | |
438 | pm_runtime_enable(&pdev->dev); |
439 | ret = pm_runtime_get_sync(&pdev->dev); |
440 | - if (ret) { |
441 | + if (ret < 0) { |
442 | dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret); |
443 | pm_runtime_put_noidle(&pdev->dev); |
444 | goto err_ioremap; |
445 | @@ -443,7 +443,7 @@ static int __maybe_unused omap_rng_resume(struct device *dev) |
446 | int ret; |
447 | |
448 | ret = pm_runtime_get_sync(dev); |
449 | - if (ret) { |
450 | + if (ret < 0) { |
451 | dev_err(dev, "Failed to runtime_get device: %d\n", ret); |
452 | pm_runtime_put_noidle(dev); |
453 | return ret; |
454 | diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c |
455 | index 7a7970865c2d..0fc71cbaa440 100644 |
456 | --- a/drivers/clk/bcm/clk-bcm2835.c |
457 | +++ b/drivers/clk/bcm/clk-bcm2835.c |
458 | @@ -1006,16 +1006,28 @@ static int bcm2835_clock_set_rate(struct clk_hw *hw, |
459 | return 0; |
460 | } |
461 | |
462 | +static bool |
463 | +bcm2835_clk_is_pllc(struct clk_hw *hw) |
464 | +{ |
465 | + if (!hw) |
466 | + return false; |
467 | + |
468 | + return strncmp(clk_hw_get_name(hw), "pllc", 4) == 0; |
469 | +} |
470 | + |
471 | static int bcm2835_clock_determine_rate(struct clk_hw *hw, |
472 | struct clk_rate_request *req) |
473 | { |
474 | struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw); |
475 | struct clk_hw *parent, *best_parent = NULL; |
476 | + bool current_parent_is_pllc; |
477 | unsigned long rate, best_rate = 0; |
478 | unsigned long prate, best_prate = 0; |
479 | size_t i; |
480 | u32 div; |
481 | |
482 | + current_parent_is_pllc = bcm2835_clk_is_pllc(clk_hw_get_parent(hw)); |
483 | + |
484 | /* |
485 | * Select parent clock that results in the closest but lower rate |
486 | */ |
487 | @@ -1023,6 +1035,17 @@ static int bcm2835_clock_determine_rate(struct clk_hw *hw, |
488 | parent = clk_hw_get_parent_by_index(hw, i); |
489 | if (!parent) |
490 | continue; |
491 | + |
492 | + /* |
493 | + * Don't choose a PLLC-derived clock as our parent |
494 | + * unless it had been manually set that way. PLLC's |
495 | + * frequency gets adjusted by the firmware due to |
496 | + * over-temp or under-voltage conditions, without |
497 | + * prior notification to our clock consumer. |
498 | + */ |
499 | + if (bcm2835_clk_is_pllc(parent) && !current_parent_is_pllc) |
500 | + continue; |
501 | + |
502 | prate = clk_hw_get_rate(parent); |
503 | div = bcm2835_clock_choose_div(hw, req->rate, prate, true); |
504 | rate = bcm2835_clock_rate_from_divisor(clock, prate, div); |
505 | diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c |
506 | index a0f55bc1ad3d..96386ffc8483 100644 |
507 | --- a/drivers/clk/clk-divider.c |
508 | +++ b/drivers/clk/clk-divider.c |
509 | @@ -352,7 +352,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, |
510 | |
511 | /* if read only, just return current value */ |
512 | if (divider->flags & CLK_DIVIDER_READ_ONLY) { |
513 | - bestdiv = readl(divider->reg) >> divider->shift; |
514 | + bestdiv = clk_readl(divider->reg) >> divider->shift; |
515 | bestdiv &= div_mask(divider->width); |
516 | bestdiv = _get_div(divider->table, bestdiv, divider->flags, |
517 | divider->width); |
518 | diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c |
519 | index 58566a17944a..20b105584f82 100644 |
520 | --- a/drivers/clk/clk-qoriq.c |
521 | +++ b/drivers/clk/clk-qoriq.c |
522 | @@ -766,7 +766,11 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx) |
523 | if (!hwc) |
524 | return NULL; |
525 | |
526 | - hwc->reg = cg->regs + 0x20 * idx; |
527 | + if (cg->info.flags & CG_VER3) |
528 | + hwc->reg = cg->regs + 0x70000 + 0x20 * idx; |
529 | + else |
530 | + hwc->reg = cg->regs + 0x20 * idx; |
531 | + |
532 | hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]]; |
533 | |
534 | /* |
535 | diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c |
536 | index 820a939fb6bb..2877a4ddeda2 100644 |
537 | --- a/drivers/clk/clk.c |
538 | +++ b/drivers/clk/clk.c |
539 | @@ -1908,10 +1908,6 @@ int clk_set_phase(struct clk *clk, int degrees) |
540 | |
541 | clk_prepare_lock(); |
542 | |
543 | - /* bail early if nothing to do */ |
544 | - if (degrees == clk->core->phase) |
545 | - goto out; |
546 | - |
547 | trace_clk_set_phase(clk->core, degrees); |
548 | |
549 | if (clk->core->ops->set_phase) |
550 | @@ -1922,7 +1918,6 @@ int clk_set_phase(struct clk *clk, int degrees) |
551 | if (!ret) |
552 | clk->core->phase = degrees; |
553 | |
554 | -out: |
555 | clk_prepare_unlock(); |
556 | |
557 | return ret; |
558 | @@ -3186,7 +3181,7 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec, |
559 | { |
560 | struct of_clk_provider *provider; |
561 | struct clk *clk = ERR_PTR(-EPROBE_DEFER); |
562 | - struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER); |
563 | + struct clk_hw *hw; |
564 | |
565 | if (!clkspec) |
566 | return ERR_PTR(-EINVAL); |
567 | @@ -3194,12 +3189,13 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec, |
568 | /* Check if we have such a provider in our array */ |
569 | mutex_lock(&of_clk_mutex); |
570 | list_for_each_entry(provider, &of_clk_providers, link) { |
571 | - if (provider->node == clkspec->np) |
572 | + if (provider->node == clkspec->np) { |
573 | hw = __of_clk_get_hw_from_provider(provider, clkspec); |
574 | - if (!IS_ERR(hw)) { |
575 | clk = __clk_create_clk(hw, dev_id, con_id); |
576 | + } |
577 | |
578 | - if (!IS_ERR(clk) && !__clk_get(clk)) { |
579 | + if (!IS_ERR(clk)) { |
580 | + if (!__clk_get(clk)) { |
581 | __clk_free_clk(clk); |
582 | clk = ERR_PTR(-ENOENT); |
583 | } |
584 | diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c |
585 | index b0978d3b83e2..d302ed3b8225 100644 |
586 | --- a/drivers/clk/imx/clk-imx35.c |
587 | +++ b/drivers/clk/imx/clk-imx35.c |
588 | @@ -115,7 +115,7 @@ static void __init _mx35_clocks_init(void) |
589 | } |
590 | |
591 | clk[ckih] = imx_clk_fixed("ckih", 24000000); |
592 | - clk[ckil] = imx_clk_fixed("ckih", 32768); |
593 | + clk[ckil] = imx_clk_fixed("ckil", 32768); |
594 | clk[mpll] = imx_clk_pllv1(IMX_PLLV1_IMX35, "mpll", "ckih", base + MX35_CCM_MPCTL); |
595 | clk[ppll] = imx_clk_pllv1(IMX_PLLV1_IMX35, "ppll", "ckih", base + MX35_CCM_PPCTL); |
596 | |
597 | diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig |
598 | index 95e3b3e0fa1c..98909b184d44 100644 |
599 | --- a/drivers/clk/qcom/Kconfig |
600 | +++ b/drivers/clk/qcom/Kconfig |
601 | @@ -117,6 +117,7 @@ config MSM_MMCC_8974 |
602 | |
603 | config MSM_GCC_8996 |
604 | tristate "MSM8996 Global Clock Controller" |
605 | + select QCOM_GDSC |
606 | depends on COMMON_CLK_QCOM |
607 | help |
608 | Support for the global clock controller on msm8996 devices. |
609 | @@ -126,6 +127,7 @@ config MSM_GCC_8996 |
610 | config MSM_MMCC_8996 |
611 | tristate "MSM8996 Multimedia Clock Controller" |
612 | select MSM_GCC_8996 |
613 | + select QCOM_GDSC |
614 | depends on COMMON_CLK_QCOM |
615 | help |
616 | Support for the multimedia clock controller on msm8996 devices. |
617 | diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c |
618 | index bbf732bbc3fd..9f643cca85d0 100644 |
619 | --- a/drivers/clk/qcom/gcc-msm8996.c |
620 | +++ b/drivers/clk/qcom/gcc-msm8996.c |
621 | @@ -2592,9 +2592,9 @@ static struct clk_branch gcc_pcie_2_aux_clk = { |
622 | }; |
623 | |
624 | static struct clk_branch gcc_pcie_2_pipe_clk = { |
625 | - .halt_reg = 0x6e108, |
626 | + .halt_reg = 0x6e018, |
627 | .clkr = { |
628 | - .enable_reg = 0x6e108, |
629 | + .enable_reg = 0x6e018, |
630 | .enable_mask = BIT(0), |
631 | .hw.init = &(struct clk_init_data){ |
632 | .name = "gcc_pcie_2_pipe_clk", |
633 | diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c |
634 | index 94f77b0f9ae7..32f645ea77b8 100644 |
635 | --- a/drivers/crypto/ccp/ccp-dmaengine.c |
636 | +++ b/drivers/crypto/ccp/ccp-dmaengine.c |
637 | @@ -650,7 +650,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp) |
638 | dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL, |
639 | "%s-dmaengine-desc-cache", |
640 | ccp->name); |
641 | - if (!dma_cmd_cache_name) |
642 | + if (!dma_desc_cache_name) |
643 | return -ENOMEM; |
644 | ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name, |
645 | sizeof(struct ccp_dma_desc), |
646 | diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c |
647 | index d64af8625d7e..37dadb2a4feb 100644 |
648 | --- a/drivers/crypto/marvell/cesa.c |
649 | +++ b/drivers/crypto/marvell/cesa.c |
650 | @@ -166,6 +166,7 @@ static irqreturn_t mv_cesa_int(int irq, void *priv) |
651 | if (!req) |
652 | break; |
653 | |
654 | + ctx = crypto_tfm_ctx(req->tfm); |
655 | mv_cesa_complete_req(ctx, req, 0); |
656 | } |
657 | } |
658 | diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c |
659 | index 82e0f4e6eb1c..b111e14bac1e 100644 |
660 | --- a/drivers/crypto/marvell/hash.c |
661 | +++ b/drivers/crypto/marvell/hash.c |
662 | @@ -805,13 +805,14 @@ static int mv_cesa_md5_init(struct ahash_request *req) |
663 | struct mv_cesa_op_ctx tmpl = { }; |
664 | |
665 | mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5); |
666 | + |
667 | + mv_cesa_ahash_init(req, &tmpl, true); |
668 | + |
669 | creq->state[0] = MD5_H0; |
670 | creq->state[1] = MD5_H1; |
671 | creq->state[2] = MD5_H2; |
672 | creq->state[3] = MD5_H3; |
673 | |
674 | - mv_cesa_ahash_init(req, &tmpl, true); |
675 | - |
676 | return 0; |
677 | } |
678 | |
679 | @@ -873,14 +874,15 @@ static int mv_cesa_sha1_init(struct ahash_request *req) |
680 | struct mv_cesa_op_ctx tmpl = { }; |
681 | |
682 | mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1); |
683 | + |
684 | + mv_cesa_ahash_init(req, &tmpl, false); |
685 | + |
686 | creq->state[0] = SHA1_H0; |
687 | creq->state[1] = SHA1_H1; |
688 | creq->state[2] = SHA1_H2; |
689 | creq->state[3] = SHA1_H3; |
690 | creq->state[4] = SHA1_H4; |
691 | |
692 | - mv_cesa_ahash_init(req, &tmpl, false); |
693 | - |
694 | return 0; |
695 | } |
696 | |
697 | @@ -942,6 +944,9 @@ static int mv_cesa_sha256_init(struct ahash_request *req) |
698 | struct mv_cesa_op_ctx tmpl = { }; |
699 | |
700 | mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256); |
701 | + |
702 | + mv_cesa_ahash_init(req, &tmpl, false); |
703 | + |
704 | creq->state[0] = SHA256_H0; |
705 | creq->state[1] = SHA256_H1; |
706 | creq->state[2] = SHA256_H2; |
707 | @@ -951,8 +956,6 @@ static int mv_cesa_sha256_init(struct ahash_request *req) |
708 | creq->state[6] = SHA256_H6; |
709 | creq->state[7] = SHA256_H7; |
710 | |
711 | - mv_cesa_ahash_init(req, &tmpl, false); |
712 | - |
713 | return 0; |
714 | } |
715 | |
716 | diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c |
717 | index 2bf37e68ad0f..dd184b50e5b4 100644 |
718 | --- a/drivers/dma/ipu/ipu_irq.c |
719 | +++ b/drivers/dma/ipu/ipu_irq.c |
720 | @@ -286,22 +286,21 @@ static void ipu_irq_handler(struct irq_desc *desc) |
721 | raw_spin_unlock(&bank_lock); |
722 | while ((line = ffs(status))) { |
723 | struct ipu_irq_map *map; |
724 | - unsigned int irq = NO_IRQ; |
725 | + unsigned int irq; |
726 | |
727 | line--; |
728 | status &= ~(1UL << line); |
729 | |
730 | raw_spin_lock(&bank_lock); |
731 | map = src2map(32 * i + line); |
732 | - if (map) |
733 | - irq = map->irq; |
734 | - raw_spin_unlock(&bank_lock); |
735 | - |
736 | if (!map) { |
737 | + raw_spin_unlock(&bank_lock); |
738 | pr_err("IPU: Interrupt on unmapped source %u bank %d\n", |
739 | line, i); |
740 | continue; |
741 | } |
742 | + irq = map->irq; |
743 | + raw_spin_unlock(&bank_lock); |
744 | generic_handle_irq(irq); |
745 | } |
746 | } |
747 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c |
748 | index 17e13621fae9..4e71a680e91b 100644 |
749 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c |
750 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c |
751 | @@ -43,6 +43,9 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx) |
752 | ctx->rings[i].sequence = 1; |
753 | ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i]; |
754 | } |
755 | + |
756 | + ctx->reset_counter = atomic_read(&adev->gpu_reset_counter); |
757 | + |
758 | /* create context entity for each ring */ |
759 | for (i = 0; i < adev->num_rings; i++) { |
760 | struct amdgpu_ring *ring = adev->rings[i]; |
761 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c |
762 | index fe36caf1b7d7..14f57d9915e3 100644 |
763 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c |
764 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c |
765 | @@ -113,24 +113,26 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, |
766 | printk("\n"); |
767 | } |
768 | |
769 | + |
770 | u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) |
771 | { |
772 | struct drm_device *dev = adev->ddev; |
773 | struct drm_crtc *crtc; |
774 | struct amdgpu_crtc *amdgpu_crtc; |
775 | - u32 line_time_us, vblank_lines; |
776 | + u32 vblank_in_pixels; |
777 | u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ |
778 | |
779 | if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { |
780 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
781 | amdgpu_crtc = to_amdgpu_crtc(crtc); |
782 | if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { |
783 | - line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) / |
784 | - amdgpu_crtc->hw_mode.clock; |
785 | - vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end - |
786 | + vblank_in_pixels = |
787 | + amdgpu_crtc->hw_mode.crtc_htotal * |
788 | + (amdgpu_crtc->hw_mode.crtc_vblank_end - |
789 | amdgpu_crtc->hw_mode.crtc_vdisplay + |
790 | - (amdgpu_crtc->v_border * 2); |
791 | - vblank_time_us = vblank_lines * line_time_us; |
792 | + (amdgpu_crtc->v_border * 2)); |
793 | + |
794 | + vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock; |
795 | break; |
796 | } |
797 | } |
798 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |
799 | index d942654a1de0..e24a8af72d90 100644 |
800 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |
801 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |
802 | @@ -292,7 +292,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file |
803 | type = AMD_IP_BLOCK_TYPE_UVD; |
804 | ring_mask = adev->uvd.ring.ready ? 1 : 0; |
805 | ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; |
806 | - ib_size_alignment = 8; |
807 | + ib_size_alignment = 16; |
808 | break; |
809 | case AMDGPU_HW_IP_VCE: |
810 | type = AMD_IP_BLOCK_TYPE_VCE; |
811 | diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c |
812 | index c1b04e9aab57..172bed946287 100644 |
813 | --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c |
814 | +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c |
815 | @@ -425,16 +425,6 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev) |
816 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
817 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); |
818 | |
819 | - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || |
820 | - connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { |
821 | - /* don't try to enable hpd on eDP or LVDS avoid breaking the |
822 | - * aux dp channel on imac and help (but not completely fix) |
823 | - * https://bugzilla.redhat.com/show_bug.cgi?id=726143 |
824 | - * also avoid interrupt storms during dpms. |
825 | - */ |
826 | - continue; |
827 | - } |
828 | - |
829 | switch (amdgpu_connector->hpd.hpd) { |
830 | case AMDGPU_HPD_1: |
831 | idx = 0; |
832 | @@ -458,6 +448,19 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev) |
833 | continue; |
834 | } |
835 | |
836 | + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || |
837 | + connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { |
838 | + /* don't try to enable hpd on eDP or LVDS avoid breaking the |
839 | + * aux dp channel on imac and help (but not completely fix) |
840 | + * https://bugzilla.redhat.com/show_bug.cgi?id=726143 |
841 | + * also avoid interrupt storms during dpms. |
842 | + */ |
843 | + tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]); |
844 | + tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); |
845 | + WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp); |
846 | + continue; |
847 | + } |
848 | + |
849 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); |
850 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); |
851 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); |
852 | diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c |
853 | index d4bf133908b1..67c7c05a751c 100644 |
854 | --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c |
855 | +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c |
856 | @@ -443,16 +443,6 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev) |
857 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
858 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); |
859 | |
860 | - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || |
861 | - connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { |
862 | - /* don't try to enable hpd on eDP or LVDS avoid breaking the |
863 | - * aux dp channel on imac and help (but not completely fix) |
864 | - * https://bugzilla.redhat.com/show_bug.cgi?id=726143 |
865 | - * also avoid interrupt storms during dpms. |
866 | - */ |
867 | - continue; |
868 | - } |
869 | - |
870 | switch (amdgpu_connector->hpd.hpd) { |
871 | case AMDGPU_HPD_1: |
872 | idx = 0; |
873 | @@ -476,6 +466,19 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev) |
874 | continue; |
875 | } |
876 | |
877 | + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || |
878 | + connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { |
879 | + /* don't try to enable hpd on eDP or LVDS avoid breaking the |
880 | + * aux dp channel on imac and help (but not completely fix) |
881 | + * https://bugzilla.redhat.com/show_bug.cgi?id=726143 |
882 | + * also avoid interrupt storms during dpms. |
883 | + */ |
884 | + tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]); |
885 | + tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); |
886 | + WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp); |
887 | + continue; |
888 | + } |
889 | + |
890 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); |
891 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); |
892 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); |
893 | @@ -3109,6 +3112,7 @@ static int dce_v11_0_sw_fini(void *handle) |
894 | |
895 | dce_v11_0_afmt_fini(adev); |
896 | |
897 | + drm_mode_config_cleanup(adev->ddev); |
898 | adev->mode_info.mode_config_initialized = false; |
899 | |
900 | return 0; |
901 | diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c |
902 | index 4fdfab1e9200..ea07c50369b4 100644 |
903 | --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c |
904 | +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c |
905 | @@ -395,15 +395,6 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev) |
906 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
907 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); |
908 | |
909 | - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || |
910 | - connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { |
911 | - /* don't try to enable hpd on eDP or LVDS avoid breaking the |
912 | - * aux dp channel on imac and help (but not completely fix) |
913 | - * https://bugzilla.redhat.com/show_bug.cgi?id=726143 |
914 | - * also avoid interrupt storms during dpms. |
915 | - */ |
916 | - continue; |
917 | - } |
918 | switch (amdgpu_connector->hpd.hpd) { |
919 | case AMDGPU_HPD_1: |
920 | WREG32(mmDC_HPD1_CONTROL, tmp); |
921 | @@ -426,6 +417,45 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev) |
922 | default: |
923 | break; |
924 | } |
925 | + |
926 | + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || |
927 | + connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { |
928 | + /* don't try to enable hpd on eDP or LVDS avoid breaking the |
929 | + * aux dp channel on imac and help (but not completely fix) |
930 | + * https://bugzilla.redhat.com/show_bug.cgi?id=726143 |
931 | + * also avoid interrupt storms during dpms. |
932 | + */ |
933 | + u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl; |
934 | + |
935 | + switch (amdgpu_connector->hpd.hpd) { |
936 | + case AMDGPU_HPD_1: |
937 | + dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL; |
938 | + break; |
939 | + case AMDGPU_HPD_2: |
940 | + dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL; |
941 | + break; |
942 | + case AMDGPU_HPD_3: |
943 | + dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL; |
944 | + break; |
945 | + case AMDGPU_HPD_4: |
946 | + dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL; |
947 | + break; |
948 | + case AMDGPU_HPD_5: |
949 | + dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL; |
950 | + break; |
951 | + case AMDGPU_HPD_6: |
952 | + dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL; |
953 | + break; |
954 | + default: |
955 | + continue; |
956 | + } |
957 | + |
958 | + dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); |
959 | + dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; |
960 | + WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); |
961 | + continue; |
962 | + } |
963 | + |
964 | dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); |
965 | amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); |
966 | } |
967 | diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c |
968 | index 635fc4b48184..92b117843875 100644 |
969 | --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c |
970 | +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c |
971 | @@ -262,6 +262,8 @@ static const pem_event_action * const display_config_change_event[] = { |
972 | unblock_adjust_power_state_tasks, |
973 | set_cpu_power_state, |
974 | notify_hw_power_source_tasks, |
975 | + get_2d_performance_state_tasks, |
976 | + set_performance_state_tasks, |
977 | /* updateDALConfigurationTasks, |
978 | variBrightDisplayConfigurationChangeTasks, */ |
979 | adjust_power_state_tasks, |
980 | diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c |
981 | index a46225c0fc01..d6bee727497c 100644 |
982 | --- a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c |
983 | +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c |
984 | @@ -100,11 +100,12 @@ int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip) |
985 | if (requested == NULL) |
986 | return 0; |
987 | |
988 | + phm_apply_state_adjust_rules(hwmgr, requested, pcurrent); |
989 | + |
990 | if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, &pcurrent->hardware, &requested->hardware, &equal))) |
991 | equal = false; |
992 | |
993 | if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) { |
994 | - phm_apply_state_adjust_rules(hwmgr, requested, pcurrent); |
995 | phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware); |
996 | hwmgr->current_ps = requested; |
997 | } |
998 | diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c |
999 | index 780589b420a4..9c4387d79d11 100644 |
1000 | --- a/drivers/gpu/drm/drm_prime.c |
1001 | +++ b/drivers/gpu/drm/drm_prime.c |
1002 | @@ -335,14 +335,17 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { |
1003 | * using the PRIME helpers. |
1004 | */ |
1005 | struct dma_buf *drm_gem_prime_export(struct drm_device *dev, |
1006 | - struct drm_gem_object *obj, int flags) |
1007 | + struct drm_gem_object *obj, |
1008 | + int flags) |
1009 | { |
1010 | - DEFINE_DMA_BUF_EXPORT_INFO(exp_info); |
1011 | - |
1012 | - exp_info.ops = &drm_gem_prime_dmabuf_ops; |
1013 | - exp_info.size = obj->size; |
1014 | - exp_info.flags = flags; |
1015 | - exp_info.priv = obj; |
1016 | + struct dma_buf_export_info exp_info = { |
1017 | + .exp_name = KBUILD_MODNAME, /* white lie for debug */ |
1018 | + .owner = dev->driver->fops->owner, |
1019 | + .ops = &drm_gem_prime_dmabuf_ops, |
1020 | + .size = obj->size, |
1021 | + .flags = flags, |
1022 | + .priv = obj, |
1023 | + }; |
1024 | |
1025 | if (dev->driver->gem_prime_res_obj) |
1026 | exp_info.resv = dev->driver->gem_prime_res_obj(obj); |
1027 | diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c |
1028 | index 7882387f9bff..5fc8ebdf40b2 100644 |
1029 | --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c |
1030 | +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c |
1031 | @@ -330,6 +330,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev) |
1032 | const char *pix_clk_in_name; |
1033 | const struct of_device_id *id; |
1034 | int ret; |
1035 | + u8 div_ratio_shift = 0; |
1036 | |
1037 | fsl_dev = devm_kzalloc(dev, sizeof(*fsl_dev), GFP_KERNEL); |
1038 | if (!fsl_dev) |
1039 | @@ -382,11 +383,14 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev) |
1040 | pix_clk_in = fsl_dev->clk; |
1041 | } |
1042 | |
1043 | + if (of_property_read_bool(dev->of_node, "big-endian")) |
1044 | + div_ratio_shift = 24; |
1045 | + |
1046 | pix_clk_in_name = __clk_get_name(pix_clk_in); |
1047 | snprintf(pix_clk_name, sizeof(pix_clk_name), "%s_pix", pix_clk_in_name); |
1048 | fsl_dev->pix_clk = clk_register_divider(dev, pix_clk_name, |
1049 | pix_clk_in_name, 0, base + DCU_DIV_RATIO, |
1050 | - 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL); |
1051 | + div_ratio_shift, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL); |
1052 | if (IS_ERR(fsl_dev->pix_clk)) { |
1053 | dev_err(dev, "failed to register pix clk\n"); |
1054 | ret = PTR_ERR(fsl_dev->pix_clk); |
1055 | diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h |
1056 | index f68c78918d63..84a00105871d 100644 |
1057 | --- a/drivers/gpu/drm/i915/i915_drv.h |
1058 | +++ b/drivers/gpu/drm/i915/i915_drv.h |
1059 | @@ -631,6 +631,8 @@ struct drm_i915_display_funcs { |
1060 | struct intel_crtc_state *crtc_state); |
1061 | void (*crtc_enable)(struct drm_crtc *crtc); |
1062 | void (*crtc_disable)(struct drm_crtc *crtc); |
1063 | + void (*update_crtcs)(struct drm_atomic_state *state, |
1064 | + unsigned int *crtc_vblank_mask); |
1065 | void (*audio_codec_enable)(struct drm_connector *connector, |
1066 | struct intel_encoder *encoder, |
1067 | const struct drm_display_mode *adjusted_mode); |
1068 | @@ -1965,11 +1967,11 @@ struct drm_i915_private { |
1069 | struct vlv_s0ix_state vlv_s0ix_state; |
1070 | |
1071 | enum { |
1072 | - I915_SKL_SAGV_UNKNOWN = 0, |
1073 | - I915_SKL_SAGV_DISABLED, |
1074 | - I915_SKL_SAGV_ENABLED, |
1075 | - I915_SKL_SAGV_NOT_CONTROLLED |
1076 | - } skl_sagv_status; |
1077 | + I915_SAGV_UNKNOWN = 0, |
1078 | + I915_SAGV_DISABLED, |
1079 | + I915_SAGV_ENABLED, |
1080 | + I915_SAGV_NOT_CONTROLLED |
1081 | + } sagv_status; |
1082 | |
1083 | struct { |
1084 | /* |
1085 | @@ -2280,21 +2282,19 @@ struct drm_i915_gem_object { |
1086 | /** Record of address bit 17 of each page at last unbind. */ |
1087 | unsigned long *bit_17; |
1088 | |
1089 | - union { |
1090 | - /** for phy allocated objects */ |
1091 | - struct drm_dma_handle *phys_handle; |
1092 | - |
1093 | - struct i915_gem_userptr { |
1094 | - uintptr_t ptr; |
1095 | - unsigned read_only :1; |
1096 | - unsigned workers :4; |
1097 | + struct i915_gem_userptr { |
1098 | + uintptr_t ptr; |
1099 | + unsigned read_only :1; |
1100 | + unsigned workers :4; |
1101 | #define I915_GEM_USERPTR_MAX_WORKERS 15 |
1102 | |
1103 | - struct i915_mm_struct *mm; |
1104 | - struct i915_mmu_object *mmu_object; |
1105 | - struct work_struct *work; |
1106 | - } userptr; |
1107 | - }; |
1108 | + struct i915_mm_struct *mm; |
1109 | + struct i915_mmu_object *mmu_object; |
1110 | + struct work_struct *work; |
1111 | + } userptr; |
1112 | + |
1113 | + /** for phys allocated objects */ |
1114 | + struct drm_dma_handle *phys_handle; |
1115 | }; |
1116 | #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) |
1117 | |
1118 | diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c |
1119 | index 66be299a1486..2bb69f3c5b84 100644 |
1120 | --- a/drivers/gpu/drm/i915/i915_gem_stolen.c |
1121 | +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c |
1122 | @@ -115,17 +115,28 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) |
1123 | |
1124 | base = bsm & INTEL_BSM_MASK; |
1125 | } else if (IS_I865G(dev)) { |
1126 | + u32 tseg_size = 0; |
1127 | u16 toud = 0; |
1128 | + u8 tmp; |
1129 | + |
1130 | + pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0), |
1131 | + I845_ESMRAMC, &tmp); |
1132 | + |
1133 | + if (tmp & TSEG_ENABLE) { |
1134 | + switch (tmp & I845_TSEG_SIZE_MASK) { |
1135 | + case I845_TSEG_SIZE_512K: |
1136 | + tseg_size = KB(512); |
1137 | + break; |
1138 | + case I845_TSEG_SIZE_1M: |
1139 | + tseg_size = MB(1); |
1140 | + break; |
1141 | + } |
1142 | + } |
1143 | |
1144 | - /* |
1145 | - * FIXME is the graphics stolen memory region |
1146 | - * always at TOUD? Ie. is it always the last |
1147 | - * one to be allocated by the BIOS? |
1148 | - */ |
1149 | pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0), |
1150 | I865_TOUD, &toud); |
1151 | |
1152 | - base = toud << 16; |
1153 | + base = (toud << 16) + tseg_size; |
1154 | } else if (IS_I85X(dev)) { |
1155 | u32 tseg_size = 0; |
1156 | u32 tom; |
1157 | diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c |
1158 | index 175595fc3e45..e9a64fba6333 100644 |
1159 | --- a/drivers/gpu/drm/i915/intel_display.c |
1160 | +++ b/drivers/gpu/drm/i915/intel_display.c |
1161 | @@ -2980,6 +2980,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane, |
1162 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); |
1163 | struct drm_framebuffer *fb = plane_state->base.fb; |
1164 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); |
1165 | + const struct skl_wm_values *wm = &dev_priv->wm.skl_results; |
1166 | int pipe = intel_crtc->pipe; |
1167 | u32 plane_ctl, stride_div, stride; |
1168 | u32 tile_height, plane_offset, plane_size; |
1169 | @@ -3031,6 +3032,9 @@ static void skylake_update_primary_plane(struct drm_plane *plane, |
1170 | intel_crtc->adjusted_x = x_offset; |
1171 | intel_crtc->adjusted_y = y_offset; |
1172 | |
1173 | + if (wm->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) |
1174 | + skl_write_plane_wm(intel_crtc, wm, 0); |
1175 | + |
1176 | I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); |
1177 | I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset); |
1178 | I915_WRITE(PLANE_SIZE(pipe, 0), plane_size); |
1179 | @@ -3061,7 +3065,15 @@ static void skylake_disable_primary_plane(struct drm_plane *primary, |
1180 | { |
1181 | struct drm_device *dev = crtc->dev; |
1182 | struct drm_i915_private *dev_priv = to_i915(dev); |
1183 | - int pipe = to_intel_crtc(crtc)->pipe; |
1184 | + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1185 | + int pipe = intel_crtc->pipe; |
1186 | + |
1187 | + /* |
1188 | + * We only populate skl_results on watermark updates, and if the |
1189 | + * plane's visiblity isn't actually changing neither is its watermarks. |
1190 | + */ |
1191 | + if (!to_intel_plane_state(crtc->primary->state)->visible) |
1192 | + skl_write_plane_wm(intel_crtc, &dev_priv->wm.skl_results, 0); |
1193 | |
1194 | I915_WRITE(PLANE_CTL(pipe, 0), 0); |
1195 | I915_WRITE(PLANE_SURF(pipe, 0), 0); |
1196 | @@ -8995,6 +9007,24 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, |
1197 | if (intel_crtc_has_dp_encoder(crtc_state)) |
1198 | dpll |= DPLL_SDVO_HIGH_SPEED; |
1199 | |
1200 | + /* |
1201 | + * The high speed IO clock is only really required for |
1202 | + * SDVO/HDMI/DP, but we also enable it for CRT to make it |
1203 | + * possible to share the DPLL between CRT and HDMI. Enabling |
1204 | + * the clock needlessly does no real harm, except use up a |
1205 | + * bit of power potentially. |
1206 | + * |
1207 | + * We'll limit this to IVB with 3 pipes, since it has only two |
1208 | + * DPLLs and so DPLL sharing is the only way to get three pipes |
1209 | + * driving PCH ports at the same time. On SNB we could do this, |
1210 | + * and potentially avoid enabling the second DPLL, but it's not |
1211 | + * clear if it''s a win or loss power wise. No point in doing |
1212 | + * this on ILK at all since it has a fixed DPLL<->pipe mapping. |
1213 | + */ |
1214 | + if (INTEL_INFO(dev_priv)->num_pipes == 3 && |
1215 | + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) |
1216 | + dpll |= DPLL_SDVO_HIGH_SPEED; |
1217 | + |
1218 | /* compute bitmask from p1 value */ |
1219 | dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; |
1220 | /* also FPA1 */ |
1221 | @@ -10306,9 +10336,13 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, |
1222 | struct drm_device *dev = crtc->dev; |
1223 | struct drm_i915_private *dev_priv = to_i915(dev); |
1224 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1225 | + const struct skl_wm_values *wm = &dev_priv->wm.skl_results; |
1226 | int pipe = intel_crtc->pipe; |
1227 | uint32_t cntl = 0; |
1228 | |
1229 | + if (INTEL_GEN(dev_priv) >= 9 && wm->dirty_pipes & drm_crtc_mask(crtc)) |
1230 | + skl_write_cursor_wm(intel_crtc, wm); |
1231 | + |
1232 | if (plane_state && plane_state->visible) { |
1233 | cntl = MCURSOR_GAMMA_ENABLE; |
1234 | switch (plane_state->base.crtc_w) { |
1235 | @@ -12956,16 +12990,23 @@ static void verify_wm_state(struct drm_crtc *crtc, |
1236 | hw_entry->start, hw_entry->end); |
1237 | } |
1238 | |
1239 | - /* cursor */ |
1240 | - hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR]; |
1241 | - sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR]; |
1242 | - |
1243 | - if (!skl_ddb_entry_equal(hw_entry, sw_entry)) { |
1244 | - DRM_ERROR("mismatch in DDB state pipe %c cursor " |
1245 | - "(expected (%u,%u), found (%u,%u))\n", |
1246 | - pipe_name(pipe), |
1247 | - sw_entry->start, sw_entry->end, |
1248 | - hw_entry->start, hw_entry->end); |
1249 | + /* |
1250 | + * cursor |
1251 | + * If the cursor plane isn't active, we may not have updated it's ddb |
1252 | + * allocation. In that case since the ddb allocation will be updated |
1253 | + * once the plane becomes visible, we can skip this check |
1254 | + */ |
1255 | + if (intel_crtc->cursor_addr) { |
1256 | + hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR]; |
1257 | + sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR]; |
1258 | + |
1259 | + if (!skl_ddb_entry_equal(hw_entry, sw_entry)) { |
1260 | + DRM_ERROR("mismatch in DDB state pipe %c cursor " |
1261 | + "(expected (%u,%u), found (%u,%u))\n", |
1262 | + pipe_name(pipe), |
1263 | + sw_entry->start, sw_entry->end, |
1264 | + hw_entry->start, hw_entry->end); |
1265 | + } |
1266 | } |
1267 | } |
1268 | |
1269 | @@ -13671,6 +13712,111 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state) |
1270 | return false; |
1271 | } |
1272 | |
1273 | +static void intel_update_crtc(struct drm_crtc *crtc, |
1274 | + struct drm_atomic_state *state, |
1275 | + struct drm_crtc_state *old_crtc_state, |
1276 | + unsigned int *crtc_vblank_mask) |
1277 | +{ |
1278 | + struct drm_device *dev = crtc->dev; |
1279 | + struct drm_i915_private *dev_priv = to_i915(dev); |
1280 | + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1281 | + struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->state); |
1282 | + bool modeset = needs_modeset(crtc->state); |
1283 | + |
1284 | + if (modeset) { |
1285 | + update_scanline_offset(intel_crtc); |
1286 | + dev_priv->display.crtc_enable(crtc); |
1287 | + } else { |
1288 | + intel_pre_plane_update(to_intel_crtc_state(old_crtc_state)); |
1289 | + } |
1290 | + |
1291 | + if (drm_atomic_get_existing_plane_state(state, crtc->primary)) { |
1292 | + intel_fbc_enable( |
1293 | + intel_crtc, pipe_config, |
1294 | + to_intel_plane_state(crtc->primary->state)); |
1295 | + } |
1296 | + |
1297 | + drm_atomic_helper_commit_planes_on_crtc(old_crtc_state); |
1298 | + |
1299 | + if (needs_vblank_wait(pipe_config)) |
1300 | + *crtc_vblank_mask |= drm_crtc_mask(crtc); |
1301 | +} |
1302 | + |
1303 | +static void intel_update_crtcs(struct drm_atomic_state *state, |
1304 | + unsigned int *crtc_vblank_mask) |
1305 | +{ |
1306 | + struct drm_crtc *crtc; |
1307 | + struct drm_crtc_state *old_crtc_state; |
1308 | + int i; |
1309 | + |
1310 | + for_each_crtc_in_state(state, crtc, old_crtc_state, i) { |
1311 | + if (!crtc->state->active) |
1312 | + continue; |
1313 | + |
1314 | + intel_update_crtc(crtc, state, old_crtc_state, |
1315 | + crtc_vblank_mask); |
1316 | + } |
1317 | +} |
1318 | + |
1319 | +static void skl_update_crtcs(struct drm_atomic_state *state, |
1320 | + unsigned int *crtc_vblank_mask) |
1321 | +{ |
1322 | + struct drm_device *dev = state->dev; |
1323 | + struct drm_i915_private *dev_priv = to_i915(dev); |
1324 | + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); |
1325 | + struct drm_crtc *crtc; |
1326 | + struct drm_crtc_state *old_crtc_state; |
1327 | + struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb; |
1328 | + struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb; |
1329 | + unsigned int updated = 0; |
1330 | + bool progress; |
1331 | + enum pipe pipe; |
1332 | + |
1333 | + /* |
1334 | + * Whenever the number of active pipes changes, we need to make sure we |
1335 | + * update the pipes in the right order so that their ddb allocations |
1336 | + * never overlap with eachother inbetween CRTC updates. Otherwise we'll |
1337 | + * cause pipe underruns and other bad stuff. |
1338 | + */ |
1339 | + do { |
1340 | + int i; |
1341 | + progress = false; |
1342 | + |
1343 | + for_each_crtc_in_state(state, crtc, old_crtc_state, i) { |
1344 | + bool vbl_wait = false; |
1345 | + unsigned int cmask = drm_crtc_mask(crtc); |
1346 | + pipe = to_intel_crtc(crtc)->pipe; |
1347 | + |
1348 | + if (updated & cmask || !crtc->state->active) |
1349 | + continue; |
1350 | + if (skl_ddb_allocation_overlaps(state, cur_ddb, new_ddb, |
1351 | + pipe)) |
1352 | + continue; |
1353 | + |
1354 | + updated |= cmask; |
1355 | + |
1356 | + /* |
1357 | + * If this is an already active pipe, it's DDB changed, |
1358 | + * and this isn't the last pipe that needs updating |
1359 | + * then we need to wait for a vblank to pass for the |
1360 | + * new ddb allocation to take effect. |
1361 | + */ |
1362 | + if (!skl_ddb_allocation_equals(cur_ddb, new_ddb, pipe) && |
1363 | + !crtc->state->active_changed && |
1364 | + intel_state->wm_results.dirty_pipes != updated) |
1365 | + vbl_wait = true; |
1366 | + |
1367 | + intel_update_crtc(crtc, state, old_crtc_state, |
1368 | + crtc_vblank_mask); |
1369 | + |
1370 | + if (vbl_wait) |
1371 | + intel_wait_for_vblank(dev, pipe); |
1372 | + |
1373 | + progress = true; |
1374 | + } |
1375 | + } while (progress); |
1376 | +} |
1377 | + |
1378 | static void intel_atomic_commit_tail(struct drm_atomic_state *state) |
1379 | { |
1380 | struct drm_device *dev = state->dev; |
1381 | @@ -13763,23 +13909,15 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) |
1382 | * SKL workaround: bspec recommends we disable the SAGV when we |
1383 | * have more then one pipe enabled |
1384 | */ |
1385 | - if (IS_SKYLAKE(dev_priv) && !skl_can_enable_sagv(state)) |
1386 | - skl_disable_sagv(dev_priv); |
1387 | + if (!intel_can_enable_sagv(state)) |
1388 | + intel_disable_sagv(dev_priv); |
1389 | |
1390 | intel_modeset_verify_disabled(dev); |
1391 | } |
1392 | |
1393 | - /* Now enable the clocks, plane, pipe, and connectors that we set up. */ |
1394 | + /* Complete the events for pipes that have now been disabled */ |
1395 | for_each_crtc_in_state(state, crtc, old_crtc_state, i) { |
1396 | - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1397 | bool modeset = needs_modeset(crtc->state); |
1398 | - struct intel_crtc_state *pipe_config = |
1399 | - to_intel_crtc_state(crtc->state); |
1400 | - |
1401 | - if (modeset && crtc->state->active) { |
1402 | - update_scanline_offset(to_intel_crtc(crtc)); |
1403 | - dev_priv->display.crtc_enable(crtc); |
1404 | - } |
1405 | |
1406 | /* Complete events for now disable pipes here. */ |
1407 | if (modeset && !crtc->state->active && crtc->state->event) { |
1408 | @@ -13789,21 +13927,11 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) |
1409 | |
1410 | crtc->state->event = NULL; |
1411 | } |
1412 | - |
1413 | - if (!modeset) |
1414 | - intel_pre_plane_update(to_intel_crtc_state(old_crtc_state)); |
1415 | - |
1416 | - if (crtc->state->active && |
1417 | - drm_atomic_get_existing_plane_state(state, crtc->primary)) |
1418 | - intel_fbc_enable(intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state)); |
1419 | - |
1420 | - if (crtc->state->active) |
1421 | - drm_atomic_helper_commit_planes_on_crtc(old_crtc_state); |
1422 | - |
1423 | - if (pipe_config->base.active && needs_vblank_wait(pipe_config)) |
1424 | - crtc_vblank_mask |= 1 << i; |
1425 | } |
1426 | |
1427 | + /* Now enable the clocks, plane, pipe, and connectors that we set up. */ |
1428 | + dev_priv->display.update_crtcs(state, &crtc_vblank_mask); |
1429 | + |
1430 | /* FIXME: We should call drm_atomic_helper_commit_hw_done() here |
1431 | * already, but still need the state for the delayed optimization. To |
1432 | * fix this: |
1433 | @@ -13839,9 +13967,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) |
1434 | intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state); |
1435 | } |
1436 | |
1437 | - if (IS_SKYLAKE(dev_priv) && intel_state->modeset && |
1438 | - skl_can_enable_sagv(state)) |
1439 | - skl_enable_sagv(dev_priv); |
1440 | + if (intel_state->modeset && intel_can_enable_sagv(state)) |
1441 | + intel_enable_sagv(dev_priv); |
1442 | |
1443 | drm_atomic_helper_commit_hw_done(state); |
1444 | |
1445 | @@ -14221,10 +14348,12 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc, |
1446 | struct drm_crtc_state *old_crtc_state) |
1447 | { |
1448 | struct drm_device *dev = crtc->dev; |
1449 | + struct drm_i915_private *dev_priv = to_i915(dev); |
1450 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1451 | struct intel_crtc_state *old_intel_state = |
1452 | to_intel_crtc_state(old_crtc_state); |
1453 | bool modeset = needs_modeset(crtc->state); |
1454 | + enum pipe pipe = intel_crtc->pipe; |
1455 | |
1456 | /* Perform vblank evasion around commit operation */ |
1457 | intel_pipe_update_start(intel_crtc); |
1458 | @@ -14239,8 +14368,12 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc, |
1459 | |
1460 | if (to_intel_crtc_state(crtc->state)->update_pipe) |
1461 | intel_update_pipe_config(intel_crtc, old_intel_state); |
1462 | - else if (INTEL_INFO(dev)->gen >= 9) |
1463 | + else if (INTEL_GEN(dev_priv) >= 9) { |
1464 | skl_detach_scalers(intel_crtc); |
1465 | + |
1466 | + I915_WRITE(PIPE_WM_LINETIME(pipe), |
1467 | + dev_priv->wm.skl_hw.wm_linetime[pipe]); |
1468 | + } |
1469 | } |
1470 | |
1471 | static void intel_finish_crtc_commit(struct drm_crtc *crtc, |
1472 | @@ -15347,6 +15480,11 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) |
1473 | skl_modeset_calc_cdclk; |
1474 | } |
1475 | |
1476 | + if (dev_priv->info.gen >= 9) |
1477 | + dev_priv->display.update_crtcs = skl_update_crtcs; |
1478 | + else |
1479 | + dev_priv->display.update_crtcs = intel_update_crtcs; |
1480 | + |
1481 | switch (INTEL_INFO(dev_priv)->gen) { |
1482 | case 2: |
1483 | dev_priv->display.queue_flip = intel_gen2_queue_flip; |
1484 | diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c |
1485 | index 21b04c3eda41..1ca155f4d368 100644 |
1486 | --- a/drivers/gpu/drm/i915/intel_dp.c |
1487 | +++ b/drivers/gpu/drm/i915/intel_dp.c |
1488 | @@ -4148,7 +4148,7 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv, |
1489 | * |
1490 | * Return %true if @port is connected, %false otherwise. |
1491 | */ |
1492 | -bool intel_digital_port_connected(struct drm_i915_private *dev_priv, |
1493 | +static bool intel_digital_port_connected(struct drm_i915_private *dev_priv, |
1494 | struct intel_digital_port *port) |
1495 | { |
1496 | if (HAS_PCH_IBX(dev_priv)) |
1497 | @@ -4207,7 +4207,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) |
1498 | intel_dp->has_audio = false; |
1499 | } |
1500 | |
1501 | -static void |
1502 | +static enum drm_connector_status |
1503 | intel_dp_long_pulse(struct intel_connector *intel_connector) |
1504 | { |
1505 | struct drm_connector *connector = &intel_connector->base; |
1506 | @@ -4232,7 +4232,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector) |
1507 | else |
1508 | status = connector_status_disconnected; |
1509 | |
1510 | - if (status != connector_status_connected) { |
1511 | + if (status == connector_status_disconnected) { |
1512 | intel_dp->compliance_test_active = 0; |
1513 | intel_dp->compliance_test_type = 0; |
1514 | intel_dp->compliance_test_data = 0; |
1515 | @@ -4284,8 +4284,8 @@ intel_dp_long_pulse(struct intel_connector *intel_connector) |
1516 | intel_dp->aux.i2c_defer_count = 0; |
1517 | |
1518 | intel_dp_set_edid(intel_dp); |
1519 | - |
1520 | - status = connector_status_connected; |
1521 | + if (is_edp(intel_dp) || intel_connector->detect_edid) |
1522 | + status = connector_status_connected; |
1523 | intel_dp->detect_done = true; |
1524 | |
1525 | /* Try to read the source of the interrupt */ |
1526 | @@ -4303,12 +4303,11 @@ intel_dp_long_pulse(struct intel_connector *intel_connector) |
1527 | } |
1528 | |
1529 | out: |
1530 | - if ((status != connector_status_connected) && |
1531 | - (intel_dp->is_mst == false)) |
1532 | + if (status != connector_status_connected && !intel_dp->is_mst) |
1533 | intel_dp_unset_edid(intel_dp); |
1534 | |
1535 | intel_display_power_put(to_i915(dev), power_domain); |
1536 | - return; |
1537 | + return status; |
1538 | } |
1539 | |
1540 | static enum drm_connector_status |
1541 | @@ -4317,7 +4316,7 @@ intel_dp_detect(struct drm_connector *connector, bool force) |
1542 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
1543 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
1544 | struct intel_encoder *intel_encoder = &intel_dig_port->base; |
1545 | - struct intel_connector *intel_connector = to_intel_connector(connector); |
1546 | + enum drm_connector_status status = connector->status; |
1547 | |
1548 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", |
1549 | connector->base.id, connector->name); |
1550 | @@ -4332,14 +4331,11 @@ intel_dp_detect(struct drm_connector *connector, bool force) |
1551 | |
1552 | /* If full detect is not performed yet, do a full detect */ |
1553 | if (!intel_dp->detect_done) |
1554 | - intel_dp_long_pulse(intel_dp->attached_connector); |
1555 | + status = intel_dp_long_pulse(intel_dp->attached_connector); |
1556 | |
1557 | intel_dp->detect_done = false; |
1558 | |
1559 | - if (is_edp(intel_dp) || intel_connector->detect_edid) |
1560 | - return connector_status_connected; |
1561 | - else |
1562 | - return connector_status_disconnected; |
1563 | + return status; |
1564 | } |
1565 | |
1566 | static void |
1567 | @@ -4696,36 +4692,34 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) |
1568 | port_name(intel_dig_port->port), |
1569 | long_hpd ? "long" : "short"); |
1570 | |
1571 | + if (long_hpd) { |
1572 | + intel_dp->detect_done = false; |
1573 | + return IRQ_NONE; |
1574 | + } |
1575 | + |
1576 | power_domain = intel_display_port_aux_power_domain(intel_encoder); |
1577 | intel_display_power_get(dev_priv, power_domain); |
1578 | |
1579 | - if (long_hpd) { |
1580 | - intel_dp_long_pulse(intel_dp->attached_connector); |
1581 | - if (intel_dp->is_mst) |
1582 | - ret = IRQ_HANDLED; |
1583 | - goto put_power; |
1584 | - |
1585 | - } else { |
1586 | - if (intel_dp->is_mst) { |
1587 | - if (intel_dp_check_mst_status(intel_dp) == -EINVAL) { |
1588 | - /* |
1589 | - * If we were in MST mode, and device is not |
1590 | - * there, get out of MST mode |
1591 | - */ |
1592 | - DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", |
1593 | - intel_dp->is_mst, intel_dp->mst_mgr.mst_state); |
1594 | - intel_dp->is_mst = false; |
1595 | - drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, |
1596 | - intel_dp->is_mst); |
1597 | - goto put_power; |
1598 | - } |
1599 | + if (intel_dp->is_mst) { |
1600 | + if (intel_dp_check_mst_status(intel_dp) == -EINVAL) { |
1601 | + /* |
1602 | + * If we were in MST mode, and device is not |
1603 | + * there, get out of MST mode |
1604 | + */ |
1605 | + DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", |
1606 | + intel_dp->is_mst, intel_dp->mst_mgr.mst_state); |
1607 | + intel_dp->is_mst = false; |
1608 | + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, |
1609 | + intel_dp->is_mst); |
1610 | + intel_dp->detect_done = false; |
1611 | + goto put_power; |
1612 | } |
1613 | + } |
1614 | |
1615 | - if (!intel_dp->is_mst) { |
1616 | - if (!intel_dp_short_pulse(intel_dp)) { |
1617 | - intel_dp_long_pulse(intel_dp->attached_connector); |
1618 | - goto put_power; |
1619 | - } |
1620 | + if (!intel_dp->is_mst) { |
1621 | + if (!intel_dp_short_pulse(intel_dp)) { |
1622 | + intel_dp->detect_done = false; |
1623 | + goto put_power; |
1624 | } |
1625 | } |
1626 | |
1627 | diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h |
1628 | index ff399b9a5c1f..9a58800cba3b 100644 |
1629 | --- a/drivers/gpu/drm/i915/intel_drv.h |
1630 | +++ b/drivers/gpu/drm/i915/intel_drv.h |
1631 | @@ -236,6 +236,7 @@ struct intel_panel { |
1632 | bool enabled; |
1633 | bool combination_mode; /* gen 2/4 only */ |
1634 | bool active_low_pwm; |
1635 | + bool alternate_pwm_increment; /* lpt+ */ |
1636 | |
1637 | /* PWM chip */ |
1638 | bool util_pin_active_low; /* bxt+ */ |
1639 | @@ -1387,8 +1388,6 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp); |
1640 | void intel_edp_drrs_invalidate(struct drm_device *dev, |
1641 | unsigned frontbuffer_bits); |
1642 | void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits); |
1643 | -bool intel_digital_port_connected(struct drm_i915_private *dev_priv, |
1644 | - struct intel_digital_port *port); |
1645 | |
1646 | void |
1647 | intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, |
1648 | @@ -1716,9 +1715,21 @@ void ilk_wm_get_hw_state(struct drm_device *dev); |
1649 | void skl_wm_get_hw_state(struct drm_device *dev); |
1650 | void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, |
1651 | struct skl_ddb_allocation *ddb /* out */); |
1652 | -bool skl_can_enable_sagv(struct drm_atomic_state *state); |
1653 | -int skl_enable_sagv(struct drm_i915_private *dev_priv); |
1654 | -int skl_disable_sagv(struct drm_i915_private *dev_priv); |
1655 | +bool intel_can_enable_sagv(struct drm_atomic_state *state); |
1656 | +int intel_enable_sagv(struct drm_i915_private *dev_priv); |
1657 | +int intel_disable_sagv(struct drm_i915_private *dev_priv); |
1658 | +bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old, |
1659 | + const struct skl_ddb_allocation *new, |
1660 | + enum pipe pipe); |
1661 | +bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state, |
1662 | + const struct skl_ddb_allocation *old, |
1663 | + const struct skl_ddb_allocation *new, |
1664 | + enum pipe pipe); |
1665 | +void skl_write_cursor_wm(struct intel_crtc *intel_crtc, |
1666 | + const struct skl_wm_values *wm); |
1667 | +void skl_write_plane_wm(struct intel_crtc *intel_crtc, |
1668 | + const struct skl_wm_values *wm, |
1669 | + int plane); |
1670 | uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config); |
1671 | bool ilk_disable_lp_wm(struct drm_device *dev); |
1672 | int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6); |
1673 | diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c |
1674 | index 4df9f384910c..c3aa9e670d15 100644 |
1675 | --- a/drivers/gpu/drm/i915/intel_hdmi.c |
1676 | +++ b/drivers/gpu/drm/i915/intel_hdmi.c |
1677 | @@ -1422,24 +1422,22 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid) |
1678 | } |
1679 | |
1680 | static bool |
1681 | -intel_hdmi_set_edid(struct drm_connector *connector, bool force) |
1682 | +intel_hdmi_set_edid(struct drm_connector *connector) |
1683 | { |
1684 | struct drm_i915_private *dev_priv = to_i915(connector->dev); |
1685 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); |
1686 | - struct edid *edid = NULL; |
1687 | + struct edid *edid; |
1688 | bool connected = false; |
1689 | |
1690 | - if (force) { |
1691 | - intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); |
1692 | + intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); |
1693 | |
1694 | - edid = drm_get_edid(connector, |
1695 | - intel_gmbus_get_adapter(dev_priv, |
1696 | - intel_hdmi->ddc_bus)); |
1697 | + edid = drm_get_edid(connector, |
1698 | + intel_gmbus_get_adapter(dev_priv, |
1699 | + intel_hdmi->ddc_bus)); |
1700 | |
1701 | - intel_hdmi_dp_dual_mode_detect(connector, edid != NULL); |
1702 | + intel_hdmi_dp_dual_mode_detect(connector, edid != NULL); |
1703 | |
1704 | - intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); |
1705 | - } |
1706 | + intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); |
1707 | |
1708 | to_intel_connector(connector)->detect_edid = edid; |
1709 | if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { |
1710 | @@ -1465,37 +1463,16 @@ static enum drm_connector_status |
1711 | intel_hdmi_detect(struct drm_connector *connector, bool force) |
1712 | { |
1713 | enum drm_connector_status status; |
1714 | - struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); |
1715 | struct drm_i915_private *dev_priv = to_i915(connector->dev); |
1716 | - bool live_status = false; |
1717 | - unsigned int try; |
1718 | |
1719 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", |
1720 | connector->base.id, connector->name); |
1721 | |
1722 | intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); |
1723 | |
1724 | - for (try = 0; !live_status && try < 9; try++) { |
1725 | - if (try) |
1726 | - msleep(10); |
1727 | - live_status = intel_digital_port_connected(dev_priv, |
1728 | - hdmi_to_dig_port(intel_hdmi)); |
1729 | - } |
1730 | - |
1731 | - if (!live_status) { |
1732 | - DRM_DEBUG_KMS("HDMI live status down\n"); |
1733 | - /* |
1734 | - * Live status register is not reliable on all intel platforms. |
1735 | - * So consider live_status only for certain platforms, for |
1736 | - * others, read EDID to determine presence of sink. |
1737 | - */ |
1738 | - if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv)) |
1739 | - live_status = true; |
1740 | - } |
1741 | - |
1742 | intel_hdmi_unset_edid(connector); |
1743 | |
1744 | - if (intel_hdmi_set_edid(connector, live_status)) { |
1745 | + if (intel_hdmi_set_edid(connector)) { |
1746 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); |
1747 | |
1748 | hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI; |
1749 | @@ -1521,7 +1498,7 @@ intel_hdmi_force(struct drm_connector *connector) |
1750 | if (connector->status != connector_status_connected) |
1751 | return; |
1752 | |
1753 | - intel_hdmi_set_edid(connector, true); |
1754 | + intel_hdmi_set_edid(connector); |
1755 | hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI; |
1756 | } |
1757 | |
1758 | diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c |
1759 | index 96c65d77e886..9a2393a6b277 100644 |
1760 | --- a/drivers/gpu/drm/i915/intel_panel.c |
1761 | +++ b/drivers/gpu/drm/i915/intel_panel.c |
1762 | @@ -841,7 +841,7 @@ static void lpt_enable_backlight(struct intel_connector *connector) |
1763 | { |
1764 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
1765 | struct intel_panel *panel = &connector->panel; |
1766 | - u32 pch_ctl1, pch_ctl2; |
1767 | + u32 pch_ctl1, pch_ctl2, schicken; |
1768 | |
1769 | pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1); |
1770 | if (pch_ctl1 & BLM_PCH_PWM_ENABLE) { |
1771 | @@ -850,6 +850,22 @@ static void lpt_enable_backlight(struct intel_connector *connector) |
1772 | I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1); |
1773 | } |
1774 | |
1775 | + if (HAS_PCH_LPT(dev_priv)) { |
1776 | + schicken = I915_READ(SOUTH_CHICKEN2); |
1777 | + if (panel->backlight.alternate_pwm_increment) |
1778 | + schicken |= LPT_PWM_GRANULARITY; |
1779 | + else |
1780 | + schicken &= ~LPT_PWM_GRANULARITY; |
1781 | + I915_WRITE(SOUTH_CHICKEN2, schicken); |
1782 | + } else { |
1783 | + schicken = I915_READ(SOUTH_CHICKEN1); |
1784 | + if (panel->backlight.alternate_pwm_increment) |
1785 | + schicken |= SPT_PWM_GRANULARITY; |
1786 | + else |
1787 | + schicken &= ~SPT_PWM_GRANULARITY; |
1788 | + I915_WRITE(SOUTH_CHICKEN1, schicken); |
1789 | + } |
1790 | + |
1791 | pch_ctl2 = panel->backlight.max << 16; |
1792 | I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2); |
1793 | |
1794 | @@ -1242,10 +1258,10 @@ static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) |
1795 | */ |
1796 | static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) |
1797 | { |
1798 | - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
1799 | + struct intel_panel *panel = &connector->panel; |
1800 | u32 mul; |
1801 | |
1802 | - if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY) |
1803 | + if (panel->backlight.alternate_pwm_increment) |
1804 | mul = 128; |
1805 | else |
1806 | mul = 16; |
1807 | @@ -1261,9 +1277,10 @@ static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) |
1808 | static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) |
1809 | { |
1810 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
1811 | + struct intel_panel *panel = &connector->panel; |
1812 | u32 mul, clock; |
1813 | |
1814 | - if (I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY) |
1815 | + if (panel->backlight.alternate_pwm_increment) |
1816 | mul = 16; |
1817 | else |
1818 | mul = 128; |
1819 | @@ -1414,6 +1431,13 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus |
1820 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
1821 | struct intel_panel *panel = &connector->panel; |
1822 | u32 pch_ctl1, pch_ctl2, val; |
1823 | + bool alt; |
1824 | + |
1825 | + if (HAS_PCH_LPT(dev_priv)) |
1826 | + alt = I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY; |
1827 | + else |
1828 | + alt = I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY; |
1829 | + panel->backlight.alternate_pwm_increment = alt; |
1830 | |
1831 | pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1); |
1832 | panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY; |
1833 | diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c |
1834 | index 2d2481392824..e59a28cb3158 100644 |
1835 | --- a/drivers/gpu/drm/i915/intel_pm.c |
1836 | +++ b/drivers/gpu/drm/i915/intel_pm.c |
1837 | @@ -2119,32 +2119,34 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8]) |
1838 | GEN9_MEM_LATENCY_LEVEL_MASK; |
1839 | |
1840 | /* |
1841 | + * If a level n (n > 1) has a 0us latency, all levels m (m >= n) |
1842 | + * need to be disabled. We make sure to sanitize the values out |
1843 | + * of the punit to satisfy this requirement. |
1844 | + */ |
1845 | + for (level = 1; level <= max_level; level++) { |
1846 | + if (wm[level] == 0) { |
1847 | + for (i = level + 1; i <= max_level; i++) |
1848 | + wm[i] = 0; |
1849 | + break; |
1850 | + } |
1851 | + } |
1852 | + |
1853 | + /* |
1854 | * WaWmMemoryReadLatency:skl |
1855 | * |
1856 | * punit doesn't take into account the read latency so we need |
1857 | - * to add 2us to the various latency levels we retrieve from |
1858 | - * the punit. |
1859 | - * - W0 is a bit special in that it's the only level that |
1860 | - * can't be disabled if we want to have display working, so |
1861 | - * we always add 2us there. |
1862 | - * - For levels >=1, punit returns 0us latency when they are |
1863 | - * disabled, so we respect that and don't add 2us then |
1864 | - * |
1865 | - * Additionally, if a level n (n > 1) has a 0us latency, all |
1866 | - * levels m (m >= n) need to be disabled. We make sure to |
1867 | - * sanitize the values out of the punit to satisfy this |
1868 | - * requirement. |
1869 | + * to add 2us to the various latency levels we retrieve from the |
1870 | + * punit when level 0 response data us 0us. |
1871 | */ |
1872 | - wm[0] += 2; |
1873 | - for (level = 1; level <= max_level; level++) |
1874 | - if (wm[level] != 0) |
1875 | + if (wm[0] == 0) { |
1876 | + wm[0] += 2; |
1877 | + for (level = 1; level <= max_level; level++) { |
1878 | + if (wm[level] == 0) |
1879 | + break; |
1880 | wm[level] += 2; |
1881 | - else { |
1882 | - for (i = level + 1; i <= max_level; i++) |
1883 | - wm[i] = 0; |
1884 | - |
1885 | - break; |
1886 | } |
1887 | + } |
1888 | + |
1889 | } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { |
1890 | uint64_t sskpd = I915_READ64(MCH_SSKPD); |
1891 | |
1892 | @@ -2876,6 +2878,19 @@ skl_wm_plane_id(const struct intel_plane *plane) |
1893 | } |
1894 | } |
1895 | |
1896 | +static bool |
1897 | +intel_has_sagv(struct drm_i915_private *dev_priv) |
1898 | +{ |
1899 | + if (IS_KABYLAKE(dev_priv)) |
1900 | + return true; |
1901 | + |
1902 | + if (IS_SKYLAKE(dev_priv) && |
1903 | + dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED) |
1904 | + return true; |
1905 | + |
1906 | + return false; |
1907 | +} |
1908 | + |
1909 | /* |
1910 | * SAGV dynamically adjusts the system agent voltage and clock frequencies |
1911 | * depending on power and performance requirements. The display engine access |
1912 | @@ -2888,12 +2903,14 @@ skl_wm_plane_id(const struct intel_plane *plane) |
1913 | * - We're not using an interlaced display configuration |
1914 | */ |
1915 | int |
1916 | -skl_enable_sagv(struct drm_i915_private *dev_priv) |
1917 | +intel_enable_sagv(struct drm_i915_private *dev_priv) |
1918 | { |
1919 | int ret; |
1920 | |
1921 | - if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED || |
1922 | - dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED) |
1923 | + if (!intel_has_sagv(dev_priv)) |
1924 | + return 0; |
1925 | + |
1926 | + if (dev_priv->sagv_status == I915_SAGV_ENABLED) |
1927 | return 0; |
1928 | |
1929 | DRM_DEBUG_KMS("Enabling the SAGV\n"); |
1930 | @@ -2909,21 +2926,21 @@ skl_enable_sagv(struct drm_i915_private *dev_priv) |
1931 | * Some skl systems, pre-release machines in particular, |
1932 | * don't actually have an SAGV. |
1933 | */ |
1934 | - if (ret == -ENXIO) { |
1935 | + if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) { |
1936 | DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n"); |
1937 | - dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED; |
1938 | + dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; |
1939 | return 0; |
1940 | } else if (ret < 0) { |
1941 | DRM_ERROR("Failed to enable the SAGV\n"); |
1942 | return ret; |
1943 | } |
1944 | |
1945 | - dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED; |
1946 | + dev_priv->sagv_status = I915_SAGV_ENABLED; |
1947 | return 0; |
1948 | } |
1949 | |
1950 | static int |
1951 | -skl_do_sagv_disable(struct drm_i915_private *dev_priv) |
1952 | +intel_do_sagv_disable(struct drm_i915_private *dev_priv) |
1953 | { |
1954 | int ret; |
1955 | uint32_t temp = GEN9_SAGV_DISABLE; |
1956 | @@ -2937,19 +2954,21 @@ skl_do_sagv_disable(struct drm_i915_private *dev_priv) |
1957 | } |
1958 | |
1959 | int |
1960 | -skl_disable_sagv(struct drm_i915_private *dev_priv) |
1961 | +intel_disable_sagv(struct drm_i915_private *dev_priv) |
1962 | { |
1963 | int ret, result; |
1964 | |
1965 | - if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED || |
1966 | - dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED) |
1967 | + if (!intel_has_sagv(dev_priv)) |
1968 | + return 0; |
1969 | + |
1970 | + if (dev_priv->sagv_status == I915_SAGV_DISABLED) |
1971 | return 0; |
1972 | |
1973 | DRM_DEBUG_KMS("Disabling the SAGV\n"); |
1974 | mutex_lock(&dev_priv->rps.hw_lock); |
1975 | |
1976 | /* bspec says to keep retrying for at least 1 ms */ |
1977 | - ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1); |
1978 | + ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1); |
1979 | mutex_unlock(&dev_priv->rps.hw_lock); |
1980 | |
1981 | if (ret == -ETIMEDOUT) { |
1982 | @@ -2961,20 +2980,20 @@ skl_disable_sagv(struct drm_i915_private *dev_priv) |
1983 | * Some skl systems, pre-release machines in particular, |
1984 | * don't actually have an SAGV. |
1985 | */ |
1986 | - if (result == -ENXIO) { |
1987 | + if (IS_SKYLAKE(dev_priv) && result == -ENXIO) { |
1988 | DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n"); |
1989 | - dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED; |
1990 | + dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; |
1991 | return 0; |
1992 | } else if (result < 0) { |
1993 | DRM_ERROR("Failed to disable the SAGV\n"); |
1994 | return result; |
1995 | } |
1996 | |
1997 | - dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED; |
1998 | + dev_priv->sagv_status = I915_SAGV_DISABLED; |
1999 | return 0; |
2000 | } |
2001 | |
2002 | -bool skl_can_enable_sagv(struct drm_atomic_state *state) |
2003 | +bool intel_can_enable_sagv(struct drm_atomic_state *state) |
2004 | { |
2005 | struct drm_device *dev = state->dev; |
2006 | struct drm_i915_private *dev_priv = to_i915(dev); |
2007 | @@ -2983,6 +3002,9 @@ bool skl_can_enable_sagv(struct drm_atomic_state *state) |
2008 | enum pipe pipe; |
2009 | int level, plane; |
2010 | |
2011 | + if (!intel_has_sagv(dev_priv)) |
2012 | + return false; |
2013 | + |
2014 | /* |
2015 | * SKL workaround: bspec recommends we disable the SAGV when we have |
2016 | * more then one pipe enabled |
2017 | @@ -3473,29 +3495,14 @@ static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latenc |
2018 | } |
2019 | |
2020 | static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, |
2021 | - uint32_t horiz_pixels, uint8_t cpp, |
2022 | - uint64_t tiling, uint32_t latency) |
2023 | + uint32_t latency, uint32_t plane_blocks_per_line) |
2024 | { |
2025 | uint32_t ret; |
2026 | - uint32_t plane_bytes_per_line, plane_blocks_per_line; |
2027 | uint32_t wm_intermediate_val; |
2028 | |
2029 | if (latency == 0) |
2030 | return UINT_MAX; |
2031 | |
2032 | - plane_bytes_per_line = horiz_pixels * cpp; |
2033 | - |
2034 | - if (tiling == I915_FORMAT_MOD_Y_TILED || |
2035 | - tiling == I915_FORMAT_MOD_Yf_TILED) { |
2036 | - plane_bytes_per_line *= 4; |
2037 | - plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); |
2038 | - plane_blocks_per_line /= 4; |
2039 | - } else if (tiling == DRM_FORMAT_MOD_NONE) { |
2040 | - plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1; |
2041 | - } else { |
2042 | - plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); |
2043 | - } |
2044 | - |
2045 | wm_intermediate_val = latency * pixel_rate; |
2046 | ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) * |
2047 | plane_blocks_per_line; |
2048 | @@ -3546,6 +3553,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, |
2049 | uint8_t cpp; |
2050 | uint32_t width = 0, height = 0; |
2051 | uint32_t plane_pixel_rate; |
2052 | + uint32_t y_tile_minimum, y_min_scanlines; |
2053 | |
2054 | if (latency == 0 || !cstate->base.active || !intel_pstate->visible) { |
2055 | *enabled = false; |
2056 | @@ -3561,38 +3569,51 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, |
2057 | cpp = drm_format_plane_cpp(fb->pixel_format, 0); |
2058 | plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); |
2059 | |
2060 | + if (intel_rotation_90_or_270(pstate->rotation)) { |
2061 | + int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ? |
2062 | + drm_format_plane_cpp(fb->pixel_format, 1) : |
2063 | + drm_format_plane_cpp(fb->pixel_format, 0); |
2064 | + |
2065 | + switch (cpp) { |
2066 | + case 1: |
2067 | + y_min_scanlines = 16; |
2068 | + break; |
2069 | + case 2: |
2070 | + y_min_scanlines = 8; |
2071 | + break; |
2072 | + default: |
2073 | + WARN(1, "Unsupported pixel depth for rotation"); |
2074 | + case 4: |
2075 | + y_min_scanlines = 4; |
2076 | + break; |
2077 | + } |
2078 | + } else { |
2079 | + y_min_scanlines = 4; |
2080 | + } |
2081 | + |
2082 | + plane_bytes_per_line = width * cpp; |
2083 | + if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || |
2084 | + fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) { |
2085 | + plane_blocks_per_line = |
2086 | + DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512); |
2087 | + plane_blocks_per_line /= y_min_scanlines; |
2088 | + } else if (fb->modifier[0] == DRM_FORMAT_MOD_NONE) { |
2089 | + plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) |
2090 | + + 1; |
2091 | + } else { |
2092 | + plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); |
2093 | + } |
2094 | + |
2095 | method1 = skl_wm_method1(plane_pixel_rate, cpp, latency); |
2096 | method2 = skl_wm_method2(plane_pixel_rate, |
2097 | cstate->base.adjusted_mode.crtc_htotal, |
2098 | - width, |
2099 | - cpp, |
2100 | - fb->modifier[0], |
2101 | - latency); |
2102 | + latency, |
2103 | + plane_blocks_per_line); |
2104 | |
2105 | - plane_bytes_per_line = width * cpp; |
2106 | - plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); |
2107 | + y_tile_minimum = plane_blocks_per_line * y_min_scanlines; |
2108 | |
2109 | if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || |
2110 | fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) { |
2111 | - uint32_t min_scanlines = 4; |
2112 | - uint32_t y_tile_minimum; |
2113 | - if (intel_rotation_90_or_270(pstate->rotation)) { |
2114 | - int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ? |
2115 | - drm_format_plane_cpp(fb->pixel_format, 1) : |
2116 | - drm_format_plane_cpp(fb->pixel_format, 0); |
2117 | - |
2118 | - switch (cpp) { |
2119 | - case 1: |
2120 | - min_scanlines = 16; |
2121 | - break; |
2122 | - case 2: |
2123 | - min_scanlines = 8; |
2124 | - break; |
2125 | - case 8: |
2126 | - WARN(1, "Unsupported pixel depth for rotation"); |
2127 | - } |
2128 | - } |
2129 | - y_tile_minimum = plane_blocks_per_line * min_scanlines; |
2130 | selected_result = max(method2, y_tile_minimum); |
2131 | } else { |
2132 | if ((ddb_allocation / plane_blocks_per_line) >= 1) |
2133 | @@ -3606,10 +3627,12 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, |
2134 | |
2135 | if (level >= 1 && level <= 7) { |
2136 | if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || |
2137 | - fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) |
2138 | - res_lines += 4; |
2139 | - else |
2140 | + fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) { |
2141 | + res_blocks += y_tile_minimum; |
2142 | + res_lines += y_min_scanlines; |
2143 | + } else { |
2144 | res_blocks++; |
2145 | + } |
2146 | } |
2147 | |
2148 | if (res_blocks >= ddb_allocation || res_lines > 31) { |
2149 | @@ -3828,183 +3851,82 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, |
2150 | I915_WRITE(reg, 0); |
2151 | } |
2152 | |
2153 | -static void skl_write_wm_values(struct drm_i915_private *dev_priv, |
2154 | - const struct skl_wm_values *new) |
2155 | +void skl_write_plane_wm(struct intel_crtc *intel_crtc, |
2156 | + const struct skl_wm_values *wm, |
2157 | + int plane) |
2158 | { |
2159 | - struct drm_device *dev = &dev_priv->drm; |
2160 | - struct intel_crtc *crtc; |
2161 | - |
2162 | - for_each_intel_crtc(dev, crtc) { |
2163 | - int i, level, max_level = ilk_wm_max_level(dev); |
2164 | - enum pipe pipe = crtc->pipe; |
2165 | - |
2166 | - if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0) |
2167 | - continue; |
2168 | - if (!crtc->active) |
2169 | - continue; |
2170 | - |
2171 | - I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]); |
2172 | - |
2173 | - for (level = 0; level <= max_level; level++) { |
2174 | - for (i = 0; i < intel_num_planes(crtc); i++) |
2175 | - I915_WRITE(PLANE_WM(pipe, i, level), |
2176 | - new->plane[pipe][i][level]); |
2177 | - I915_WRITE(CUR_WM(pipe, level), |
2178 | - new->plane[pipe][PLANE_CURSOR][level]); |
2179 | - } |
2180 | - for (i = 0; i < intel_num_planes(crtc); i++) |
2181 | - I915_WRITE(PLANE_WM_TRANS(pipe, i), |
2182 | - new->plane_trans[pipe][i]); |
2183 | - I915_WRITE(CUR_WM_TRANS(pipe), |
2184 | - new->plane_trans[pipe][PLANE_CURSOR]); |
2185 | - |
2186 | - for (i = 0; i < intel_num_planes(crtc); i++) { |
2187 | - skl_ddb_entry_write(dev_priv, |
2188 | - PLANE_BUF_CFG(pipe, i), |
2189 | - &new->ddb.plane[pipe][i]); |
2190 | - skl_ddb_entry_write(dev_priv, |
2191 | - PLANE_NV12_BUF_CFG(pipe, i), |
2192 | - &new->ddb.y_plane[pipe][i]); |
2193 | - } |
2194 | + struct drm_crtc *crtc = &intel_crtc->base; |
2195 | + struct drm_device *dev = crtc->dev; |
2196 | + struct drm_i915_private *dev_priv = to_i915(dev); |
2197 | + int level, max_level = ilk_wm_max_level(dev); |
2198 | + enum pipe pipe = intel_crtc->pipe; |
2199 | |
2200 | - skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), |
2201 | - &new->ddb.plane[pipe][PLANE_CURSOR]); |
2202 | + for (level = 0; level <= max_level; level++) { |
2203 | + I915_WRITE(PLANE_WM(pipe, plane, level), |
2204 | + wm->plane[pipe][plane][level]); |
2205 | } |
2206 | -} |
2207 | + I915_WRITE(PLANE_WM_TRANS(pipe, plane), wm->plane_trans[pipe][plane]); |
2208 | |
2209 | -/* |
2210 | - * When setting up a new DDB allocation arrangement, we need to correctly |
2211 | - * sequence the times at which the new allocations for the pipes are taken into |
2212 | - * account or we'll have pipes fetching from space previously allocated to |
2213 | - * another pipe. |
2214 | - * |
2215 | - * Roughly the sequence looks like: |
2216 | - * 1. re-allocate the pipe(s) with the allocation being reduced and not |
2217 | - * overlapping with a previous light-up pipe (another way to put it is: |
2218 | - * pipes with their new allocation strickly included into their old ones). |
2219 | - * 2. re-allocate the other pipes that get their allocation reduced |
2220 | - * 3. allocate the pipes having their allocation increased |
2221 | - * |
2222 | - * Steps 1. and 2. are here to take care of the following case: |
2223 | - * - Initially DDB looks like this: |
2224 | - * | B | C | |
2225 | - * - enable pipe A. |
2226 | - * - pipe B has a reduced DDB allocation that overlaps with the old pipe C |
2227 | - * allocation |
2228 | - * | A | B | C | |
2229 | - * |
2230 | - * We need to sequence the re-allocation: C, B, A (and not B, C, A). |
2231 | - */ |
2232 | + skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane), |
2233 | + &wm->ddb.plane[pipe][plane]); |
2234 | + skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane), |
2235 | + &wm->ddb.y_plane[pipe][plane]); |
2236 | +} |
2237 | |
2238 | -static void |
2239 | -skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass) |
2240 | +void skl_write_cursor_wm(struct intel_crtc *intel_crtc, |
2241 | + const struct skl_wm_values *wm) |
2242 | { |
2243 | - int plane; |
2244 | - |
2245 | - DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass); |
2246 | + struct drm_crtc *crtc = &intel_crtc->base; |
2247 | + struct drm_device *dev = crtc->dev; |
2248 | + struct drm_i915_private *dev_priv = to_i915(dev); |
2249 | + int level, max_level = ilk_wm_max_level(dev); |
2250 | + enum pipe pipe = intel_crtc->pipe; |
2251 | |
2252 | - for_each_plane(dev_priv, pipe, plane) { |
2253 | - I915_WRITE(PLANE_SURF(pipe, plane), |
2254 | - I915_READ(PLANE_SURF(pipe, plane))); |
2255 | + for (level = 0; level <= max_level; level++) { |
2256 | + I915_WRITE(CUR_WM(pipe, level), |
2257 | + wm->plane[pipe][PLANE_CURSOR][level]); |
2258 | } |
2259 | - I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe))); |
2260 | + I915_WRITE(CUR_WM_TRANS(pipe), wm->plane_trans[pipe][PLANE_CURSOR]); |
2261 | + |
2262 | + skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), |
2263 | + &wm->ddb.plane[pipe][PLANE_CURSOR]); |
2264 | } |
2265 | |
2266 | -static bool |
2267 | -skl_ddb_allocation_included(const struct skl_ddb_allocation *old, |
2268 | - const struct skl_ddb_allocation *new, |
2269 | - enum pipe pipe) |
2270 | +bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old, |
2271 | + const struct skl_ddb_allocation *new, |
2272 | + enum pipe pipe) |
2273 | { |
2274 | - uint16_t old_size, new_size; |
2275 | - |
2276 | - old_size = skl_ddb_entry_size(&old->pipe[pipe]); |
2277 | - new_size = skl_ddb_entry_size(&new->pipe[pipe]); |
2278 | - |
2279 | - return old_size != new_size && |
2280 | - new->pipe[pipe].start >= old->pipe[pipe].start && |
2281 | - new->pipe[pipe].end <= old->pipe[pipe].end; |
2282 | + return new->pipe[pipe].start == old->pipe[pipe].start && |
2283 | + new->pipe[pipe].end == old->pipe[pipe].end; |
2284 | } |
2285 | |
2286 | -static void skl_flush_wm_values(struct drm_i915_private *dev_priv, |
2287 | - struct skl_wm_values *new_values) |
2288 | +static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a, |
2289 | + const struct skl_ddb_entry *b) |
2290 | { |
2291 | - struct drm_device *dev = &dev_priv->drm; |
2292 | - struct skl_ddb_allocation *cur_ddb, *new_ddb; |
2293 | - bool reallocated[I915_MAX_PIPES] = {}; |
2294 | - struct intel_crtc *crtc; |
2295 | - enum pipe pipe; |
2296 | - |
2297 | - new_ddb = &new_values->ddb; |
2298 | - cur_ddb = &dev_priv->wm.skl_hw.ddb; |
2299 | - |
2300 | - /* |
2301 | - * First pass: flush the pipes with the new allocation contained into |
2302 | - * the old space. |
2303 | - * |
2304 | - * We'll wait for the vblank on those pipes to ensure we can safely |
2305 | - * re-allocate the freed space without this pipe fetching from it. |
2306 | - */ |
2307 | - for_each_intel_crtc(dev, crtc) { |
2308 | - if (!crtc->active) |
2309 | - continue; |
2310 | - |
2311 | - pipe = crtc->pipe; |
2312 | - |
2313 | - if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe)) |
2314 | - continue; |
2315 | - |
2316 | - skl_wm_flush_pipe(dev_priv, pipe, 1); |
2317 | - intel_wait_for_vblank(dev, pipe); |
2318 | - |
2319 | - reallocated[pipe] = true; |
2320 | - } |
2321 | - |
2322 | + return a->start < b->end && b->start < a->end; |
2323 | +} |
2324 | |
2325 | - /* |
2326 | - * Second pass: flush the pipes that are having their allocation |
2327 | - * reduced, but overlapping with a previous allocation. |
2328 | - * |
2329 | - * Here as well we need to wait for the vblank to make sure the freed |
2330 | - * space is not used anymore. |
2331 | - */ |
2332 | - for_each_intel_crtc(dev, crtc) { |
2333 | - if (!crtc->active) |
2334 | - continue; |
2335 | +bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state, |
2336 | + const struct skl_ddb_allocation *old, |
2337 | + const struct skl_ddb_allocation *new, |
2338 | + enum pipe pipe) |
2339 | +{ |
2340 | + struct drm_device *dev = state->dev; |
2341 | + struct intel_crtc *intel_crtc; |
2342 | + enum pipe otherp; |
2343 | |
2344 | - pipe = crtc->pipe; |
2345 | + for_each_intel_crtc(dev, intel_crtc) { |
2346 | + otherp = intel_crtc->pipe; |
2347 | |
2348 | - if (reallocated[pipe]) |
2349 | + if (otherp == pipe) |
2350 | continue; |
2351 | |
2352 | - if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) < |
2353 | - skl_ddb_entry_size(&cur_ddb->pipe[pipe])) { |
2354 | - skl_wm_flush_pipe(dev_priv, pipe, 2); |
2355 | - intel_wait_for_vblank(dev, pipe); |
2356 | - reallocated[pipe] = true; |
2357 | - } |
2358 | + if (skl_ddb_entries_overlap(&new->pipe[pipe], |
2359 | + &old->pipe[otherp])) |
2360 | + return true; |
2361 | } |
2362 | |
2363 | - /* |
2364 | - * Third pass: flush the pipes that got more space allocated. |
2365 | - * |
2366 | - * We don't need to actively wait for the update here, next vblank |
2367 | - * will just get more DDB space with the correct WM values. |
2368 | - */ |
2369 | - for_each_intel_crtc(dev, crtc) { |
2370 | - if (!crtc->active) |
2371 | - continue; |
2372 | - |
2373 | - pipe = crtc->pipe; |
2374 | - |
2375 | - /* |
2376 | - * At this point, only the pipes more space than before are |
2377 | - * left to re-allocate. |
2378 | - */ |
2379 | - if (reallocated[pipe]) |
2380 | - continue; |
2381 | - |
2382 | - skl_wm_flush_pipe(dev_priv, pipe, 3); |
2383 | - } |
2384 | + return false; |
2385 | } |
2386 | |
2387 | static int skl_update_pipe_wm(struct drm_crtc_state *cstate, |
2388 | @@ -4041,6 +3963,41 @@ pipes_modified(struct drm_atomic_state *state) |
2389 | return ret; |
2390 | } |
2391 | |
2392 | +int |
2393 | +skl_ddb_add_affected_planes(struct intel_crtc_state *cstate) |
2394 | +{ |
2395 | + struct drm_atomic_state *state = cstate->base.state; |
2396 | + struct drm_device *dev = state->dev; |
2397 | + struct drm_crtc *crtc = cstate->base.crtc; |
2398 | + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2399 | + struct drm_i915_private *dev_priv = to_i915(dev); |
2400 | + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); |
2401 | + struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb; |
2402 | + struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb; |
2403 | + struct drm_plane_state *plane_state; |
2404 | + struct drm_plane *plane; |
2405 | + enum pipe pipe = intel_crtc->pipe; |
2406 | + int id; |
2407 | + |
2408 | + WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc)); |
2409 | + |
2410 | + drm_for_each_plane_mask(plane, dev, crtc->state->plane_mask) { |
2411 | + id = skl_wm_plane_id(to_intel_plane(plane)); |
2412 | + |
2413 | + if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][id], |
2414 | + &new_ddb->plane[pipe][id]) && |
2415 | + skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][id], |
2416 | + &new_ddb->y_plane[pipe][id])) |
2417 | + continue; |
2418 | + |
2419 | + plane_state = drm_atomic_get_plane_state(state, plane); |
2420 | + if (IS_ERR(plane_state)) |
2421 | + return PTR_ERR(plane_state); |
2422 | + } |
2423 | + |
2424 | + return 0; |
2425 | +} |
2426 | + |
2427 | static int |
2428 | skl_compute_ddb(struct drm_atomic_state *state) |
2429 | { |
2430 | @@ -4105,6 +4062,10 @@ skl_compute_ddb(struct drm_atomic_state *state) |
2431 | if (ret) |
2432 | return ret; |
2433 | |
2434 | + ret = skl_ddb_add_affected_planes(cstate); |
2435 | + if (ret) |
2436 | + return ret; |
2437 | + |
2438 | ret = drm_atomic_add_affected_planes(state, &intel_crtc->base); |
2439 | if (ret) |
2440 | return ret; |
2441 | @@ -4206,7 +4167,7 @@ static void skl_update_wm(struct drm_crtc *crtc) |
2442 | struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw; |
2443 | struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); |
2444 | struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; |
2445 | - int pipe; |
2446 | + enum pipe pipe = intel_crtc->pipe; |
2447 | |
2448 | if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0) |
2449 | return; |
2450 | @@ -4215,15 +4176,22 @@ static void skl_update_wm(struct drm_crtc *crtc) |
2451 | |
2452 | mutex_lock(&dev_priv->wm.wm_mutex); |
2453 | |
2454 | - skl_write_wm_values(dev_priv, results); |
2455 | - skl_flush_wm_values(dev_priv, results); |
2456 | - |
2457 | /* |
2458 | - * Store the new configuration (but only for the pipes that have |
2459 | - * changed; the other values weren't recomputed). |
2460 | + * If this pipe isn't active already, we're going to be enabling it |
2461 | + * very soon. Since it's safe to update a pipe's ddb allocation while |
2462 | + * the pipe's shut off, just do so here. Already active pipes will have |
2463 | + * their watermarks updated once we update their planes. |
2464 | */ |
2465 | - for_each_pipe_masked(dev_priv, pipe, results->dirty_pipes) |
2466 | - skl_copy_wm_for_pipe(hw_vals, results, pipe); |
2467 | + if (crtc->state->active_changed) { |
2468 | + int plane; |
2469 | + |
2470 | + for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) |
2471 | + skl_write_plane_wm(intel_crtc, results, plane); |
2472 | + |
2473 | + skl_write_cursor_wm(intel_crtc, results); |
2474 | + } |
2475 | + |
2476 | + skl_copy_wm_for_pipe(hw_vals, results, pipe); |
2477 | |
2478 | mutex_unlock(&dev_priv->wm.wm_mutex); |
2479 | } |
2480 | diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c |
2481 | index 7c08e4f29032..4178849631ad 100644 |
2482 | --- a/drivers/gpu/drm/i915/intel_sprite.c |
2483 | +++ b/drivers/gpu/drm/i915/intel_sprite.c |
2484 | @@ -203,6 +203,9 @@ skl_update_plane(struct drm_plane *drm_plane, |
2485 | struct intel_plane *intel_plane = to_intel_plane(drm_plane); |
2486 | struct drm_framebuffer *fb = plane_state->base.fb; |
2487 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); |
2488 | + const struct skl_wm_values *wm = &dev_priv->wm.skl_results; |
2489 | + struct drm_crtc *crtc = crtc_state->base.crtc; |
2490 | + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2491 | const int pipe = intel_plane->pipe; |
2492 | const int plane = intel_plane->plane + 1; |
2493 | u32 plane_ctl, stride_div, stride; |
2494 | @@ -238,6 +241,9 @@ skl_update_plane(struct drm_plane *drm_plane, |
2495 | crtc_w--; |
2496 | crtc_h--; |
2497 | |
2498 | + if (wm->dirty_pipes & drm_crtc_mask(crtc)) |
2499 | + skl_write_plane_wm(intel_crtc, wm, plane); |
2500 | + |
2501 | if (key->flags) { |
2502 | I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value); |
2503 | I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value); |
2504 | @@ -308,6 +314,14 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) |
2505 | const int pipe = intel_plane->pipe; |
2506 | const int plane = intel_plane->plane + 1; |
2507 | |
2508 | + /* |
2509 | + * We only populate skl_results on watermark updates, and if the |
2510 | + * plane's visiblity isn't actually changing neither is its watermarks. |
2511 | + */ |
2512 | + if (!to_intel_plane_state(dplane->state)->visible) |
2513 | + skl_write_plane_wm(to_intel_crtc(crtc), |
2514 | + &dev_priv->wm.skl_results, plane); |
2515 | + |
2516 | I915_WRITE(PLANE_CTL(pipe, plane), 0); |
2517 | |
2518 | I915_WRITE(PLANE_SURF(pipe, plane), 0); |
2519 | diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c |
2520 | index ff80a81b1a84..ec28b15f2724 100644 |
2521 | --- a/drivers/gpu/drm/i915/intel_uncore.c |
2522 | +++ b/drivers/gpu/drm/i915/intel_uncore.c |
2523 | @@ -796,10 +796,9 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv, |
2524 | const bool read, |
2525 | const bool before) |
2526 | { |
2527 | - if (WARN(check_for_unclaimed_mmio(dev_priv), |
2528 | - "Unclaimed register detected %s %s register 0x%x\n", |
2529 | - before ? "before" : "after", |
2530 | - read ? "reading" : "writing to", |
2531 | + if (WARN(check_for_unclaimed_mmio(dev_priv) && !before, |
2532 | + "Unclaimed %s register 0x%x\n", |
2533 | + read ? "read from" : "write to", |
2534 | i915_mmio_reg_offset(reg))) |
2535 | i915.mmio_debug--; /* Only report the first N failures */ |
2536 | } |
2537 | diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c |
2538 | index 6a4b020dd0b4..5a26eb4545aa 100644 |
2539 | --- a/drivers/gpu/drm/radeon/r600_dpm.c |
2540 | +++ b/drivers/gpu/drm/radeon/r600_dpm.c |
2541 | @@ -156,19 +156,20 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev) |
2542 | struct drm_device *dev = rdev->ddev; |
2543 | struct drm_crtc *crtc; |
2544 | struct radeon_crtc *radeon_crtc; |
2545 | - u32 line_time_us, vblank_lines; |
2546 | + u32 vblank_in_pixels; |
2547 | u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ |
2548 | |
2549 | if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { |
2550 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
2551 | radeon_crtc = to_radeon_crtc(crtc); |
2552 | if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { |
2553 | - line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) / |
2554 | - radeon_crtc->hw_mode.clock; |
2555 | - vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end - |
2556 | - radeon_crtc->hw_mode.crtc_vdisplay + |
2557 | - (radeon_crtc->v_border * 2); |
2558 | - vblank_time_us = vblank_lines * line_time_us; |
2559 | + vblank_in_pixels = |
2560 | + radeon_crtc->hw_mode.crtc_htotal * |
2561 | + (radeon_crtc->hw_mode.crtc_vblank_end - |
2562 | + radeon_crtc->hw_mode.crtc_vdisplay + |
2563 | + (radeon_crtc->v_border * 2)); |
2564 | + |
2565 | + vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock; |
2566 | break; |
2567 | } |
2568 | } |
2569 | diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c |
2570 | index a00dd2f74527..554ca7115f98 100644 |
2571 | --- a/drivers/gpu/drm/radeon/radeon_device.c |
2572 | +++ b/drivers/gpu/drm/radeon/radeon_device.c |
2573 | @@ -661,8 +661,9 @@ bool radeon_card_posted(struct radeon_device *rdev) |
2574 | { |
2575 | uint32_t reg; |
2576 | |
2577 | - /* for pass through, always force asic_init */ |
2578 | - if (radeon_device_is_virtual()) |
2579 | + /* for pass through, always force asic_init for CI */ |
2580 | + if (rdev->family >= CHIP_BONAIRE && |
2581 | + radeon_device_is_virtual()) |
2582 | return false; |
2583 | |
2584 | /* required for EFI mode on macbook2,1 which uses an r5xx asic */ |
2585 | diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c |
2586 | index 1f78ec2548ec..89bdf20344ae 100644 |
2587 | --- a/drivers/gpu/drm/radeon/si_dpm.c |
2588 | +++ b/drivers/gpu/drm/radeon/si_dpm.c |
2589 | @@ -4112,7 +4112,7 @@ static int si_populate_smc_voltage_tables(struct radeon_device *rdev, |
2590 | &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) { |
2591 | si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table); |
2592 | |
2593 | - table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] = |
2594 | + table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] = |
2595 | cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low); |
2596 | |
2597 | si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay, |
2598 | diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h |
2599 | index 3c779838d9ab..966e3a556011 100644 |
2600 | --- a/drivers/gpu/drm/radeon/sislands_smc.h |
2601 | +++ b/drivers/gpu/drm/radeon/sislands_smc.h |
2602 | @@ -194,6 +194,7 @@ typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE; |
2603 | #define SISLANDS_SMC_VOLTAGEMASK_VDDC 0 |
2604 | #define SISLANDS_SMC_VOLTAGEMASK_MVDD 1 |
2605 | #define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2 |
2606 | +#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3 |
2607 | #define SISLANDS_SMC_VOLTAGEMASK_MAX 4 |
2608 | |
2609 | struct SISLANDS_SMC_VOLTAGEMASKTABLE |
2610 | diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h |
2611 | index 428e24919ef1..f696b752886b 100644 |
2612 | --- a/drivers/gpu/drm/vc4/vc4_drv.h |
2613 | +++ b/drivers/gpu/drm/vc4/vc4_drv.h |
2614 | @@ -122,9 +122,16 @@ to_vc4_dev(struct drm_device *dev) |
2615 | struct vc4_bo { |
2616 | struct drm_gem_cma_object base; |
2617 | |
2618 | - /* seqno of the last job to render to this BO. */ |
2619 | + /* seqno of the last job to render using this BO. */ |
2620 | uint64_t seqno; |
2621 | |
2622 | + /* seqno of the last job to use the RCL to write to this BO. |
2623 | + * |
2624 | + * Note that this doesn't include binner overflow memory |
2625 | + * writes. |
2626 | + */ |
2627 | + uint64_t write_seqno; |
2628 | + |
2629 | /* List entry for the BO's position in either |
2630 | * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list |
2631 | */ |
2632 | @@ -216,6 +223,9 @@ struct vc4_exec_info { |
2633 | /* Sequence number for this bin/render job. */ |
2634 | uint64_t seqno; |
2635 | |
2636 | + /* Latest write_seqno of any BO that binning depends on. */ |
2637 | + uint64_t bin_dep_seqno; |
2638 | + |
2639 | /* Last current addresses the hardware was processing when the |
2640 | * hangcheck timer checked on us. |
2641 | */ |
2642 | @@ -230,6 +240,13 @@ struct vc4_exec_info { |
2643 | struct drm_gem_cma_object **bo; |
2644 | uint32_t bo_count; |
2645 | |
2646 | + /* List of BOs that are being written by the RCL. Other than |
2647 | + * the binner temporary storage, this is all the BOs written |
2648 | + * by the job. |
2649 | + */ |
2650 | + struct drm_gem_cma_object *rcl_write_bo[4]; |
2651 | + uint32_t rcl_write_bo_count; |
2652 | + |
2653 | /* Pointers for our position in vc4->job_list */ |
2654 | struct list_head head; |
2655 | |
2656 | diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c |
2657 | index b262c5c26f10..ae1609e739ef 100644 |
2658 | --- a/drivers/gpu/drm/vc4/vc4_gem.c |
2659 | +++ b/drivers/gpu/drm/vc4/vc4_gem.c |
2660 | @@ -471,6 +471,11 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno) |
2661 | list_for_each_entry(bo, &exec->unref_list, unref_head) { |
2662 | bo->seqno = seqno; |
2663 | } |
2664 | + |
2665 | + for (i = 0; i < exec->rcl_write_bo_count; i++) { |
2666 | + bo = to_vc4_bo(&exec->rcl_write_bo[i]->base); |
2667 | + bo->write_seqno = seqno; |
2668 | + } |
2669 | } |
2670 | |
2671 | /* Queues a struct vc4_exec_info for execution. If no job is |
2672 | @@ -673,6 +678,14 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec) |
2673 | goto fail; |
2674 | |
2675 | ret = vc4_validate_shader_recs(dev, exec); |
2676 | + if (ret) |
2677 | + goto fail; |
2678 | + |
2679 | + /* Block waiting on any previous rendering into the CS's VBO, |
2680 | + * IB, or textures, so that pixels are actually written by the |
2681 | + * time we try to read them. |
2682 | + */ |
2683 | + ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true); |
2684 | |
2685 | fail: |
2686 | drm_free_large(temp); |
2687 | diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c |
2688 | index 0f12418725e5..08886a309757 100644 |
2689 | --- a/drivers/gpu/drm/vc4/vc4_render_cl.c |
2690 | +++ b/drivers/gpu/drm/vc4/vc4_render_cl.c |
2691 | @@ -45,6 +45,8 @@ struct vc4_rcl_setup { |
2692 | |
2693 | struct drm_gem_cma_object *rcl; |
2694 | u32 next_offset; |
2695 | + |
2696 | + u32 next_write_bo_index; |
2697 | }; |
2698 | |
2699 | static inline void rcl_u8(struct vc4_rcl_setup *setup, u8 val) |
2700 | @@ -407,6 +409,8 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec, |
2701 | if (!*obj) |
2702 | return -EINVAL; |
2703 | |
2704 | + exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj; |
2705 | + |
2706 | if (surf->offset & 0xf) { |
2707 | DRM_ERROR("MSAA write must be 16b aligned.\n"); |
2708 | return -EINVAL; |
2709 | @@ -417,7 +421,8 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec, |
2710 | |
2711 | static int vc4_rcl_surface_setup(struct vc4_exec_info *exec, |
2712 | struct drm_gem_cma_object **obj, |
2713 | - struct drm_vc4_submit_rcl_surface *surf) |
2714 | + struct drm_vc4_submit_rcl_surface *surf, |
2715 | + bool is_write) |
2716 | { |
2717 | uint8_t tiling = VC4_GET_FIELD(surf->bits, |
2718 | VC4_LOADSTORE_TILE_BUFFER_TILING); |
2719 | @@ -440,6 +445,9 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec, |
2720 | if (!*obj) |
2721 | return -EINVAL; |
2722 | |
2723 | + if (is_write) |
2724 | + exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj; |
2725 | + |
2726 | if (surf->flags & VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) { |
2727 | if (surf == &exec->args->zs_write) { |
2728 | DRM_ERROR("general zs write may not be a full-res.\n"); |
2729 | @@ -542,6 +550,8 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec, |
2730 | if (!*obj) |
2731 | return -EINVAL; |
2732 | |
2733 | + exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj; |
2734 | + |
2735 | if (tiling > VC4_TILING_FORMAT_LT) { |
2736 | DRM_ERROR("Bad tiling format\n"); |
2737 | return -EINVAL; |
2738 | @@ -599,15 +609,18 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec) |
2739 | if (ret) |
2740 | return ret; |
2741 | |
2742 | - ret = vc4_rcl_surface_setup(exec, &setup.color_read, &args->color_read); |
2743 | + ret = vc4_rcl_surface_setup(exec, &setup.color_read, &args->color_read, |
2744 | + false); |
2745 | if (ret) |
2746 | return ret; |
2747 | |
2748 | - ret = vc4_rcl_surface_setup(exec, &setup.zs_read, &args->zs_read); |
2749 | + ret = vc4_rcl_surface_setup(exec, &setup.zs_read, &args->zs_read, |
2750 | + false); |
2751 | if (ret) |
2752 | return ret; |
2753 | |
2754 | - ret = vc4_rcl_surface_setup(exec, &setup.zs_write, &args->zs_write); |
2755 | + ret = vc4_rcl_surface_setup(exec, &setup.zs_write, &args->zs_write, |
2756 | + true); |
2757 | if (ret) |
2758 | return ret; |
2759 | |
2760 | diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c |
2761 | index 9ce1d0adf882..26503e307438 100644 |
2762 | --- a/drivers/gpu/drm/vc4/vc4_validate.c |
2763 | +++ b/drivers/gpu/drm/vc4/vc4_validate.c |
2764 | @@ -267,6 +267,9 @@ validate_indexed_prim_list(VALIDATE_ARGS) |
2765 | if (!ib) |
2766 | return -EINVAL; |
2767 | |
2768 | + exec->bin_dep_seqno = max(exec->bin_dep_seqno, |
2769 | + to_vc4_bo(&ib->base)->write_seqno); |
2770 | + |
2771 | if (offset > ib->base.size || |
2772 | (ib->base.size - offset) / index_size < length) { |
2773 | DRM_ERROR("IB access overflow (%d + %d*%d > %zd)\n", |
2774 | @@ -555,8 +558,7 @@ static bool |
2775 | reloc_tex(struct vc4_exec_info *exec, |
2776 | void *uniform_data_u, |
2777 | struct vc4_texture_sample_info *sample, |
2778 | - uint32_t texture_handle_index) |
2779 | - |
2780 | + uint32_t texture_handle_index, bool is_cs) |
2781 | { |
2782 | struct drm_gem_cma_object *tex; |
2783 | uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]); |
2784 | @@ -714,6 +716,11 @@ reloc_tex(struct vc4_exec_info *exec, |
2785 | |
2786 | *validated_p0 = tex->paddr + p0; |
2787 | |
2788 | + if (is_cs) { |
2789 | + exec->bin_dep_seqno = max(exec->bin_dep_seqno, |
2790 | + to_vc4_bo(&tex->base)->write_seqno); |
2791 | + } |
2792 | + |
2793 | return true; |
2794 | fail: |
2795 | DRM_INFO("Texture p0 at %d: 0x%08x\n", sample->p_offset[0], p0); |
2796 | @@ -835,7 +842,8 @@ validate_gl_shader_rec(struct drm_device *dev, |
2797 | if (!reloc_tex(exec, |
2798 | uniform_data_u, |
2799 | &validated_shader->texture_samples[tex], |
2800 | - texture_handles_u[tex])) { |
2801 | + texture_handles_u[tex], |
2802 | + i == 2)) { |
2803 | return -EINVAL; |
2804 | } |
2805 | } |
2806 | @@ -867,6 +875,9 @@ validate_gl_shader_rec(struct drm_device *dev, |
2807 | uint32_t stride = *(uint8_t *)(pkt_u + o + 5); |
2808 | uint32_t max_index; |
2809 | |
2810 | + exec->bin_dep_seqno = max(exec->bin_dep_seqno, |
2811 | + to_vc4_bo(&vbo->base)->write_seqno); |
2812 | + |
2813 | if (state->addr & 0x8) |
2814 | stride |= (*(uint32_t *)(pkt_u + 100 + i * 4)) & ~0xff; |
2815 | |
2816 | diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |
2817 | index dc5beff2b4aa..8a15c4aa84c1 100644 |
2818 | --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |
2819 | +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |
2820 | @@ -34,6 +34,24 @@ |
2821 | |
2822 | #define VMW_RES_HT_ORDER 12 |
2823 | |
2824 | + /** |
2825 | + * enum vmw_resource_relocation_type - Relocation type for resources |
2826 | + * |
2827 | + * @vmw_res_rel_normal: Traditional relocation. The resource id in the |
2828 | + * command stream is replaced with the actual id after validation. |
2829 | + * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced |
2830 | + * with a NOP. |
2831 | + * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id |
2832 | + * after validation is -1, the command is replaced with a NOP. Otherwise no |
2833 | + * action. |
2834 | + */ |
2835 | +enum vmw_resource_relocation_type { |
2836 | + vmw_res_rel_normal, |
2837 | + vmw_res_rel_nop, |
2838 | + vmw_res_rel_cond_nop, |
2839 | + vmw_res_rel_max |
2840 | +}; |
2841 | + |
2842 | /** |
2843 | * struct vmw_resource_relocation - Relocation info for resources |
2844 | * |
2845 | @@ -41,11 +59,13 @@ |
2846 | * @res: Non-ref-counted pointer to the resource. |
2847 | * @offset: Offset of 4 byte entries into the command buffer where the |
2848 | * id that needs fixup is located. |
2849 | + * @rel_type: Type of relocation. |
2850 | */ |
2851 | struct vmw_resource_relocation { |
2852 | struct list_head head; |
2853 | const struct vmw_resource *res; |
2854 | - unsigned long offset; |
2855 | + u32 offset:29; |
2856 | + enum vmw_resource_relocation_type rel_type:3; |
2857 | }; |
2858 | |
2859 | /** |
2860 | @@ -410,10 +430,13 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, |
2861 | * @res: The resource. |
2862 | * @offset: Offset into the command buffer currently being parsed where the |
2863 | * id that needs fixup is located. Granularity is 4 bytes. |
2864 | + * @rel_type: Relocation type. |
2865 | */ |
2866 | static int vmw_resource_relocation_add(struct list_head *list, |
2867 | const struct vmw_resource *res, |
2868 | - unsigned long offset) |
2869 | + unsigned long offset, |
2870 | + enum vmw_resource_relocation_type |
2871 | + rel_type) |
2872 | { |
2873 | struct vmw_resource_relocation *rel; |
2874 | |
2875 | @@ -425,6 +448,7 @@ static int vmw_resource_relocation_add(struct list_head *list, |
2876 | |
2877 | rel->res = res; |
2878 | rel->offset = offset; |
2879 | + rel->rel_type = rel_type; |
2880 | list_add_tail(&rel->head, list); |
2881 | |
2882 | return 0; |
2883 | @@ -459,11 +483,23 @@ static void vmw_resource_relocations_apply(uint32_t *cb, |
2884 | { |
2885 | struct vmw_resource_relocation *rel; |
2886 | |
2887 | + /* Validate the struct vmw_resource_relocation member size */ |
2888 | + BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29)); |
2889 | + BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3)); |
2890 | + |
2891 | list_for_each_entry(rel, list, head) { |
2892 | - if (likely(rel->res != NULL)) |
2893 | + switch (rel->rel_type) { |
2894 | + case vmw_res_rel_normal: |
2895 | cb[rel->offset] = rel->res->id; |
2896 | - else |
2897 | + break; |
2898 | + case vmw_res_rel_nop: |
2899 | cb[rel->offset] = SVGA_3D_CMD_NOP; |
2900 | + break; |
2901 | + default: |
2902 | + if (rel->res->id == -1) |
2903 | + cb[rel->offset] = SVGA_3D_CMD_NOP; |
2904 | + break; |
2905 | + } |
2906 | } |
2907 | } |
2908 | |
2909 | @@ -655,7 +691,8 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv, |
2910 | *p_val = NULL; |
2911 | ret = vmw_resource_relocation_add(&sw_context->res_relocations, |
2912 | res, |
2913 | - id_loc - sw_context->buf_start); |
2914 | + id_loc - sw_context->buf_start, |
2915 | + vmw_res_rel_normal); |
2916 | if (unlikely(ret != 0)) |
2917 | return ret; |
2918 | |
2919 | @@ -721,7 +758,8 @@ vmw_cmd_res_check(struct vmw_private *dev_priv, |
2920 | |
2921 | return vmw_resource_relocation_add |
2922 | (&sw_context->res_relocations, res, |
2923 | - id_loc - sw_context->buf_start); |
2924 | + id_loc - sw_context->buf_start, |
2925 | + vmw_res_rel_normal); |
2926 | } |
2927 | |
2928 | ret = vmw_user_resource_lookup_handle(dev_priv, |
2929 | @@ -2144,7 +2182,8 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv, |
2930 | |
2931 | return vmw_resource_relocation_add(&sw_context->res_relocations, |
2932 | NULL, &cmd->header.id - |
2933 | - sw_context->buf_start); |
2934 | + sw_context->buf_start, |
2935 | + vmw_res_rel_nop); |
2936 | |
2937 | return 0; |
2938 | } |
2939 | @@ -2189,7 +2228,8 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, |
2940 | |
2941 | return vmw_resource_relocation_add(&sw_context->res_relocations, |
2942 | NULL, &cmd->header.id - |
2943 | - sw_context->buf_start); |
2944 | + sw_context->buf_start, |
2945 | + vmw_res_rel_nop); |
2946 | |
2947 | return 0; |
2948 | } |
2949 | @@ -2848,8 +2888,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv, |
2950 | * @header: Pointer to the command header in the command stream. |
2951 | * |
2952 | * Check that the view exists, and if it was not created using this |
2953 | - * command batch, make sure it's validated (present in the device) so that |
2954 | - * the remove command will not confuse the device. |
2955 | + * command batch, conditionally make this command a NOP. |
2956 | */ |
2957 | static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, |
2958 | struct vmw_sw_context *sw_context, |
2959 | @@ -2877,10 +2916,15 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, |
2960 | return ret; |
2961 | |
2962 | /* |
2963 | - * Add view to the validate list iff it was not created using this |
2964 | - * command batch. |
2965 | + * If the view wasn't created during this command batch, it might |
2966 | + * have been removed due to a context swapout, so add a |
2967 | + * relocation to conditionally make this command a NOP to avoid |
2968 | + * device errors. |
2969 | */ |
2970 | - return vmw_view_res_val_add(sw_context, view); |
2971 | + return vmw_resource_relocation_add(&sw_context->res_relocations, |
2972 | + view, |
2973 | + &cmd->header.id - sw_context->buf_start, |
2974 | + vmw_res_rel_cond_nop); |
2975 | } |
2976 | |
2977 | /** |
2978 | @@ -3848,14 +3892,14 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv, |
2979 | int ret; |
2980 | |
2981 | *header = NULL; |
2982 | - if (!dev_priv->cman || kernel_commands) |
2983 | - return kernel_commands; |
2984 | - |
2985 | if (command_size > SVGA_CB_MAX_SIZE) { |
2986 | DRM_ERROR("Command buffer is too large.\n"); |
2987 | return ERR_PTR(-EINVAL); |
2988 | } |
2989 | |
2990 | + if (!dev_priv->cman || kernel_commands) |
2991 | + return kernel_commands; |
2992 | + |
2993 | /* If possible, add a little space for fencing. */ |
2994 | cmdbuf_size = command_size + 512; |
2995 | cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE); |
2996 | diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h |
2997 | index 4ed9a4fdfea7..e92b09d32605 100644 |
2998 | --- a/drivers/hid/hid-ids.h |
2999 | +++ b/drivers/hid/hid-ids.h |
3000 | @@ -64,6 +64,9 @@ |
3001 | #define USB_VENDOR_ID_AKAI 0x2011 |
3002 | #define USB_DEVICE_ID_AKAI_MPKMINI2 0x0715 |
3003 | |
3004 | +#define USB_VENDOR_ID_AKAI_09E8 0x09E8 |
3005 | +#define USB_DEVICE_ID_AKAI_09E8_MIDIMIX 0x0031 |
3006 | + |
3007 | #define USB_VENDOR_ID_ALCOR 0x058f |
3008 | #define USB_DEVICE_ID_ALCOR_USBRS232 0x9720 |
3009 | |
3010 | diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c |
3011 | index b4b8c6abb03e..bb400081efe4 100644 |
3012 | --- a/drivers/hid/usbhid/hid-quirks.c |
3013 | +++ b/drivers/hid/usbhid/hid-quirks.c |
3014 | @@ -56,6 +56,7 @@ static const struct hid_blacklist { |
3015 | |
3016 | { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET }, |
3017 | { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS }, |
3018 | + { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS }, |
3019 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET }, |
3020 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET }, |
3021 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET }, |
3022 | diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c |
3023 | index 9e02ac963cd0..3978cbb6b038 100644 |
3024 | --- a/drivers/hwtracing/coresight/coresight-tmc.c |
3025 | +++ b/drivers/hwtracing/coresight/coresight-tmc.c |
3026 | @@ -388,9 +388,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) |
3027 | err_misc_register: |
3028 | coresight_unregister(drvdata->csdev); |
3029 | err_devm_kzalloc: |
3030 | - if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) |
3031 | - dma_free_coherent(dev, drvdata->size, |
3032 | - drvdata->vaddr, drvdata->paddr); |
3033 | return ret; |
3034 | } |
3035 | |
3036 | diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c |
3037 | index 0fde593ec0d9..5f7968232564 100644 |
3038 | --- a/drivers/iio/dac/ad5755.c |
3039 | +++ b/drivers/iio/dac/ad5755.c |
3040 | @@ -655,7 +655,7 @@ static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev) |
3041 | |
3042 | devnr = 0; |
3043 | for_each_child_of_node(np, pp) { |
3044 | - if (devnr > AD5755_NUM_CHANNELS) { |
3045 | + if (devnr >= AD5755_NUM_CHANNELS) { |
3046 | dev_err(dev, |
3047 | "There is to many channels defined in DT\n"); |
3048 | goto error_out; |
3049 | diff --git a/drivers/iio/light/us5182d.c b/drivers/iio/light/us5182d.c |
3050 | index 20c40f780964..18cf2e29e4d5 100644 |
3051 | --- a/drivers/iio/light/us5182d.c |
3052 | +++ b/drivers/iio/light/us5182d.c |
3053 | @@ -894,7 +894,7 @@ static int us5182d_probe(struct i2c_client *client, |
3054 | goto out_err; |
3055 | |
3056 | if (data->default_continuous) { |
3057 | - pm_runtime_set_active(&client->dev); |
3058 | + ret = pm_runtime_set_active(&client->dev); |
3059 | if (ret < 0) |
3060 | goto out_err; |
3061 | } |
3062 | diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c |
3063 | index 4e4d8317c281..c17c9dd7cde1 100644 |
3064 | --- a/drivers/infiniband/hw/hfi1/qp.c |
3065 | +++ b/drivers/infiniband/hw/hfi1/qp.c |
3066 | @@ -808,6 +808,13 @@ void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, |
3067 | kfree(priv); |
3068 | return ERR_PTR(-ENOMEM); |
3069 | } |
3070 | + iowait_init( |
3071 | + &priv->s_iowait, |
3072 | + 1, |
3073 | + _hfi1_do_send, |
3074 | + iowait_sleep, |
3075 | + iowait_wakeup, |
3076 | + iowait_sdma_drained); |
3077 | setup_timer(&priv->s_rnr_timer, hfi1_rc_rnr_retry, (unsigned long)qp); |
3078 | qp->s_timer.function = hfi1_rc_timeout; |
3079 | return priv; |
3080 | @@ -873,13 +880,6 @@ void notify_qp_reset(struct rvt_qp *qp) |
3081 | { |
3082 | struct hfi1_qp_priv *priv = qp->priv; |
3083 | |
3084 | - iowait_init( |
3085 | - &priv->s_iowait, |
3086 | - 1, |
3087 | - _hfi1_do_send, |
3088 | - iowait_sleep, |
3089 | - iowait_wakeup, |
3090 | - iowait_sdma_drained); |
3091 | priv->r_adefered = 0; |
3092 | clear_ahg(qp); |
3093 | } |
3094 | diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c |
3095 | index e19537cf44ab..bff8707a2f1f 100644 |
3096 | --- a/drivers/infiniband/hw/mlx5/main.c |
3097 | +++ b/drivers/infiniband/hw/mlx5/main.c |
3098 | @@ -1843,6 +1843,7 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de |
3099 | &leftovers_specs[LEFTOVERS_UC].flow_attr, |
3100 | dst); |
3101 | if (IS_ERR(handler_ucast)) { |
3102 | + mlx5_del_flow_rule(handler->rule); |
3103 | kfree(handler); |
3104 | handler = handler_ucast; |
3105 | } else { |
3106 | diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h |
3107 | index bbf0a163aeab..54bb655f5332 100644 |
3108 | --- a/drivers/infiniband/hw/qib/qib.h |
3109 | +++ b/drivers/infiniband/hw/qib/qib.h |
3110 | @@ -1131,7 +1131,6 @@ extern spinlock_t qib_devs_lock; |
3111 | extern struct qib_devdata *qib_lookup(int unit); |
3112 | extern u32 qib_cpulist_count; |
3113 | extern unsigned long *qib_cpulist; |
3114 | -extern u16 qpt_mask; |
3115 | extern unsigned qib_cc_table_size; |
3116 | |
3117 | int qib_init(struct qib_devdata *, int); |
3118 | diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c |
3119 | index f9b8cd2354d1..99d31efe4c2f 100644 |
3120 | --- a/drivers/infiniband/hw/qib/qib_qp.c |
3121 | +++ b/drivers/infiniband/hw/qib/qib_qp.c |
3122 | @@ -41,14 +41,6 @@ |
3123 | |
3124 | #include "qib.h" |
3125 | |
3126 | -/* |
3127 | - * mask field which was present in now deleted qib_qpn_table |
3128 | - * is not present in rvt_qpn_table. Defining the same field |
3129 | - * as qpt_mask here instead of adding the mask field to |
3130 | - * rvt_qpn_table. |
3131 | - */ |
3132 | -u16 qpt_mask; |
3133 | - |
3134 | static inline unsigned mk_qpn(struct rvt_qpn_table *qpt, |
3135 | struct rvt_qpn_map *map, unsigned off) |
3136 | { |
3137 | @@ -57,7 +49,7 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt, |
3138 | |
3139 | static inline unsigned find_next_offset(struct rvt_qpn_table *qpt, |
3140 | struct rvt_qpn_map *map, unsigned off, |
3141 | - unsigned n) |
3142 | + unsigned n, u16 qpt_mask) |
3143 | { |
3144 | if (qpt_mask) { |
3145 | off++; |
3146 | @@ -179,6 +171,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, |
3147 | struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi); |
3148 | struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata, |
3149 | verbs_dev); |
3150 | + u16 qpt_mask = dd->qpn_mask; |
3151 | |
3152 | if (type == IB_QPT_SMI || type == IB_QPT_GSI) { |
3153 | unsigned n; |
3154 | @@ -215,7 +208,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, |
3155 | goto bail; |
3156 | } |
3157 | offset = find_next_offset(qpt, map, offset, |
3158 | - dd->n_krcv_queues); |
3159 | + dd->n_krcv_queues, qpt_mask); |
3160 | qpn = mk_qpn(qpt, map, offset); |
3161 | /* |
3162 | * This test differs from alloc_pidmap(). |
3163 | diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c |
3164 | index fd1dfbce5539..b2b845f9f7df 100644 |
3165 | --- a/drivers/infiniband/hw/qib/qib_verbs.c |
3166 | +++ b/drivers/infiniband/hw/qib/qib_verbs.c |
3167 | @@ -1606,8 +1606,6 @@ int qib_register_ib_device(struct qib_devdata *dd) |
3168 | /* Only need to initialize non-zero fields. */ |
3169 | setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev); |
3170 | |
3171 | - qpt_mask = dd->qpn_mask; |
3172 | - |
3173 | INIT_LIST_HEAD(&dev->piowait); |
3174 | INIT_LIST_HEAD(&dev->dmawait); |
3175 | INIT_LIST_HEAD(&dev->txwait); |
3176 | diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c |
3177 | index 870b4f212fbc..5911c534cc18 100644 |
3178 | --- a/drivers/infiniband/sw/rdmavt/qp.c |
3179 | +++ b/drivers/infiniband/sw/rdmavt/qp.c |
3180 | @@ -501,12 +501,9 @@ static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp) |
3181 | */ |
3182 | static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, |
3183 | enum ib_qp_type type) |
3184 | - __releases(&qp->s_lock) |
3185 | - __releases(&qp->s_hlock) |
3186 | - __releases(&qp->r_lock) |
3187 | - __acquires(&qp->r_lock) |
3188 | - __acquires(&qp->s_hlock) |
3189 | - __acquires(&qp->s_lock) |
3190 | + __must_hold(&qp->r_lock) |
3191 | + __must_hold(&qp->s_hlock) |
3192 | + __must_hold(&qp->s_lock) |
3193 | { |
3194 | if (qp->state != IB_QPS_RESET) { |
3195 | qp->state = IB_QPS_RESET; |
3196 | diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c |
3197 | index 618f18436618..c65e17fae24e 100644 |
3198 | --- a/drivers/net/ethernet/intel/i40e/i40e_client.c |
3199 | +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c |
3200 | @@ -1009,7 +1009,6 @@ int i40e_unregister_client(struct i40e_client *client) |
3201 | if (!i40e_client_is_registered(client)) { |
3202 | pr_info("i40e: Client %s has not been registered\n", |
3203 | client->name); |
3204 | - mutex_unlock(&i40e_client_mutex); |
3205 | ret = -ENODEV; |
3206 | goto out; |
3207 | } |
3208 | diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c |
3209 | index dad15b6c66dd..c74d16409941 100644 |
3210 | --- a/drivers/net/ethernet/intel/i40e/i40e_main.c |
3211 | +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c |
3212 | @@ -7990,45 +7990,34 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) |
3213 | static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, |
3214 | u8 *lut, u16 lut_size) |
3215 | { |
3216 | - struct i40e_aqc_get_set_rss_key_data rss_key; |
3217 | struct i40e_pf *pf = vsi->back; |
3218 | struct i40e_hw *hw = &pf->hw; |
3219 | - bool pf_lut = false; |
3220 | - u8 *rss_lut; |
3221 | - int ret, i; |
3222 | - |
3223 | - memcpy(&rss_key, seed, sizeof(rss_key)); |
3224 | - |
3225 | - rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL); |
3226 | - if (!rss_lut) |
3227 | - return -ENOMEM; |
3228 | - |
3229 | - /* Populate the LUT with max no. of queues in round robin fashion */ |
3230 | - for (i = 0; i < vsi->rss_table_size; i++) |
3231 | - rss_lut[i] = i % vsi->rss_size; |
3232 | + int ret = 0; |
3233 | |
3234 | - ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key); |
3235 | - if (ret) { |
3236 | - dev_info(&pf->pdev->dev, |
3237 | - "Cannot set RSS key, err %s aq_err %s\n", |
3238 | - i40e_stat_str(&pf->hw, ret), |
3239 | - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); |
3240 | - goto config_rss_aq_out; |
3241 | + if (seed) { |
3242 | + struct i40e_aqc_get_set_rss_key_data *seed_dw = |
3243 | + (struct i40e_aqc_get_set_rss_key_data *)seed; |
3244 | + ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw); |
3245 | + if (ret) { |
3246 | + dev_info(&pf->pdev->dev, |
3247 | + "Cannot set RSS key, err %s aq_err %s\n", |
3248 | + i40e_stat_str(hw, ret), |
3249 | + i40e_aq_str(hw, hw->aq.asq_last_status)); |
3250 | + return ret; |
3251 | + } |
3252 | } |
3253 | + if (lut) { |
3254 | + bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false; |
3255 | |
3256 | - if (vsi->type == I40E_VSI_MAIN) |
3257 | - pf_lut = true; |
3258 | - |
3259 | - ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut, |
3260 | - vsi->rss_table_size); |
3261 | - if (ret) |
3262 | - dev_info(&pf->pdev->dev, |
3263 | - "Cannot set RSS lut, err %s aq_err %s\n", |
3264 | - i40e_stat_str(&pf->hw, ret), |
3265 | - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); |
3266 | - |
3267 | -config_rss_aq_out: |
3268 | - kfree(rss_lut); |
3269 | + ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); |
3270 | + if (ret) { |
3271 | + dev_info(&pf->pdev->dev, |
3272 | + "Cannot set RSS lut, err %s aq_err %s\n", |
3273 | + i40e_stat_str(hw, ret), |
3274 | + i40e_aq_str(hw, hw->aq.asq_last_status)); |
3275 | + return ret; |
3276 | + } |
3277 | + } |
3278 | return ret; |
3279 | } |
3280 | |
3281 | diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c |
3282 | index 24c8d65bcf34..09ca63466504 100644 |
3283 | --- a/drivers/net/wireless/ath/ath10k/htt_rx.c |
3284 | +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c |
3285 | @@ -2394,6 +2394,8 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) |
3286 | skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q); |
3287 | spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags); |
3288 | |
3289 | + ath10k_mac_tx_push_pending(ar); |
3290 | + |
3291 | spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags); |
3292 | skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q); |
3293 | spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags); |
3294 | diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c |
3295 | index 0bbd0a00edcc..146365b93ff5 100644 |
3296 | --- a/drivers/net/wireless/ath/ath10k/mac.c |
3297 | +++ b/drivers/net/wireless/ath/ath10k/mac.c |
3298 | @@ -3777,7 +3777,9 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, |
3299 | enum ath10k_hw_txrx_mode txmode; |
3300 | enum ath10k_mac_tx_path txpath; |
3301 | struct sk_buff *skb; |
3302 | + struct ieee80211_hdr *hdr; |
3303 | size_t skb_len; |
3304 | + bool is_mgmt, is_presp; |
3305 | int ret; |
3306 | |
3307 | spin_lock_bh(&ar->htt.tx_lock); |
3308 | @@ -3801,6 +3803,22 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, |
3309 | skb_len = skb->len; |
3310 | txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); |
3311 | txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); |
3312 | + is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); |
3313 | + |
3314 | + if (is_mgmt) { |
3315 | + hdr = (struct ieee80211_hdr *)skb->data; |
3316 | + is_presp = ieee80211_is_probe_resp(hdr->frame_control); |
3317 | + |
3318 | + spin_lock_bh(&ar->htt.tx_lock); |
3319 | + ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); |
3320 | + |
3321 | + if (ret) { |
3322 | + ath10k_htt_tx_dec_pending(htt); |
3323 | + spin_unlock_bh(&ar->htt.tx_lock); |
3324 | + return ret; |
3325 | + } |
3326 | + spin_unlock_bh(&ar->htt.tx_lock); |
3327 | + } |
3328 | |
3329 | ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb); |
3330 | if (unlikely(ret)) { |
3331 | @@ -3808,6 +3826,8 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, |
3332 | |
3333 | spin_lock_bh(&ar->htt.tx_lock); |
3334 | ath10k_htt_tx_dec_pending(htt); |
3335 | + if (is_mgmt) |
3336 | + ath10k_htt_tx_mgmt_dec_pending(htt); |
3337 | spin_unlock_bh(&ar->htt.tx_lock); |
3338 | |
3339 | return ret; |
3340 | @@ -6538,7 +6558,7 @@ static int ath10k_get_survey(struct ieee80211_hw *hw, int idx, |
3341 | goto exit; |
3342 | } |
3343 | |
3344 | - ath10k_mac_update_bss_chan_survey(ar, survey->channel); |
3345 | + ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]); |
3346 | |
3347 | spin_lock_bh(&ar->data_lock); |
3348 | memcpy(survey, ar_survey, sizeof(*survey)); |
3349 | diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c |
3350 | index b29a86a26c13..28ff5cb4ec28 100644 |
3351 | --- a/drivers/net/wireless/ath/ath10k/txrx.c |
3352 | +++ b/drivers/net/wireless/ath/ath10k/txrx.c |
3353 | @@ -119,8 +119,6 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt, |
3354 | ieee80211_tx_status(htt->ar->hw, msdu); |
3355 | /* we do not own the msdu anymore */ |
3356 | |
3357 | - ath10k_mac_tx_push_pending(ar); |
3358 | - |
3359 | return 0; |
3360 | } |
3361 | |
3362 | diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h |
3363 | index 3ef468893b3f..f67cc198bc0e 100644 |
3364 | --- a/drivers/net/wireless/ath/ath10k/wmi.h |
3365 | +++ b/drivers/net/wireless/ath/ath10k/wmi.h |
3366 | @@ -180,6 +180,7 @@ enum wmi_service { |
3367 | WMI_SERVICE_MESH_NON_11S, |
3368 | WMI_SERVICE_PEER_STATS, |
3369 | WMI_SERVICE_RESTRT_CHNL_SUPPORT, |
3370 | + WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, |
3371 | WMI_SERVICE_TX_MODE_PUSH_ONLY, |
3372 | WMI_SERVICE_TX_MODE_PUSH_PULL, |
3373 | WMI_SERVICE_TX_MODE_DYNAMIC, |
3374 | @@ -305,6 +306,7 @@ enum wmi_10_4_service { |
3375 | WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT, |
3376 | WMI_10_4_SERVICE_PEER_STATS, |
3377 | WMI_10_4_SERVICE_MESH_11S, |
3378 | + WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, |
3379 | WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY, |
3380 | WMI_10_4_SERVICE_TX_MODE_PUSH_PULL, |
3381 | WMI_10_4_SERVICE_TX_MODE_DYNAMIC, |
3382 | @@ -402,6 +404,7 @@ static inline char *wmi_service_name(int service_id) |
3383 | SVCSTR(WMI_SERVICE_MESH_NON_11S); |
3384 | SVCSTR(WMI_SERVICE_PEER_STATS); |
3385 | SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT); |
3386 | + SVCSTR(WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT); |
3387 | SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY); |
3388 | SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL); |
3389 | SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC); |
3390 | @@ -652,6 +655,8 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out, |
3391 | WMI_SERVICE_PEER_STATS, len); |
3392 | SVCMAP(WMI_10_4_SERVICE_MESH_11S, |
3393 | WMI_SERVICE_MESH_11S, len); |
3394 | + SVCMAP(WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, |
3395 | + WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, len); |
3396 | SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY, |
3397 | WMI_SERVICE_TX_MODE_PUSH_ONLY, len); |
3398 | SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_PULL, |
3399 | diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c |
3400 | index 43f8f7d45ddb..adba3b003f55 100644 |
3401 | --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c |
3402 | +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c |
3403 | @@ -564,11 +564,16 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans, |
3404 | __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP)); |
3405 | __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP)); |
3406 | |
3407 | - /* If OEM did not fuse address - get it from OTP */ |
3408 | - if (!mac_addr0 && !mac_addr1) { |
3409 | - mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP)); |
3410 | - mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP)); |
3411 | - } |
3412 | + iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); |
3413 | + /* |
3414 | + * If the OEM fused a valid address, use it instead of the one in the |
3415 | + * OTP |
3416 | + */ |
3417 | + if (is_valid_ether_addr(data->hw_addr)) |
3418 | + return; |
3419 | + |
3420 | + mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP)); |
3421 | + mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP)); |
3422 | |
3423 | iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); |
3424 | } |
3425 | diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c |
3426 | index 7e0cdbf8bf74..794c57486e02 100644 |
3427 | --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c |
3428 | +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c |
3429 | @@ -1214,9 +1214,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm) |
3430 | } |
3431 | |
3432 | /* TODO: read the budget from BIOS / Platform NVM */ |
3433 | - if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) |
3434 | + if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) { |
3435 | ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, |
3436 | mvm->cooling_dev.cur_state); |
3437 | + if (ret) |
3438 | + goto error; |
3439 | + } |
3440 | #else |
3441 | /* Initialize tx backoffs to the minimal possible */ |
3442 | iwl_mvm_tt_tx_backoff(mvm, 0); |
3443 | diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c |
3444 | index 69c42ce45b8a..d742d27d8de0 100644 |
3445 | --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c |
3446 | +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c |
3447 | @@ -539,6 +539,11 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif) |
3448 | iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, |
3449 | IWL_MVM_OFFCHANNEL_QUEUE, |
3450 | IWL_MAX_TID_COUNT, 0); |
3451 | + else |
3452 | + iwl_mvm_disable_txq(mvm, |
3453 | + IWL_MVM_DQA_P2P_DEVICE_QUEUE, |
3454 | + vif->hw_queue[0], IWL_MAX_TID_COUNT, |
3455 | + 0); |
3456 | |
3457 | break; |
3458 | case NL80211_IFTYPE_AP: |
3459 | diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c |
3460 | index df6c32caa5f0..afb7eb60e454 100644 |
3461 | --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c |
3462 | +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c |
3463 | @@ -598,9 +598,10 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, |
3464 | |
3465 | mvm_sta = iwl_mvm_sta_from_mac80211(sta); |
3466 | |
3467 | - /* not a data packet */ |
3468 | - if (!ieee80211_is_data_qos(hdr->frame_control) || |
3469 | - is_multicast_ether_addr(hdr->addr1)) |
3470 | + /* not a data packet or a bar */ |
3471 | + if (!ieee80211_is_back_req(hdr->frame_control) && |
3472 | + (!ieee80211_is_data_qos(hdr->frame_control) || |
3473 | + is_multicast_ether_addr(hdr->addr1))) |
3474 | return false; |
3475 | |
3476 | if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) |
3477 | @@ -624,6 +625,11 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, |
3478 | |
3479 | spin_lock_bh(&buffer->lock); |
3480 | |
3481 | + if (ieee80211_is_back_req(hdr->frame_control)) { |
3482 | + iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn); |
3483 | + goto drop; |
3484 | + } |
3485 | + |
3486 | /* |
3487 | * If there was a significant jump in the nssn - adjust. |
3488 | * If the SN is smaller than the NSSN it might need to first go into |
3489 | diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c |
3490 | index 3130b9c68a74..e933c12d80aa 100644 |
3491 | --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c |
3492 | +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c |
3493 | @@ -576,9 +576,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, |
3494 | ret); |
3495 | |
3496 | /* Make sure the SCD wrptr is correctly set before reconfiguring */ |
3497 | - iwl_trans_txq_enable(mvm->trans, queue, iwl_mvm_ac_to_tx_fifo[ac], |
3498 | - cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF, |
3499 | - ssn, wdg_timeout); |
3500 | + iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout); |
3501 | |
3502 | /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */ |
3503 | |
3504 | @@ -1270,9 +1268,31 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, |
3505 | ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); |
3506 | |
3507 | /* If DQA is supported - the queues can be disabled now */ |
3508 | - if (iwl_mvm_is_dqa_supported(mvm)) |
3509 | + if (iwl_mvm_is_dqa_supported(mvm)) { |
3510 | + u8 reserved_txq = mvm_sta->reserved_queue; |
3511 | + enum iwl_mvm_queue_status *status; |
3512 | + |
3513 | iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta); |
3514 | |
3515 | + /* |
3516 | + * If no traffic has gone through the reserved TXQ - it |
3517 | + * is still marked as IWL_MVM_QUEUE_RESERVED, and |
3518 | + * should be manually marked as free again |
3519 | + */ |
3520 | + spin_lock_bh(&mvm->queue_info_lock); |
3521 | + status = &mvm->queue_info[reserved_txq].status; |
3522 | + if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && |
3523 | + (*status != IWL_MVM_QUEUE_FREE), |
3524 | + "sta_id %d reserved txq %d status %d", |
3525 | + mvm_sta->sta_id, reserved_txq, *status)) { |
3526 | + spin_unlock_bh(&mvm->queue_info_lock); |
3527 | + return -EINVAL; |
3528 | + } |
3529 | + |
3530 | + *status = IWL_MVM_QUEUE_FREE; |
3531 | + spin_unlock_bh(&mvm->queue_info_lock); |
3532 | + } |
3533 | + |
3534 | if (vif->type == NL80211_IFTYPE_STATION && |
3535 | mvmvif->ap_sta_id == mvm_sta->sta_id) { |
3536 | /* if associated - we can't remove the AP STA now */ |
3537 | diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c |
3538 | index b3a87a31de30..a0c1e3d07db5 100644 |
3539 | --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c |
3540 | +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c |
3541 | @@ -903,9 +903,13 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, |
3542 | tid = IWL_MAX_TID_COUNT; |
3543 | } |
3544 | |
3545 | - if (iwl_mvm_is_dqa_supported(mvm)) |
3546 | + if (iwl_mvm_is_dqa_supported(mvm)) { |
3547 | txq_id = mvmsta->tid_data[tid].txq_id; |
3548 | |
3549 | + if (ieee80211_is_mgmt(fc)) |
3550 | + tx_cmd->tid_tspec = IWL_TID_NON_QOS; |
3551 | + } |
3552 | + |
3553 | /* Copy MAC header from skb into command buffer */ |
3554 | memcpy(tx_cmd->hdr, hdr, hdrlen); |
3555 | |
3556 | diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c |
3557 | index 1c7b00630b90..b89596c18b41 100644 |
3558 | --- a/drivers/net/wireless/marvell/mwifiex/join.c |
3559 | +++ b/drivers/net/wireless/marvell/mwifiex/join.c |
3560 | @@ -669,9 +669,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, |
3561 | priv->assoc_rsp_size = min(le16_to_cpu(resp->size) - S_DS_GEN, |
3562 | sizeof(priv->assoc_rsp_buf)); |
3563 | |
3564 | - memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size); |
3565 | - |
3566 | assoc_rsp->a_id = cpu_to_le16(aid); |
3567 | + memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size); |
3568 | |
3569 | if (status_code) { |
3570 | priv->adapter->dbg.num_cmd_assoc_failure++; |
3571 | diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c |
3572 | index a422f3306d4d..7e394d485f54 100644 |
3573 | --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c |
3574 | +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c |
3575 | @@ -708,7 +708,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) |
3576 | |
3577 | case EVENT_EXT_SCAN_REPORT: |
3578 | mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n"); |
3579 | - if (adapter->ext_scan && !priv->scan_aborting) |
3580 | + /* We intend to skip this event during suspend, but handle |
3581 | + * it in interface disabled case |
3582 | + */ |
3583 | + if (adapter->ext_scan && (!priv->scan_aborting || |
3584 | + !netif_running(priv->netdev))) |
3585 | ret = mwifiex_handle_event_ext_scan_report(priv, |
3586 | adapter->event_skb->data); |
3587 | |
3588 | diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c |
3589 | index 7cf26c6124d1..6005e14213ca 100644 |
3590 | --- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c |
3591 | +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c |
3592 | @@ -831,8 +831,10 @@ int rt2x00usb_probe(struct usb_interface *usb_intf, |
3593 | rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev, |
3594 | sizeof(struct usb_anchor), |
3595 | GFP_KERNEL); |
3596 | - if (!rt2x00dev->anchor) |
3597 | + if (!rt2x00dev->anchor) { |
3598 | + retval = -ENOMEM; |
3599 | goto exit_free_reg; |
3600 | + } |
3601 | |
3602 | init_usb_anchor(rt2x00dev->anchor); |
3603 | return 0; |
3604 | diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c |
3605 | index 935866fe5ec2..a8b6949a8778 100644 |
3606 | --- a/drivers/nvdimm/bus.c |
3607 | +++ b/drivers/nvdimm/bus.c |
3608 | @@ -217,6 +217,8 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys, |
3609 | return rc; |
3610 | if (cmd_rc < 0) |
3611 | return cmd_rc; |
3612 | + |
3613 | + nvdimm_clear_from_poison_list(nvdimm_bus, phys, len); |
3614 | return clear_err.cleared; |
3615 | } |
3616 | EXPORT_SYMBOL_GPL(nvdimm_clear_poison); |
3617 | diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c |
3618 | index 4d7bbd2df5c0..7ceba08774b6 100644 |
3619 | --- a/drivers/nvdimm/core.c |
3620 | +++ b/drivers/nvdimm/core.c |
3621 | @@ -547,11 +547,12 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region, |
3622 | } |
3623 | EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate); |
3624 | |
3625 | -static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length) |
3626 | +static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length, |
3627 | + gfp_t flags) |
3628 | { |
3629 | struct nd_poison *pl; |
3630 | |
3631 | - pl = kzalloc(sizeof(*pl), GFP_KERNEL); |
3632 | + pl = kzalloc(sizeof(*pl), flags); |
3633 | if (!pl) |
3634 | return -ENOMEM; |
3635 | |
3636 | @@ -567,7 +568,7 @@ static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length) |
3637 | struct nd_poison *pl; |
3638 | |
3639 | if (list_empty(&nvdimm_bus->poison_list)) |
3640 | - return add_poison(nvdimm_bus, addr, length); |
3641 | + return add_poison(nvdimm_bus, addr, length, GFP_KERNEL); |
3642 | |
3643 | /* |
3644 | * There is a chance this is a duplicate, check for those first. |
3645 | @@ -587,7 +588,7 @@ static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length) |
3646 | * as any overlapping ranges will get resolved when the list is consumed |
3647 | * and converted to badblocks |
3648 | */ |
3649 | - return add_poison(nvdimm_bus, addr, length); |
3650 | + return add_poison(nvdimm_bus, addr, length, GFP_KERNEL); |
3651 | } |
3652 | |
3653 | int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length) |
3654 | @@ -602,6 +603,70 @@ int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length) |
3655 | } |
3656 | EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison); |
3657 | |
3658 | +void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus, |
3659 | + phys_addr_t start, unsigned int len) |
3660 | +{ |
3661 | + struct list_head *poison_list = &nvdimm_bus->poison_list; |
3662 | + u64 clr_end = start + len - 1; |
3663 | + struct nd_poison *pl, *next; |
3664 | + |
3665 | + nvdimm_bus_lock(&nvdimm_bus->dev); |
3666 | + WARN_ON_ONCE(list_empty(poison_list)); |
3667 | + |
3668 | + /* |
3669 | + * [start, clr_end] is the poison interval being cleared. |
3670 | + * [pl->start, pl_end] is the poison_list entry we're comparing |
3671 | + * the above interval against. The poison list entry may need |
3672 | + * to be modified (update either start or length), deleted, or |
3673 | + * split into two based on the overlap characteristics |
3674 | + */ |
3675 | + |
3676 | + list_for_each_entry_safe(pl, next, poison_list, list) { |
3677 | + u64 pl_end = pl->start + pl->length - 1; |
3678 | + |
3679 | + /* Skip intervals with no intersection */ |
3680 | + if (pl_end < start) |
3681 | + continue; |
3682 | + if (pl->start > clr_end) |
3683 | + continue; |
3684 | + /* Delete completely overlapped poison entries */ |
3685 | + if ((pl->start >= start) && (pl_end <= clr_end)) { |
3686 | + list_del(&pl->list); |
3687 | + kfree(pl); |
3688 | + continue; |
3689 | + } |
3690 | + /* Adjust start point of partially cleared entries */ |
3691 | + if ((start <= pl->start) && (clr_end > pl->start)) { |
3692 | + pl->length -= clr_end - pl->start + 1; |
3693 | + pl->start = clr_end + 1; |
3694 | + continue; |
3695 | + } |
3696 | + /* Adjust pl->length for partial clearing at the tail end */ |
3697 | + if ((pl->start < start) && (pl_end <= clr_end)) { |
3698 | + /* pl->start remains the same */ |
3699 | + pl->length = start - pl->start; |
3700 | + continue; |
3701 | + } |
3702 | + /* |
3703 | + * If clearing in the middle of an entry, we split it into |
3704 | + * two by modifying the current entry to represent one half of |
3705 | + * the split, and adding a new entry for the second half. |
3706 | + */ |
3707 | + if ((pl->start < start) && (pl_end > clr_end)) { |
3708 | + u64 new_start = clr_end + 1; |
3709 | + u64 new_len = pl_end - new_start + 1; |
3710 | + |
3711 | + /* Add new entry covering the right half */ |
3712 | + add_poison(nvdimm_bus, new_start, new_len, GFP_NOIO); |
3713 | + /* Adjust this entry to cover the left half */ |
3714 | + pl->length = start - pl->start; |
3715 | + continue; |
3716 | + } |
3717 | + } |
3718 | + nvdimm_bus_unlock(&nvdimm_bus->dev); |
3719 | +} |
3720 | +EXPORT_SYMBOL_GPL(nvdimm_clear_from_poison_list); |
3721 | + |
3722 | #ifdef CONFIG_BLK_DEV_INTEGRITY |
3723 | int nd_integrity_init(struct gendisk *disk, unsigned long meta_size) |
3724 | { |
3725 | diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c |
3726 | index ef9893fa3176..4f5e567fd7e0 100644 |
3727 | --- a/drivers/pci/host/pci-aardvark.c |
3728 | +++ b/drivers/pci/host/pci-aardvark.c |
3729 | @@ -848,7 +848,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie) |
3730 | int err, res_valid = 0; |
3731 | struct device *dev = &pcie->pdev->dev; |
3732 | struct device_node *np = dev->of_node; |
3733 | - struct resource_entry *win; |
3734 | + struct resource_entry *win, *tmp; |
3735 | resource_size_t iobase; |
3736 | |
3737 | INIT_LIST_HEAD(&pcie->resources); |
3738 | @@ -862,7 +862,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie) |
3739 | if (err) |
3740 | goto out_release_res; |
3741 | |
3742 | - resource_list_for_each_entry(win, &pcie->resources) { |
3743 | + resource_list_for_each_entry_safe(win, tmp, &pcie->resources) { |
3744 | struct resource *res = win->res; |
3745 | |
3746 | switch (resource_type(res)) { |
3747 | @@ -874,9 +874,11 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie) |
3748 | lower_32_bits(res->start), |
3749 | OB_PCIE_IO); |
3750 | err = pci_remap_iospace(res, iobase); |
3751 | - if (err) |
3752 | + if (err) { |
3753 | dev_warn(dev, "error %d: failed to map resource %pR\n", |
3754 | err, res); |
3755 | + resource_list_destroy_entry(win); |
3756 | + } |
3757 | break; |
3758 | case IORESOURCE_MEM: |
3759 | advk_pcie_set_ob_win(pcie, 0, |
3760 | diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c |
3761 | index 9d9d34e959b6..61eb4d46eb50 100644 |
3762 | --- a/drivers/pci/host/pci-host-common.c |
3763 | +++ b/drivers/pci/host/pci-host-common.c |
3764 | @@ -29,7 +29,7 @@ static int gen_pci_parse_request_of_pci_ranges(struct device *dev, |
3765 | int err, res_valid = 0; |
3766 | struct device_node *np = dev->of_node; |
3767 | resource_size_t iobase; |
3768 | - struct resource_entry *win; |
3769 | + struct resource_entry *win, *tmp; |
3770 | |
3771 | err = of_pci_get_host_bridge_resources(np, 0, 0xff, resources, &iobase); |
3772 | if (err) |
3773 | @@ -39,15 +39,17 @@ static int gen_pci_parse_request_of_pci_ranges(struct device *dev, |
3774 | if (err) |
3775 | return err; |
3776 | |
3777 | - resource_list_for_each_entry(win, resources) { |
3778 | + resource_list_for_each_entry_safe(win, tmp, resources) { |
3779 | struct resource *res = win->res; |
3780 | |
3781 | switch (resource_type(res)) { |
3782 | case IORESOURCE_IO: |
3783 | err = pci_remap_iospace(res, iobase); |
3784 | - if (err) |
3785 | + if (err) { |
3786 | dev_warn(dev, "error %d: failed to map resource %pR\n", |
3787 | err, res); |
3788 | + resource_list_destroy_entry(win); |
3789 | + } |
3790 | break; |
3791 | case IORESOURCE_MEM: |
3792 | res_valid |= !(res->flags & IORESOURCE_PREFETCH); |
3793 | diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c |
3794 | index 84d650d892e7..7ec1e800096a 100644 |
3795 | --- a/drivers/pci/host/pci-tegra.c |
3796 | +++ b/drivers/pci/host/pci-tegra.c |
3797 | @@ -621,7 +621,11 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys) |
3798 | if (err < 0) |
3799 | return err; |
3800 | |
3801 | - pci_add_resource_offset(&sys->resources, &pcie->pio, sys->io_offset); |
3802 | + err = pci_remap_iospace(&pcie->pio, pcie->io.start); |
3803 | + if (!err) |
3804 | + pci_add_resource_offset(&sys->resources, &pcie->pio, |
3805 | + sys->io_offset); |
3806 | + |
3807 | pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset); |
3808 | pci_add_resource_offset(&sys->resources, &pcie->prefetch, |
3809 | sys->mem_offset); |
3810 | @@ -631,7 +635,6 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys) |
3811 | if (err < 0) |
3812 | return err; |
3813 | |
3814 | - pci_remap_iospace(&pcie->pio, pcie->io.start); |
3815 | return 1; |
3816 | } |
3817 | |
3818 | diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c |
3819 | index f234405770ab..b7dc07002f13 100644 |
3820 | --- a/drivers/pci/host/pci-versatile.c |
3821 | +++ b/drivers/pci/host/pci-versatile.c |
3822 | @@ -74,7 +74,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev, |
3823 | int err, mem = 1, res_valid = 0; |
3824 | struct device_node *np = dev->of_node; |
3825 | resource_size_t iobase; |
3826 | - struct resource_entry *win; |
3827 | + struct resource_entry *win, *tmp; |
3828 | |
3829 | err = of_pci_get_host_bridge_resources(np, 0, 0xff, res, &iobase); |
3830 | if (err) |
3831 | @@ -84,15 +84,17 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev, |
3832 | if (err) |
3833 | goto out_release_res; |
3834 | |
3835 | - resource_list_for_each_entry(win, res) { |
3836 | + resource_list_for_each_entry_safe(win, tmp, res) { |
3837 | struct resource *res = win->res; |
3838 | |
3839 | switch (resource_type(res)) { |
3840 | case IORESOURCE_IO: |
3841 | err = pci_remap_iospace(res, iobase); |
3842 | - if (err) |
3843 | + if (err) { |
3844 | dev_warn(dev, "error %d: failed to map resource %pR\n", |
3845 | err, res); |
3846 | + resource_list_destroy_entry(win); |
3847 | + } |
3848 | break; |
3849 | case IORESOURCE_MEM: |
3850 | res_valid |= !(res->flags & IORESOURCE_PREFETCH); |
3851 | diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c |
3852 | index 12afce19890b..2a500f270c01 100644 |
3853 | --- a/drivers/pci/host/pcie-designware.c |
3854 | +++ b/drivers/pci/host/pcie-designware.c |
3855 | @@ -436,7 +436,7 @@ int dw_pcie_host_init(struct pcie_port *pp) |
3856 | struct resource *cfg_res; |
3857 | int i, ret; |
3858 | LIST_HEAD(res); |
3859 | - struct resource_entry *win; |
3860 | + struct resource_entry *win, *tmp; |
3861 | |
3862 | cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); |
3863 | if (cfg_res) { |
3864 | @@ -457,17 +457,20 @@ int dw_pcie_host_init(struct pcie_port *pp) |
3865 | goto error; |
3866 | |
3867 | /* Get the I/O and memory ranges from DT */ |
3868 | - resource_list_for_each_entry(win, &res) { |
3869 | + resource_list_for_each_entry_safe(win, tmp, &res) { |
3870 | switch (resource_type(win->res)) { |
3871 | case IORESOURCE_IO: |
3872 | - pp->io = win->res; |
3873 | - pp->io->name = "I/O"; |
3874 | - pp->io_size = resource_size(pp->io); |
3875 | - pp->io_bus_addr = pp->io->start - win->offset; |
3876 | - ret = pci_remap_iospace(pp->io, pp->io_base); |
3877 | - if (ret) |
3878 | + ret = pci_remap_iospace(win->res, pp->io_base); |
3879 | + if (ret) { |
3880 | dev_warn(pp->dev, "error %d: failed to map resource %pR\n", |
3881 | - ret, pp->io); |
3882 | + ret, win->res); |
3883 | + resource_list_destroy_entry(win); |
3884 | + } else { |
3885 | + pp->io = win->res; |
3886 | + pp->io->name = "I/O"; |
3887 | + pp->io_size = resource_size(pp->io); |
3888 | + pp->io_bus_addr = pp->io->start - win->offset; |
3889 | + } |
3890 | break; |
3891 | case IORESOURCE_MEM: |
3892 | pp->mem = win->res; |
3893 | diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c |
3894 | index 65db7a221509..5f7fcc971cae 100644 |
3895 | --- a/drivers/pci/host/pcie-rcar.c |
3896 | +++ b/drivers/pci/host/pcie-rcar.c |
3897 | @@ -945,7 +945,7 @@ static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci) |
3898 | struct device *dev = pci->dev; |
3899 | struct device_node *np = dev->of_node; |
3900 | resource_size_t iobase; |
3901 | - struct resource_entry *win; |
3902 | + struct resource_entry *win, *tmp; |
3903 | |
3904 | err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources, &iobase); |
3905 | if (err) |
3906 | @@ -955,14 +955,17 @@ static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci) |
3907 | if (err) |
3908 | goto out_release_res; |
3909 | |
3910 | - resource_list_for_each_entry(win, &pci->resources) { |
3911 | + resource_list_for_each_entry_safe(win, tmp, &pci->resources) { |
3912 | struct resource *res = win->res; |
3913 | |
3914 | if (resource_type(res) == IORESOURCE_IO) { |
3915 | err = pci_remap_iospace(res, iobase); |
3916 | - if (err) |
3917 | + if (err) { |
3918 | dev_warn(dev, "error %d: failed to map resource %pR\n", |
3919 | err, res); |
3920 | + |
3921 | + resource_list_destroy_entry(win); |
3922 | + } |
3923 | } |
3924 | } |
3925 | |
3926 | diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c |
3927 | index 51c42d746883..775c88303017 100644 |
3928 | --- a/drivers/pinctrl/qcom/pinctrl-msm.c |
3929 | +++ b/drivers/pinctrl/qcom/pinctrl-msm.c |
3930 | @@ -156,7 +156,7 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev, |
3931 | spin_lock_irqsave(&pctrl->lock, flags); |
3932 | |
3933 | val = readl(pctrl->regs + g->ctl_reg); |
3934 | - val &= mask; |
3935 | + val &= ~mask; |
3936 | val |= i << g->mux_bit; |
3937 | writel(val, pctrl->regs + g->ctl_reg); |
3938 | |
3939 | diff --git a/drivers/power/bq24257_charger.c b/drivers/power/bq24257_charger.c |
3940 | index 1fea2c7ef97f..6fc31bdc639b 100644 |
3941 | --- a/drivers/power/bq24257_charger.c |
3942 | +++ b/drivers/power/bq24257_charger.c |
3943 | @@ -1068,6 +1068,12 @@ static int bq24257_probe(struct i2c_client *client, |
3944 | return ret; |
3945 | } |
3946 | |
3947 | + ret = bq24257_power_supply_init(bq); |
3948 | + if (ret < 0) { |
3949 | + dev_err(dev, "Failed to register power supply\n"); |
3950 | + return ret; |
3951 | + } |
3952 | + |
3953 | ret = devm_request_threaded_irq(dev, client->irq, NULL, |
3954 | bq24257_irq_handler_thread, |
3955 | IRQF_TRIGGER_FALLING | |
3956 | @@ -1078,12 +1084,6 @@ static int bq24257_probe(struct i2c_client *client, |
3957 | return ret; |
3958 | } |
3959 | |
3960 | - ret = bq24257_power_supply_init(bq); |
3961 | - if (ret < 0) { |
3962 | - dev_err(dev, "Failed to register power supply\n"); |
3963 | - return ret; |
3964 | - } |
3965 | - |
3966 | ret = sysfs_create_group(&bq->charger->dev.kobj, &bq24257_attr_group); |
3967 | if (ret < 0) { |
3968 | dev_err(dev, "Can't create sysfs entries\n"); |
3969 | diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c |
3970 | index 6b1577c73fe7..285b4006f44b 100644 |
3971 | --- a/drivers/s390/char/con3270.c |
3972 | +++ b/drivers/s390/char/con3270.c |
3973 | @@ -124,7 +124,12 @@ con3270_create_status(struct con3270 *cp) |
3974 | static void |
3975 | con3270_update_string(struct con3270 *cp, struct string *s, int nr) |
3976 | { |
3977 | - if (s->len >= cp->view.cols - 5) |
3978 | + if (s->len < 4) { |
3979 | + /* This indicates a bug, but printing a warning would |
3980 | + * cause a deadlock. */ |
3981 | + return; |
3982 | + } |
3983 | + if (s->string[s->len - 4] != TO_RA) |
3984 | return; |
3985 | raw3270_buffer_address(cp->view.dev, s->string + s->len - 3, |
3986 | cp->view.cols * (nr + 1)); |
3987 | @@ -460,11 +465,11 @@ con3270_cline_end(struct con3270 *cp) |
3988 | cp->cline->len + 4 : cp->view.cols; |
3989 | s = con3270_alloc_string(cp, size); |
3990 | memcpy(s->string, cp->cline->string, cp->cline->len); |
3991 | - if (s->len < cp->view.cols - 5) { |
3992 | + if (cp->cline->len < cp->view.cols - 5) { |
3993 | s->string[s->len - 4] = TO_RA; |
3994 | s->string[s->len - 1] = 0; |
3995 | } else { |
3996 | - while (--size > cp->cline->len) |
3997 | + while (--size >= cp->cline->len) |
3998 | s->string[size] = cp->view.ascebc[' ']; |
3999 | } |
4000 | /* Replace cline with allocated line s and reset cline. */ |
4001 | diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c |
4002 | index 940e725bde1e..11674698b36d 100644 |
4003 | --- a/drivers/s390/cio/chsc.c |
4004 | +++ b/drivers/s390/cio/chsc.c |
4005 | @@ -95,12 +95,13 @@ struct chsc_ssd_area { |
4006 | int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) |
4007 | { |
4008 | struct chsc_ssd_area *ssd_area; |
4009 | + unsigned long flags; |
4010 | int ccode; |
4011 | int ret; |
4012 | int i; |
4013 | int mask; |
4014 | |
4015 | - spin_lock_irq(&chsc_page_lock); |
4016 | + spin_lock_irqsave(&chsc_page_lock, flags); |
4017 | memset(chsc_page, 0, PAGE_SIZE); |
4018 | ssd_area = chsc_page; |
4019 | ssd_area->request.length = 0x0010; |
4020 | @@ -144,7 +145,7 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) |
4021 | ssd->fla[i] = ssd_area->fla[i]; |
4022 | } |
4023 | out: |
4024 | - spin_unlock_irq(&chsc_page_lock); |
4025 | + spin_unlock_irqrestore(&chsc_page_lock, flags); |
4026 | return ret; |
4027 | } |
4028 | |
4029 | @@ -832,9 +833,10 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable) |
4030 | u32 fmt : 4; |
4031 | u32 : 16; |
4032 | } __attribute__ ((packed)) *secm_area; |
4033 | + unsigned long flags; |
4034 | int ret, ccode; |
4035 | |
4036 | - spin_lock_irq(&chsc_page_lock); |
4037 | + spin_lock_irqsave(&chsc_page_lock, flags); |
4038 | memset(chsc_page, 0, PAGE_SIZE); |
4039 | secm_area = chsc_page; |
4040 | secm_area->request.length = 0x0050; |
4041 | @@ -864,7 +866,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable) |
4042 | CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n", |
4043 | secm_area->response.code); |
4044 | out: |
4045 | - spin_unlock_irq(&chsc_page_lock); |
4046 | + spin_unlock_irqrestore(&chsc_page_lock, flags); |
4047 | return ret; |
4048 | } |
4049 | |
4050 | @@ -992,6 +994,7 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, |
4051 | |
4052 | int chsc_get_channel_measurement_chars(struct channel_path *chp) |
4053 | { |
4054 | + unsigned long flags; |
4055 | int ccode, ret; |
4056 | |
4057 | struct { |
4058 | @@ -1021,7 +1024,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp) |
4059 | if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm) |
4060 | return -EINVAL; |
4061 | |
4062 | - spin_lock_irq(&chsc_page_lock); |
4063 | + spin_lock_irqsave(&chsc_page_lock, flags); |
4064 | memset(chsc_page, 0, PAGE_SIZE); |
4065 | scmc_area = chsc_page; |
4066 | scmc_area->request.length = 0x0010; |
4067 | @@ -1053,7 +1056,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp) |
4068 | chsc_initialize_cmg_chars(chp, scmc_area->cmcv, |
4069 | (struct cmg_chars *) &scmc_area->data); |
4070 | out: |
4071 | - spin_unlock_irq(&chsc_page_lock); |
4072 | + spin_unlock_irqrestore(&chsc_page_lock, flags); |
4073 | return ret; |
4074 | } |
4075 | |
4076 | @@ -1134,6 +1137,7 @@ struct css_chsc_char css_chsc_characteristics; |
4077 | int __init |
4078 | chsc_determine_css_characteristics(void) |
4079 | { |
4080 | + unsigned long flags; |
4081 | int result; |
4082 | struct { |
4083 | struct chsc_header request; |
4084 | @@ -1146,7 +1150,7 @@ chsc_determine_css_characteristics(void) |
4085 | u32 chsc_char[508]; |
4086 | } __attribute__ ((packed)) *scsc_area; |
4087 | |
4088 | - spin_lock_irq(&chsc_page_lock); |
4089 | + spin_lock_irqsave(&chsc_page_lock, flags); |
4090 | memset(chsc_page, 0, PAGE_SIZE); |
4091 | scsc_area = chsc_page; |
4092 | scsc_area->request.length = 0x0010; |
4093 | @@ -1168,7 +1172,7 @@ chsc_determine_css_characteristics(void) |
4094 | CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n", |
4095 | scsc_area->response.code); |
4096 | exit: |
4097 | - spin_unlock_irq(&chsc_page_lock); |
4098 | + spin_unlock_irqrestore(&chsc_page_lock, flags); |
4099 | return result; |
4100 | } |
4101 | |
4102 | diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c |
4103 | index 661bb94e2548..228b99ee0483 100644 |
4104 | --- a/drivers/scsi/cxlflash/main.c |
4105 | +++ b/drivers/scsi/cxlflash/main.c |
4106 | @@ -823,17 +823,6 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait) |
4107 | } |
4108 | |
4109 | /** |
4110 | - * cxlflash_shutdown() - shutdown handler |
4111 | - * @pdev: PCI device associated with the host. |
4112 | - */ |
4113 | -static void cxlflash_shutdown(struct pci_dev *pdev) |
4114 | -{ |
4115 | - struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); |
4116 | - |
4117 | - notify_shutdown(cfg, false); |
4118 | -} |
4119 | - |
4120 | -/** |
4121 | * cxlflash_remove() - PCI entry point to tear down host |
4122 | * @pdev: PCI device associated with the host. |
4123 | * |
4124 | @@ -844,6 +833,11 @@ static void cxlflash_remove(struct pci_dev *pdev) |
4125 | struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); |
4126 | ulong lock_flags; |
4127 | |
4128 | + if (!pci_is_enabled(pdev)) { |
4129 | + pr_debug("%s: Device is disabled\n", __func__); |
4130 | + return; |
4131 | + } |
4132 | + |
4133 | /* If a Task Management Function is active, wait for it to complete |
4134 | * before continuing with remove. |
4135 | */ |
4136 | @@ -2685,7 +2679,7 @@ static struct pci_driver cxlflash_driver = { |
4137 | .id_table = cxlflash_pci_table, |
4138 | .probe = cxlflash_probe, |
4139 | .remove = cxlflash_remove, |
4140 | - .shutdown = cxlflash_shutdown, |
4141 | + .shutdown = cxlflash_remove, |
4142 | .err_handler = &cxlflash_err_handler, |
4143 | }; |
4144 | |
4145 | diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c |
4146 | index cd91a684c945..4cb79902e7a8 100644 |
4147 | --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c |
4148 | +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c |
4149 | @@ -4701,7 +4701,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) |
4150 | le16_to_cpu(mpi_reply->DevHandle)); |
4151 | mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq); |
4152 | |
4153 | - if (!(ioc->logging_level & MPT_DEBUG_REPLY) && |
4154 | + if ((ioc->logging_level & MPT_DEBUG_REPLY) && |
4155 | ((scmd->sense_buffer[2] == UNIT_ATTENTION) || |
4156 | (scmd->sense_buffer[2] == MEDIUM_ERROR) || |
4157 | (scmd->sense_buffer[2] == HARDWARE_ERROR))) |
4158 | diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c |
4159 | index 9e9dadb52b3d..eec5e3f6e06b 100644 |
4160 | --- a/drivers/spi/spi-fsl-dspi.c |
4161 | +++ b/drivers/spi/spi-fsl-dspi.c |
4162 | @@ -760,7 +760,6 @@ static int dspi_remove(struct platform_device *pdev) |
4163 | /* Disconnect from the SPI framework */ |
4164 | clk_disable_unprepare(dspi->clk); |
4165 | spi_unregister_master(dspi->master); |
4166 | - spi_master_put(dspi->master); |
4167 | |
4168 | return 0; |
4169 | } |
4170 | diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig |
4171 | index 19c1572f1525..800245eac390 100644 |
4172 | --- a/drivers/staging/android/ion/Kconfig |
4173 | +++ b/drivers/staging/android/ion/Kconfig |
4174 | @@ -36,6 +36,7 @@ config ION_TEGRA |
4175 | config ION_HISI |
4176 | tristate "Ion for Hisilicon" |
4177 | depends on ARCH_HISI && ION |
4178 | + select ION_OF |
4179 | help |
4180 | Choose this option if you wish to use ion on Hisilicon Platform. |
4181 | |
4182 | diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c |
4183 | index a8822fe2bd60..f4cee811cabd 100644 |
4184 | --- a/drivers/staging/ks7010/ks_hostif.c |
4185 | +++ b/drivers/staging/ks7010/ks_hostif.c |
4186 | @@ -69,16 +69,20 @@ inline u32 get_DWORD(struct ks_wlan_private *priv) |
4187 | return data; |
4188 | } |
4189 | |
4190 | -void ks_wlan_hw_wakeup_task(struct work_struct *work) |
4191 | +static void ks_wlan_hw_wakeup_task(struct work_struct *work) |
4192 | { |
4193 | struct ks_wlan_private *priv = |
4194 | container_of(work, struct ks_wlan_private, ks_wlan_wakeup_task); |
4195 | int ps_status = atomic_read(&priv->psstatus.status); |
4196 | + long time_left; |
4197 | |
4198 | if (ps_status == PS_SNOOZE) { |
4199 | ks_wlan_hw_wakeup_request(priv); |
4200 | - if (!wait_for_completion_interruptible_timeout(&priv->psstatus.wakeup_wait, HZ / 50)) { /* 20ms timeout */ |
4201 | - DPRINTK(1, "wake up timeout !!!\n"); |
4202 | + time_left = wait_for_completion_interruptible_timeout( |
4203 | + &priv->psstatus.wakeup_wait, |
4204 | + msecs_to_jiffies(20)); |
4205 | + if (time_left <= 0) { |
4206 | + DPRINTK(1, "wake up timeout or interrupted !!!\n"); |
4207 | schedule_work(&priv->ks_wlan_wakeup_task); |
4208 | return; |
4209 | } |
4210 | @@ -1505,7 +1509,7 @@ void hostif_infrastructure_set_request(struct ks_wlan_private *priv) |
4211 | ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL); |
4212 | } |
4213 | |
4214 | -void hostif_infrastructure_set2_request(struct ks_wlan_private *priv) |
4215 | +static void hostif_infrastructure_set2_request(struct ks_wlan_private *priv) |
4216 | { |
4217 | struct hostif_infrastructure_set2_request_t *pp; |
4218 | uint16_t capability; |
4219 | diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c |
4220 | index 77485235c615..32d3a9c07aa3 100644 |
4221 | --- a/drivers/staging/rtl8188eu/core/rtw_cmd.c |
4222 | +++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c |
4223 | @@ -670,13 +670,13 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr) |
4224 | u8 res = _SUCCESS; |
4225 | |
4226 | |
4227 | - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); |
4228 | + ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); |
4229 | if (!ph2c) { |
4230 | res = _FAIL; |
4231 | goto exit; |
4232 | } |
4233 | |
4234 | - paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_KERNEL); |
4235 | + paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_ATOMIC); |
4236 | if (!paddbareq_parm) { |
4237 | kfree(ph2c); |
4238 | res = _FAIL; |
4239 | diff --git a/drivers/staging/sm750fb/ddk750_mode.c b/drivers/staging/sm750fb/ddk750_mode.c |
4240 | index ccb4e067661a..e29d4bd5dcec 100644 |
4241 | --- a/drivers/staging/sm750fb/ddk750_mode.c |
4242 | +++ b/drivers/staging/sm750fb/ddk750_mode.c |
4243 | @@ -63,7 +63,7 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam, |
4244 | dispControl |= (CRT_DISPLAY_CTRL_CRTSELECT | CRT_DISPLAY_CTRL_RGBBIT); |
4245 | |
4246 | /* Set bit 14 of display controller */ |
4247 | - dispControl = DISPLAY_CTRL_CLOCK_PHASE; |
4248 | + dispControl |= DISPLAY_CTRL_CLOCK_PHASE; |
4249 | |
4250 | POKE32(CRT_DISPLAY_CTRL, dispControl); |
4251 | |
4252 | diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c |
4253 | index 915facbf552e..e1134a4d97f3 100644 |
4254 | --- a/drivers/uio/uio_dmem_genirq.c |
4255 | +++ b/drivers/uio/uio_dmem_genirq.c |
4256 | @@ -229,7 +229,7 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev) |
4257 | ++uiomem; |
4258 | } |
4259 | |
4260 | - priv->dmem_region_start = i; |
4261 | + priv->dmem_region_start = uiomem - &uioinfo->mem[0]; |
4262 | priv->num_dmem_regions = pdata->num_dynamic_regions; |
4263 | |
4264 | for (i = 0; i < pdata->num_dynamic_regions; ++i) { |
4265 | diff --git a/fs/9p/acl.c b/fs/9p/acl.c |
4266 | index 5b6a1743ea17..b3c2cc79c20d 100644 |
4267 | --- a/fs/9p/acl.c |
4268 | +++ b/fs/9p/acl.c |
4269 | @@ -276,32 +276,26 @@ static int v9fs_xattr_set_acl(const struct xattr_handler *handler, |
4270 | switch (handler->flags) { |
4271 | case ACL_TYPE_ACCESS: |
4272 | if (acl) { |
4273 | - umode_t mode = inode->i_mode; |
4274 | - retval = posix_acl_equiv_mode(acl, &mode); |
4275 | - if (retval < 0) |
4276 | + struct iattr iattr; |
4277 | + |
4278 | + retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl); |
4279 | + if (retval) |
4280 | goto err_out; |
4281 | - else { |
4282 | - struct iattr iattr; |
4283 | - if (retval == 0) { |
4284 | - /* |
4285 | - * ACL can be represented |
4286 | - * by the mode bits. So don't |
4287 | - * update ACL. |
4288 | - */ |
4289 | - acl = NULL; |
4290 | - value = NULL; |
4291 | - size = 0; |
4292 | - } |
4293 | - /* Updte the mode bits */ |
4294 | - iattr.ia_mode = ((mode & S_IALLUGO) | |
4295 | - (inode->i_mode & ~S_IALLUGO)); |
4296 | - iattr.ia_valid = ATTR_MODE; |
4297 | - /* FIXME should we update ctime ? |
4298 | - * What is the following setxattr update the |
4299 | - * mode ? |
4300 | + if (!acl) { |
4301 | + /* |
4302 | + * ACL can be represented |
4303 | + * by the mode bits. So don't |
4304 | + * update ACL. |
4305 | */ |
4306 | - v9fs_vfs_setattr_dotl(dentry, &iattr); |
4307 | + value = NULL; |
4308 | + size = 0; |
4309 | } |
4310 | + iattr.ia_valid = ATTR_MODE; |
4311 | + /* FIXME should we update ctime ? |
4312 | + * What is the following setxattr update the |
4313 | + * mode ? |
4314 | + */ |
4315 | + v9fs_vfs_setattr_dotl(dentry, &iattr); |
4316 | } |
4317 | break; |
4318 | case ACL_TYPE_DEFAULT: |
4319 | diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c |
4320 | index 53bb7af4e5f0..247b8dfaf6e5 100644 |
4321 | --- a/fs/btrfs/acl.c |
4322 | +++ b/fs/btrfs/acl.c |
4323 | @@ -79,11 +79,9 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans, |
4324 | case ACL_TYPE_ACCESS: |
4325 | name = XATTR_NAME_POSIX_ACL_ACCESS; |
4326 | if (acl) { |
4327 | - ret = posix_acl_equiv_mode(acl, &inode->i_mode); |
4328 | - if (ret < 0) |
4329 | + ret = posix_acl_update_mode(inode, &inode->i_mode, &acl); |
4330 | + if (ret) |
4331 | return ret; |
4332 | - if (ret == 0) |
4333 | - acl = NULL; |
4334 | } |
4335 | ret = 0; |
4336 | break; |
4337 | diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c |
4338 | index 4f67227f69a5..d0b6b342dff9 100644 |
4339 | --- a/fs/ceph/acl.c |
4340 | +++ b/fs/ceph/acl.c |
4341 | @@ -95,11 +95,9 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type) |
4342 | case ACL_TYPE_ACCESS: |
4343 | name = XATTR_NAME_POSIX_ACL_ACCESS; |
4344 | if (acl) { |
4345 | - ret = posix_acl_equiv_mode(acl, &new_mode); |
4346 | - if (ret < 0) |
4347 | + ret = posix_acl_update_mode(inode, &new_mode, &acl); |
4348 | + if (ret) |
4349 | goto out; |
4350 | - if (ret == 0) |
4351 | - acl = NULL; |
4352 | } |
4353 | break; |
4354 | case ACL_TYPE_DEFAULT: |
4355 | diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c |
4356 | index 42f1d1814083..e725aa0890e0 100644 |
4357 | --- a/fs/ext2/acl.c |
4358 | +++ b/fs/ext2/acl.c |
4359 | @@ -190,15 +190,11 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type) |
4360 | case ACL_TYPE_ACCESS: |
4361 | name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS; |
4362 | if (acl) { |
4363 | - error = posix_acl_equiv_mode(acl, &inode->i_mode); |
4364 | - if (error < 0) |
4365 | + error = posix_acl_update_mode(inode, &inode->i_mode, &acl); |
4366 | + if (error) |
4367 | return error; |
4368 | - else { |
4369 | - inode->i_ctime = CURRENT_TIME_SEC; |
4370 | - mark_inode_dirty(inode); |
4371 | - if (error == 0) |
4372 | - acl = NULL; |
4373 | - } |
4374 | + inode->i_ctime = CURRENT_TIME_SEC; |
4375 | + mark_inode_dirty(inode); |
4376 | } |
4377 | break; |
4378 | |
4379 | diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c |
4380 | index c6601a476c02..dfa519979038 100644 |
4381 | --- a/fs/ext4/acl.c |
4382 | +++ b/fs/ext4/acl.c |
4383 | @@ -193,15 +193,11 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type, |
4384 | case ACL_TYPE_ACCESS: |
4385 | name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS; |
4386 | if (acl) { |
4387 | - error = posix_acl_equiv_mode(acl, &inode->i_mode); |
4388 | - if (error < 0) |
4389 | + error = posix_acl_update_mode(inode, &inode->i_mode, &acl); |
4390 | + if (error) |
4391 | return error; |
4392 | - else { |
4393 | - inode->i_ctime = ext4_current_time(inode); |
4394 | - ext4_mark_inode_dirty(handle, inode); |
4395 | - if (error == 0) |
4396 | - acl = NULL; |
4397 | - } |
4398 | + inode->i_ctime = ext4_current_time(inode); |
4399 | + ext4_mark_inode_dirty(handle, inode); |
4400 | } |
4401 | break; |
4402 | |
4403 | diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c |
4404 | index 4dcc9e28dc5c..31344247ce89 100644 |
4405 | --- a/fs/f2fs/acl.c |
4406 | +++ b/fs/f2fs/acl.c |
4407 | @@ -210,12 +210,10 @@ static int __f2fs_set_acl(struct inode *inode, int type, |
4408 | case ACL_TYPE_ACCESS: |
4409 | name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS; |
4410 | if (acl) { |
4411 | - error = posix_acl_equiv_mode(acl, &inode->i_mode); |
4412 | - if (error < 0) |
4413 | + error = posix_acl_update_mode(inode, &inode->i_mode, &acl); |
4414 | + if (error) |
4415 | return error; |
4416 | set_acl_inode(inode, inode->i_mode); |
4417 | - if (error == 0) |
4418 | - acl = NULL; |
4419 | } |
4420 | break; |
4421 | |
4422 | diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c |
4423 | index 363ba9e9d8d0..2524807ee070 100644 |
4424 | --- a/fs/gfs2/acl.c |
4425 | +++ b/fs/gfs2/acl.c |
4426 | @@ -92,17 +92,11 @@ int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type) |
4427 | if (type == ACL_TYPE_ACCESS) { |
4428 | umode_t mode = inode->i_mode; |
4429 | |
4430 | - error = posix_acl_equiv_mode(acl, &mode); |
4431 | - if (error < 0) |
4432 | + error = posix_acl_update_mode(inode, &inode->i_mode, &acl); |
4433 | + if (error) |
4434 | return error; |
4435 | - |
4436 | - if (error == 0) |
4437 | - acl = NULL; |
4438 | - |
4439 | - if (mode != inode->i_mode) { |
4440 | - inode->i_mode = mode; |
4441 | + if (mode != inode->i_mode) |
4442 | mark_inode_dirty(inode); |
4443 | - } |
4444 | } |
4445 | |
4446 | if (acl) { |
4447 | diff --git a/fs/hfsplus/posix_acl.c b/fs/hfsplus/posix_acl.c |
4448 | index ab7ea2506b4d..9b92058a1240 100644 |
4449 | --- a/fs/hfsplus/posix_acl.c |
4450 | +++ b/fs/hfsplus/posix_acl.c |
4451 | @@ -65,8 +65,8 @@ int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl, |
4452 | case ACL_TYPE_ACCESS: |
4453 | xattr_name = XATTR_NAME_POSIX_ACL_ACCESS; |
4454 | if (acl) { |
4455 | - err = posix_acl_equiv_mode(acl, &inode->i_mode); |
4456 | - if (err < 0) |
4457 | + err = posix_acl_update_mode(inode, &inode->i_mode, &acl); |
4458 | + if (err) |
4459 | return err; |
4460 | } |
4461 | err = 0; |
4462 | diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c |
4463 | index bc2693d56298..2a0f2a1044c1 100644 |
4464 | --- a/fs/jffs2/acl.c |
4465 | +++ b/fs/jffs2/acl.c |
4466 | @@ -233,9 +233,10 @@ int jffs2_set_acl(struct inode *inode, struct posix_acl *acl, int type) |
4467 | case ACL_TYPE_ACCESS: |
4468 | xprefix = JFFS2_XPREFIX_ACL_ACCESS; |
4469 | if (acl) { |
4470 | - umode_t mode = inode->i_mode; |
4471 | - rc = posix_acl_equiv_mode(acl, &mode); |
4472 | - if (rc < 0) |
4473 | + umode_t mode; |
4474 | + |
4475 | + rc = posix_acl_update_mode(inode, &mode, &acl); |
4476 | + if (rc) |
4477 | return rc; |
4478 | if (inode->i_mode != mode) { |
4479 | struct iattr attr; |
4480 | @@ -247,8 +248,6 @@ int jffs2_set_acl(struct inode *inode, struct posix_acl *acl, int type) |
4481 | if (rc < 0) |
4482 | return rc; |
4483 | } |
4484 | - if (rc == 0) |
4485 | - acl = NULL; |
4486 | } |
4487 | break; |
4488 | case ACL_TYPE_DEFAULT: |
4489 | diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c |
4490 | index 21fa92ba2c19..3a1e1554a4e3 100644 |
4491 | --- a/fs/jfs/acl.c |
4492 | +++ b/fs/jfs/acl.c |
4493 | @@ -78,13 +78,11 @@ static int __jfs_set_acl(tid_t tid, struct inode *inode, int type, |
4494 | case ACL_TYPE_ACCESS: |
4495 | ea_name = XATTR_NAME_POSIX_ACL_ACCESS; |
4496 | if (acl) { |
4497 | - rc = posix_acl_equiv_mode(acl, &inode->i_mode); |
4498 | - if (rc < 0) |
4499 | + rc = posix_acl_update_mode(inode, &inode->i_mode, &acl); |
4500 | + if (rc) |
4501 | return rc; |
4502 | inode->i_ctime = CURRENT_TIME; |
4503 | mark_inode_dirty(inode); |
4504 | - if (rc == 0) |
4505 | - acl = NULL; |
4506 | } |
4507 | break; |
4508 | case ACL_TYPE_DEFAULT: |
4509 | diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c |
4510 | index 2162434728c0..164307b99405 100644 |
4511 | --- a/fs/ocfs2/acl.c |
4512 | +++ b/fs/ocfs2/acl.c |
4513 | @@ -241,13 +241,11 @@ int ocfs2_set_acl(handle_t *handle, |
4514 | case ACL_TYPE_ACCESS: |
4515 | name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS; |
4516 | if (acl) { |
4517 | - umode_t mode = inode->i_mode; |
4518 | - ret = posix_acl_equiv_mode(acl, &mode); |
4519 | - if (ret < 0) |
4520 | - return ret; |
4521 | + umode_t mode; |
4522 | |
4523 | - if (ret == 0) |
4524 | - acl = NULL; |
4525 | + ret = posix_acl_update_mode(inode, &mode, &acl); |
4526 | + if (ret) |
4527 | + return ret; |
4528 | |
4529 | ret = ocfs2_acl_set_mode(inode, di_bh, |
4530 | handle, mode); |
4531 | diff --git a/fs/orangefs/acl.c b/fs/orangefs/acl.c |
4532 | index 28f2195cd798..7a3754488312 100644 |
4533 | --- a/fs/orangefs/acl.c |
4534 | +++ b/fs/orangefs/acl.c |
4535 | @@ -73,14 +73,11 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type) |
4536 | case ACL_TYPE_ACCESS: |
4537 | name = XATTR_NAME_POSIX_ACL_ACCESS; |
4538 | if (acl) { |
4539 | - umode_t mode = inode->i_mode; |
4540 | - /* |
4541 | - * can we represent this with the traditional file |
4542 | - * mode permission bits? |
4543 | - */ |
4544 | - error = posix_acl_equiv_mode(acl, &mode); |
4545 | - if (error < 0) { |
4546 | - gossip_err("%s: posix_acl_equiv_mode err: %d\n", |
4547 | + umode_t mode; |
4548 | + |
4549 | + error = posix_acl_update_mode(inode, &mode, &acl); |
4550 | + if (error) { |
4551 | + gossip_err("%s: posix_acl_update_mode err: %d\n", |
4552 | __func__, |
4553 | error); |
4554 | return error; |
4555 | @@ -90,8 +87,6 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type) |
4556 | SetModeFlag(orangefs_inode); |
4557 | inode->i_mode = mode; |
4558 | mark_inode_dirty_sync(inode); |
4559 | - if (error == 0) |
4560 | - acl = NULL; |
4561 | } |
4562 | break; |
4563 | case ACL_TYPE_DEFAULT: |
4564 | diff --git a/fs/posix_acl.c b/fs/posix_acl.c |
4565 | index 59d47ab0791a..bfc3ec388322 100644 |
4566 | --- a/fs/posix_acl.c |
4567 | +++ b/fs/posix_acl.c |
4568 | @@ -626,6 +626,37 @@ no_mem: |
4569 | } |
4570 | EXPORT_SYMBOL_GPL(posix_acl_create); |
4571 | |
4572 | +/** |
4573 | + * posix_acl_update_mode - update mode in set_acl |
4574 | + * |
4575 | + * Update the file mode when setting an ACL: compute the new file permission |
4576 | + * bits based on the ACL. In addition, if the ACL is equivalent to the new |
4577 | + * file mode, set *acl to NULL to indicate that no ACL should be set. |
4578 | + * |
4579 | + * As with chmod, clear the setgit bit if the caller is not in the owning group |
4580 | + * or capable of CAP_FSETID (see inode_change_ok). |
4581 | + * |
4582 | + * Called from set_acl inode operations. |
4583 | + */ |
4584 | +int posix_acl_update_mode(struct inode *inode, umode_t *mode_p, |
4585 | + struct posix_acl **acl) |
4586 | +{ |
4587 | + umode_t mode = inode->i_mode; |
4588 | + int error; |
4589 | + |
4590 | + error = posix_acl_equiv_mode(*acl, &mode); |
4591 | + if (error < 0) |
4592 | + return error; |
4593 | + if (error == 0) |
4594 | + *acl = NULL; |
4595 | + if (!in_group_p(inode->i_gid) && |
4596 | + !capable_wrt_inode_uidgid(inode, CAP_FSETID)) |
4597 | + mode &= ~S_ISGID; |
4598 | + *mode_p = mode; |
4599 | + return 0; |
4600 | +} |
4601 | +EXPORT_SYMBOL(posix_acl_update_mode); |
4602 | + |
4603 | /* |
4604 | * Fix up the uids and gids in posix acl extended attributes in place. |
4605 | */ |
4606 | diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c |
4607 | index dbed42f755e0..27376681c640 100644 |
4608 | --- a/fs/reiserfs/xattr_acl.c |
4609 | +++ b/fs/reiserfs/xattr_acl.c |
4610 | @@ -242,13 +242,9 @@ __reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode, |
4611 | case ACL_TYPE_ACCESS: |
4612 | name = XATTR_NAME_POSIX_ACL_ACCESS; |
4613 | if (acl) { |
4614 | - error = posix_acl_equiv_mode(acl, &inode->i_mode); |
4615 | - if (error < 0) |
4616 | + error = posix_acl_update_mode(inode, &inode->i_mode, &acl); |
4617 | + if (error) |
4618 | return error; |
4619 | - else { |
4620 | - if (error == 0) |
4621 | - acl = NULL; |
4622 | - } |
4623 | } |
4624 | break; |
4625 | case ACL_TYPE_DEFAULT: |
4626 | diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c |
4627 | index b6e527b8eccb..8a0dec89ca56 100644 |
4628 | --- a/fs/xfs/xfs_acl.c |
4629 | +++ b/fs/xfs/xfs_acl.c |
4630 | @@ -257,16 +257,11 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) |
4631 | return error; |
4632 | |
4633 | if (type == ACL_TYPE_ACCESS) { |
4634 | - umode_t mode = inode->i_mode; |
4635 | - error = posix_acl_equiv_mode(acl, &mode); |
4636 | - |
4637 | - if (error <= 0) { |
4638 | - acl = NULL; |
4639 | - |
4640 | - if (error < 0) |
4641 | - return error; |
4642 | - } |
4643 | + umode_t mode; |
4644 | |
4645 | + error = posix_acl_update_mode(inode, &mode, &acl); |
4646 | + if (error) |
4647 | + return error; |
4648 | error = xfs_set_mode(inode, mode); |
4649 | if (error) |
4650 | return error; |
4651 | diff --git a/include/drm/drmP.h b/include/drm/drmP.h |
4652 | index d3778652e462..988903a59007 100644 |
4653 | --- a/include/drm/drmP.h |
4654 | +++ b/include/drm/drmP.h |
4655 | @@ -938,7 +938,8 @@ static inline int drm_debugfs_remove_files(const struct drm_info_list *files, |
4656 | #endif |
4657 | |
4658 | extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev, |
4659 | - struct drm_gem_object *obj, int flags); |
4660 | + struct drm_gem_object *obj, |
4661 | + int flags); |
4662 | extern int drm_gem_prime_handle_to_fd(struct drm_device *dev, |
4663 | struct drm_file *file_priv, uint32_t handle, uint32_t flags, |
4664 | int *prime_fd); |
4665 | diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h |
4666 | index c26d4638f665..fe99e6f956e2 100644 |
4667 | --- a/include/linux/hugetlb.h |
4668 | +++ b/include/linux/hugetlb.h |
4669 | @@ -450,8 +450,8 @@ static inline pgoff_t basepage_index(struct page *page) |
4670 | return __basepage_index(page); |
4671 | } |
4672 | |
4673 | -extern void dissolve_free_huge_pages(unsigned long start_pfn, |
4674 | - unsigned long end_pfn); |
4675 | +extern int dissolve_free_huge_pages(unsigned long start_pfn, |
4676 | + unsigned long end_pfn); |
4677 | static inline bool hugepage_migration_supported(struct hstate *h) |
4678 | { |
4679 | #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION |
4680 | @@ -518,7 +518,7 @@ static inline pgoff_t basepage_index(struct page *page) |
4681 | { |
4682 | return page->index; |
4683 | } |
4684 | -#define dissolve_free_huge_pages(s, e) do {} while (0) |
4685 | +#define dissolve_free_huge_pages(s, e) 0 |
4686 | #define hugepage_migration_supported(h) false |
4687 | |
4688 | static inline spinlock_t *huge_pte_lockptr(struct hstate *h, |
4689 | diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h |
4690 | index b519e137b9b7..bbfce62a0bd7 100644 |
4691 | --- a/include/linux/libnvdimm.h |
4692 | +++ b/include/linux/libnvdimm.h |
4693 | @@ -129,6 +129,8 @@ static inline struct nd_blk_region_desc *to_blk_region_desc( |
4694 | } |
4695 | |
4696 | int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length); |
4697 | +void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus, |
4698 | + phys_addr_t start, unsigned int len); |
4699 | struct nvdimm_bus *nvdimm_bus_register(struct device *parent, |
4700 | struct nvdimm_bus_descriptor *nfit_desc); |
4701 | void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus); |
4702 | diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h |
4703 | index d5d3d741f028..bf1046d0397b 100644 |
4704 | --- a/include/linux/posix_acl.h |
4705 | +++ b/include/linux/posix_acl.h |
4706 | @@ -93,6 +93,7 @@ extern int set_posix_acl(struct inode *, int, struct posix_acl *); |
4707 | extern int posix_acl_chmod(struct inode *, umode_t); |
4708 | extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **, |
4709 | struct posix_acl **); |
4710 | +extern int posix_acl_update_mode(struct inode *, umode_t *, struct posix_acl **); |
4711 | |
4712 | extern int simple_set_acl(struct inode *, struct posix_acl *, int); |
4713 | extern int simple_acl_create(struct inode *, struct inode *); |
4714 | diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c |
4715 | index abd286afbd27..a4775f3451b9 100644 |
4716 | --- a/kernel/irq/generic-chip.c |
4717 | +++ b/kernel/irq/generic-chip.c |
4718 | @@ -411,8 +411,29 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq, |
4719 | } |
4720 | EXPORT_SYMBOL_GPL(irq_map_generic_chip); |
4721 | |
4722 | +static void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq) |
4723 | +{ |
4724 | + struct irq_data *data = irq_domain_get_irq_data(d, virq); |
4725 | + struct irq_domain_chip_generic *dgc = d->gc; |
4726 | + unsigned int hw_irq = data->hwirq; |
4727 | + struct irq_chip_generic *gc; |
4728 | + int irq_idx; |
4729 | + |
4730 | + gc = irq_get_domain_generic_chip(d, hw_irq); |
4731 | + if (!gc) |
4732 | + return; |
4733 | + |
4734 | + irq_idx = hw_irq % dgc->irqs_per_chip; |
4735 | + |
4736 | + clear_bit(irq_idx, &gc->installed); |
4737 | + irq_domain_set_info(d, virq, hw_irq, &no_irq_chip, NULL, NULL, NULL, |
4738 | + NULL); |
4739 | + |
4740 | +} |
4741 | + |
4742 | struct irq_domain_ops irq_generic_chip_ops = { |
4743 | .map = irq_map_generic_chip, |
4744 | + .unmap = irq_unmap_generic_chip, |
4745 | .xlate = irq_domain_xlate_onetwocell, |
4746 | }; |
4747 | EXPORT_SYMBOL_GPL(irq_generic_chip_ops); |
4748 | diff --git a/mm/hugetlb.c b/mm/hugetlb.c |
4749 | index 603bdd01ec2c..770d83eb3f48 100644 |
4750 | --- a/mm/hugetlb.c |
4751 | +++ b/mm/hugetlb.c |
4752 | @@ -1437,22 +1437,32 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, |
4753 | |
4754 | /* |
4755 | * Dissolve a given free hugepage into free buddy pages. This function does |
4756 | - * nothing for in-use (including surplus) hugepages. |
4757 | + * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the |
4758 | + * number of free hugepages would be reduced below the number of reserved |
4759 | + * hugepages. |
4760 | */ |
4761 | -static void dissolve_free_huge_page(struct page *page) |
4762 | +static int dissolve_free_huge_page(struct page *page) |
4763 | { |
4764 | + int rc = 0; |
4765 | + |
4766 | spin_lock(&hugetlb_lock); |
4767 | if (PageHuge(page) && !page_count(page)) { |
4768 | struct page *head = compound_head(page); |
4769 | struct hstate *h = page_hstate(head); |
4770 | int nid = page_to_nid(head); |
4771 | + if (h->free_huge_pages - h->resv_huge_pages == 0) { |
4772 | + rc = -EBUSY; |
4773 | + goto out; |
4774 | + } |
4775 | list_del(&head->lru); |
4776 | h->free_huge_pages--; |
4777 | h->free_huge_pages_node[nid]--; |
4778 | h->max_huge_pages--; |
4779 | update_and_free_page(h, head); |
4780 | } |
4781 | +out: |
4782 | spin_unlock(&hugetlb_lock); |
4783 | + return rc; |
4784 | } |
4785 | |
4786 | /* |
4787 | @@ -1460,16 +1470,28 @@ static void dissolve_free_huge_page(struct page *page) |
4788 | * make specified memory blocks removable from the system. |
4789 | * Note that this will dissolve a free gigantic hugepage completely, if any |
4790 | * part of it lies within the given range. |
4791 | + * Also note that if dissolve_free_huge_page() returns with an error, all |
4792 | + * free hugepages that were dissolved before that error are lost. |
4793 | */ |
4794 | -void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn) |
4795 | +int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn) |
4796 | { |
4797 | unsigned long pfn; |
4798 | + struct page *page; |
4799 | + int rc = 0; |
4800 | |
4801 | if (!hugepages_supported()) |
4802 | - return; |
4803 | + return rc; |
4804 | + |
4805 | + for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) { |
4806 | + page = pfn_to_page(pfn); |
4807 | + if (PageHuge(page) && !page_count(page)) { |
4808 | + rc = dissolve_free_huge_page(page); |
4809 | + if (rc) |
4810 | + break; |
4811 | + } |
4812 | + } |
4813 | |
4814 | - for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) |
4815 | - dissolve_free_huge_page(pfn_to_page(pfn)); |
4816 | + return rc; |
4817 | } |
4818 | |
4819 | /* |
4820 | diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c |
4821 | index 9d29ba0f7192..962927309b6e 100644 |
4822 | --- a/mm/memory_hotplug.c |
4823 | +++ b/mm/memory_hotplug.c |
4824 | @@ -1945,7 +1945,9 @@ repeat: |
4825 | * dissolve free hugepages in the memory block before doing offlining |
4826 | * actually in order to make hugetlbfs's object counting consistent. |
4827 | */ |
4828 | - dissolve_free_huge_pages(start_pfn, end_pfn); |
4829 | + ret = dissolve_free_huge_pages(start_pfn, end_pfn); |
4830 | + if (ret) |
4831 | + goto failed_removal; |
4832 | /* check again */ |
4833 | offlined_pages = check_pages_isolated(start_pfn, end_pfn); |
4834 | if (offlined_pages < 0) { |
4835 | diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c |
4836 | index 3774b117d365..49b65d481949 100644 |
4837 | --- a/sound/soc/intel/boards/bxt_da7219_max98357a.c |
4838 | +++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c |
4839 | @@ -255,7 +255,7 @@ static struct snd_soc_ops broxton_da7219_ops = { |
4840 | /* broxton digital audio interface glue - connects codec <--> CPU */ |
4841 | static struct snd_soc_dai_link broxton_dais[] = { |
4842 | /* Front End DAI links */ |
4843 | - [BXT_DPCM_AUDIO_PB] |
4844 | + [BXT_DPCM_AUDIO_PB] = |
4845 | { |
4846 | .name = "Bxt Audio Port", |
4847 | .stream_name = "Audio", |
4848 | @@ -271,7 +271,7 @@ static struct snd_soc_dai_link broxton_dais[] = { |
4849 | .dpcm_playback = 1, |
4850 | .ops = &broxton_da7219_fe_ops, |
4851 | }, |
4852 | - [BXT_DPCM_AUDIO_CP] |
4853 | + [BXT_DPCM_AUDIO_CP] = |
4854 | { |
4855 | .name = "Bxt Audio Capture Port", |
4856 | .stream_name = "Audio Record", |
4857 | @@ -286,7 +286,7 @@ static struct snd_soc_dai_link broxton_dais[] = { |
4858 | .dpcm_capture = 1, |
4859 | .ops = &broxton_da7219_fe_ops, |
4860 | }, |
4861 | - [BXT_DPCM_AUDIO_REF_CP] |
4862 | + [BXT_DPCM_AUDIO_REF_CP] = |
4863 | { |
4864 | .name = "Bxt Audio Reference cap", |
4865 | .stream_name = "Refcap", |
4866 | @@ -300,7 +300,7 @@ static struct snd_soc_dai_link broxton_dais[] = { |
4867 | .nonatomic = 1, |
4868 | .dynamic = 1, |
4869 | }, |
4870 | - [BXT_DPCM_AUDIO_HDMI1_PB] |
4871 | + [BXT_DPCM_AUDIO_HDMI1_PB] = |
4872 | { |
4873 | .name = "Bxt HDMI Port1", |
4874 | .stream_name = "Hdmi1", |
4875 | @@ -313,7 +313,7 @@ static struct snd_soc_dai_link broxton_dais[] = { |
4876 | .nonatomic = 1, |
4877 | .dynamic = 1, |
4878 | }, |
4879 | - [BXT_DPCM_AUDIO_HDMI2_PB] |
4880 | + [BXT_DPCM_AUDIO_HDMI2_PB] = |
4881 | { |
4882 | .name = "Bxt HDMI Port2", |
4883 | .stream_name = "Hdmi2", |
4884 | @@ -326,7 +326,7 @@ static struct snd_soc_dai_link broxton_dais[] = { |
4885 | .nonatomic = 1, |
4886 | .dynamic = 1, |
4887 | }, |
4888 | - [BXT_DPCM_AUDIO_HDMI3_PB] |
4889 | + [BXT_DPCM_AUDIO_HDMI3_PB] = |
4890 | { |
4891 | .name = "Bxt HDMI Port3", |
4892 | .stream_name = "Hdmi3", |
4893 | diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c |
4894 | index 253d7bfbf511..d610bdca1608 100644 |
4895 | --- a/sound/soc/intel/boards/bxt_rt298.c |
4896 | +++ b/sound/soc/intel/boards/bxt_rt298.c |
4897 | @@ -271,7 +271,7 @@ static const struct snd_soc_ops broxton_rt286_fe_ops = { |
4898 | /* broxton digital audio interface glue - connects codec <--> CPU */ |
4899 | static struct snd_soc_dai_link broxton_rt298_dais[] = { |
4900 | /* Front End DAI links */ |
4901 | - [BXT_DPCM_AUDIO_PB] |
4902 | + [BXT_DPCM_AUDIO_PB] = |
4903 | { |
4904 | .name = "Bxt Audio Port", |
4905 | .stream_name = "Audio", |
4906 | @@ -286,7 +286,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = { |
4907 | .dpcm_playback = 1, |
4908 | .ops = &broxton_rt286_fe_ops, |
4909 | }, |
4910 | - [BXT_DPCM_AUDIO_CP] |
4911 | + [BXT_DPCM_AUDIO_CP] = |
4912 | { |
4913 | .name = "Bxt Audio Capture Port", |
4914 | .stream_name = "Audio Record", |
4915 | @@ -300,7 +300,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = { |
4916 | .dpcm_capture = 1, |
4917 | .ops = &broxton_rt286_fe_ops, |
4918 | }, |
4919 | - [BXT_DPCM_AUDIO_REF_CP] |
4920 | + [BXT_DPCM_AUDIO_REF_CP] = |
4921 | { |
4922 | .name = "Bxt Audio Reference cap", |
4923 | .stream_name = "refcap", |
4924 | @@ -313,7 +313,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = { |
4925 | .nonatomic = 1, |
4926 | .dynamic = 1, |
4927 | }, |
4928 | - [BXT_DPCM_AUDIO_DMIC_CP] |
4929 | + [BXT_DPCM_AUDIO_DMIC_CP] = |
4930 | { |
4931 | .name = "Bxt Audio DMIC cap", |
4932 | .stream_name = "dmiccap", |
4933 | @@ -327,7 +327,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = { |
4934 | .dynamic = 1, |
4935 | .ops = &broxton_dmic_ops, |
4936 | }, |
4937 | - [BXT_DPCM_AUDIO_HDMI1_PB] |
4938 | + [BXT_DPCM_AUDIO_HDMI1_PB] = |
4939 | { |
4940 | .name = "Bxt HDMI Port1", |
4941 | .stream_name = "Hdmi1", |
4942 | @@ -340,7 +340,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = { |
4943 | .nonatomic = 1, |
4944 | .dynamic = 1, |
4945 | }, |
4946 | - [BXT_DPCM_AUDIO_HDMI2_PB] |
4947 | + [BXT_DPCM_AUDIO_HDMI2_PB] = |
4948 | { |
4949 | .name = "Bxt HDMI Port2", |
4950 | .stream_name = "Hdmi2", |
4951 | @@ -353,7 +353,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = { |
4952 | .nonatomic = 1, |
4953 | .dynamic = 1, |
4954 | }, |
4955 | - [BXT_DPCM_AUDIO_HDMI3_PB] |
4956 | + [BXT_DPCM_AUDIO_HDMI3_PB] = |
4957 | { |
4958 | .name = "Bxt HDMI Port3", |
4959 | .stream_name = "Hdmi3", |
4960 | diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c |
4961 | index d908ff8f9755..801082fdc3e0 100644 |
4962 | --- a/sound/soc/soc-dapm.c |
4963 | +++ b/sound/soc/soc-dapm.c |
4964 | @@ -823,6 +823,7 @@ static int dapm_create_or_share_kcontrol(struct snd_soc_dapm_widget *w, |
4965 | case snd_soc_dapm_switch: |
4966 | case snd_soc_dapm_mixer: |
4967 | case snd_soc_dapm_pga: |
4968 | + case snd_soc_dapm_out_drv: |
4969 | wname_in_long_name = true; |
4970 | kcname_in_long_name = true; |
4971 | break; |
4972 | @@ -3049,6 +3050,9 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol, |
4973 | } |
4974 | mutex_unlock(&card->dapm_mutex); |
4975 | |
4976 | + if (ret) |
4977 | + return ret; |
4978 | + |
4979 | if (invert) |
4980 | ucontrol->value.integer.value[0] = max - val; |
4981 | else |
4982 | @@ -3200,7 +3204,7 @@ int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol, |
4983 | if (e->shift_l != e->shift_r) { |
4984 | if (item[1] > e->items) |
4985 | return -EINVAL; |
4986 | - val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_l; |
4987 | + val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_r; |
4988 | mask |= e->mask << e->shift_r; |
4989 | } |
4990 | |
4991 | diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c |
4992 | index ee7f15aa46fc..34069076bf8e 100644 |
4993 | --- a/sound/soc/soc-topology.c |
4994 | +++ b/sound/soc/soc-topology.c |
4995 | @@ -1475,6 +1475,7 @@ widget: |
4996 | if (widget == NULL) { |
4997 | dev_err(tplg->dev, "ASoC: failed to create widget %s controls\n", |
4998 | w->name); |
4999 | + ret = -ENOMEM; |
5000 | goto hdr_err; |
5001 | } |
5002 | |
5003 | diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h |
5004 | index 7ed72a475c57..e4b717e9eb6c 100644 |
5005 | --- a/tools/perf/perf-sys.h |
5006 | +++ b/tools/perf/perf-sys.h |
5007 | @@ -20,7 +20,6 @@ |
5008 | #endif |
5009 | |
5010 | #ifdef __powerpc__ |
5011 | -#include "../../arch/powerpc/include/uapi/asm/unistd.h" |
5012 | #define CPUINFO_PROC {"cpu"} |
5013 | #endif |
5014 | |
5015 | diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c |
5016 | index 13d414384739..7aee954b307f 100644 |
5017 | --- a/tools/perf/ui/browsers/hists.c |
5018 | +++ b/tools/perf/ui/browsers/hists.c |
5019 | @@ -1091,7 +1091,6 @@ static int __hpp__slsmg_color_printf(struct perf_hpp *hpp, const char *fmt, ...) |
5020 | ret = scnprintf(hpp->buf, hpp->size, fmt, len, percent); |
5021 | ui_browser__printf(arg->b, "%s", hpp->buf); |
5022 | |
5023 | - advance_hpp(hpp, ret); |
5024 | return ret; |
5025 | } |
5026 | |
5027 | @@ -2046,6 +2045,7 @@ void hist_browser__init(struct hist_browser *browser, |
5028 | struct hists *hists) |
5029 | { |
5030 | struct perf_hpp_fmt *fmt; |
5031 | + struct perf_hpp_list_node *node; |
5032 | |
5033 | browser->hists = hists; |
5034 | browser->b.refresh = hist_browser__refresh; |
5035 | @@ -2058,6 +2058,11 @@ void hist_browser__init(struct hist_browser *browser, |
5036 | perf_hpp__reset_width(fmt, hists); |
5037 | ++browser->b.columns; |
5038 | } |
5039 | + /* hierarchy entries have their own hpp list */ |
5040 | + list_for_each_entry(node, &hists->hpp_formats, list) { |
5041 | + perf_hpp_list__for_each_format(&node->hpp, fmt) |
5042 | + perf_hpp__reset_width(fmt, hists); |
5043 | + } |
5044 | } |
5045 | |
5046 | struct hist_browser *hist_browser__new(struct hists *hists) |
5047 | diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c |
5048 | index f04a63112079..d0cae75408ff 100644 |
5049 | --- a/tools/perf/ui/stdio/hist.c |
5050 | +++ b/tools/perf/ui/stdio/hist.c |
5051 | @@ -628,14 +628,6 @@ hists__fprintf_hierarchy_headers(struct hists *hists, |
5052 | struct perf_hpp *hpp, |
5053 | FILE *fp) |
5054 | { |
5055 | - struct perf_hpp_list_node *fmt_node; |
5056 | - struct perf_hpp_fmt *fmt; |
5057 | - |
5058 | - list_for_each_entry(fmt_node, &hists->hpp_formats, list) { |
5059 | - perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) |
5060 | - perf_hpp__reset_width(fmt, hists); |
5061 | - } |
5062 | - |
5063 | return print_hierarchy_header(hists, hpp, symbol_conf.field_sep, fp); |
5064 | } |
5065 | |
5066 | @@ -714,6 +706,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows, |
5067 | bool use_callchain) |
5068 | { |
5069 | struct perf_hpp_fmt *fmt; |
5070 | + struct perf_hpp_list_node *node; |
5071 | struct rb_node *nd; |
5072 | size_t ret = 0; |
5073 | const char *sep = symbol_conf.field_sep; |
5074 | @@ -726,6 +719,11 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows, |
5075 | |
5076 | hists__for_each_format(hists, fmt) |
5077 | perf_hpp__reset_width(fmt, hists); |
5078 | + /* hierarchy entries have their own hpp list */ |
5079 | + list_for_each_entry(node, &hists->hpp_formats, list) { |
5080 | + perf_hpp_list__for_each_format(&node->hpp, fmt) |
5081 | + perf_hpp__reset_width(fmt, hists); |
5082 | + } |
5083 | |
5084 | if (symbol_conf.col_width_list_str) |
5085 | perf_hpp__set_user_width(symbol_conf.col_width_list_str); |
5086 | diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c |
5087 | index 4f979bb27b6c..7123f4de32cc 100644 |
5088 | --- a/tools/perf/util/data-convert-bt.c |
5089 | +++ b/tools/perf/util/data-convert-bt.c |
5090 | @@ -437,7 +437,7 @@ add_bpf_output_values(struct bt_ctf_event_class *event_class, |
5091 | int ret; |
5092 | |
5093 | if (nr_elements * sizeof(u32) != raw_size) |
5094 | - pr_warning("Incorrect raw_size (%u) in bpf output event, skip %lu bytes\n", |
5095 | + pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n", |
5096 | raw_size, nr_elements * sizeof(u32) - raw_size); |
5097 | |
5098 | len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len"); |
5099 | diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c |
5100 | index a811c13a74d6..f77b3167585c 100644 |
5101 | --- a/tools/perf/util/symbol-elf.c |
5102 | +++ b/tools/perf/util/symbol-elf.c |
5103 | @@ -1113,9 +1113,8 @@ new_symbol: |
5104 | * For misannotated, zeroed, ASM function sizes. |
5105 | */ |
5106 | if (nr > 0) { |
5107 | - if (!symbol_conf.allow_aliases) |
5108 | - symbols__fixup_duplicate(&dso->symbols[map->type]); |
5109 | symbols__fixup_end(&dso->symbols[map->type]); |
5110 | + symbols__fixup_duplicate(&dso->symbols[map->type]); |
5111 | if (kmap) { |
5112 | /* |
5113 | * We need to fixup this here too because we create new |
5114 | diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c |
5115 | index 37e8d20ae03e..f29f336ed17b 100644 |
5116 | --- a/tools/perf/util/symbol.c |
5117 | +++ b/tools/perf/util/symbol.c |
5118 | @@ -152,6 +152,9 @@ void symbols__fixup_duplicate(struct rb_root *symbols) |
5119 | struct rb_node *nd; |
5120 | struct symbol *curr, *next; |
5121 | |
5122 | + if (symbol_conf.allow_aliases) |
5123 | + return; |
5124 | + |
5125 | nd = rb_first(symbols); |
5126 | |
5127 | while (nd) { |
5128 | @@ -1234,8 +1237,8 @@ int __dso__load_kallsyms(struct dso *dso, const char *filename, |
5129 | if (kallsyms__delta(map, filename, &delta)) |
5130 | return -1; |
5131 | |
5132 | - symbols__fixup_duplicate(&dso->symbols[map->type]); |
5133 | symbols__fixup_end(&dso->symbols[map->type]); |
5134 | + symbols__fixup_duplicate(&dso->symbols[map->type]); |
5135 | |
5136 | if (dso->kernel == DSO_TYPE_GUEST_KERNEL) |
5137 | dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS; |