Contents of /trunk/kernel-magellan/patches-4.8/0108-4.8.9-all-fixes.patch
Parent Directory | Revision Log
Revision 2848 -
(show annotations)
(download)
Tue Nov 22 13:19:49 2016 UTC (7 years, 10 months ago) by niro
File size: 105964 byte(s)
-linux-4.8.9
1 | diff --git a/Makefile b/Makefile |
2 | index 8f18daa2c76a..c1519ab85258 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 4 |
7 | PATCHLEVEL = 8 |
8 | -SUBLEVEL = 8 |
9 | +SUBLEVEL = 9 |
10 | EXTRAVERSION = |
11 | NAME = Psychotic Stoned Sheep |
12 | |
13 | diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c |
14 | index f927b8dc6edd..c10390d1ddb6 100644 |
15 | --- a/arch/arc/kernel/time.c |
16 | +++ b/arch/arc/kernel/time.c |
17 | @@ -152,14 +152,17 @@ static cycle_t arc_read_rtc(struct clocksource *cs) |
18 | cycle_t full; |
19 | } stamp; |
20 | |
21 | - |
22 | - __asm__ __volatile( |
23 | - "1: \n" |
24 | - " lr %0, [AUX_RTC_LOW] \n" |
25 | - " lr %1, [AUX_RTC_HIGH] \n" |
26 | - " lr %2, [AUX_RTC_CTRL] \n" |
27 | - " bbit0.nt %2, 31, 1b \n" |
28 | - : "=r" (stamp.low), "=r" (stamp.high), "=r" (status)); |
29 | + /* |
30 | + * hardware has an internal state machine which tracks readout of |
31 | + * low/high and updates the CTRL.status if |
32 | + * - interrupt/exception taken between the two reads |
33 | + * - high increments after low has been read |
34 | + */ |
35 | + do { |
36 | + stamp.low = read_aux_reg(AUX_RTC_LOW); |
37 | + stamp.high = read_aux_reg(AUX_RTC_HIGH); |
38 | + status = read_aux_reg(AUX_RTC_CTRL); |
39 | + } while (!(status & _BITUL(31))); |
40 | |
41 | return stamp.full; |
42 | } |
43 | diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c |
44 | index 20afc65e22dc..9288851d43a0 100644 |
45 | --- a/arch/arc/mm/dma.c |
46 | +++ b/arch/arc/mm/dma.c |
47 | @@ -105,6 +105,31 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr, |
48 | __free_pages(page, get_order(size)); |
49 | } |
50 | |
51 | +static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
52 | + void *cpu_addr, dma_addr_t dma_addr, size_t size, |
53 | + unsigned long attrs) |
54 | +{ |
55 | + unsigned long user_count = vma_pages(vma); |
56 | + unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
57 | + unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr)); |
58 | + unsigned long off = vma->vm_pgoff; |
59 | + int ret = -ENXIO; |
60 | + |
61 | + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); |
62 | + |
63 | + if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) |
64 | + return ret; |
65 | + |
66 | + if (off < count && user_count <= (count - off)) { |
67 | + ret = remap_pfn_range(vma, vma->vm_start, |
68 | + pfn + off, |
69 | + user_count << PAGE_SHIFT, |
70 | + vma->vm_page_prot); |
71 | + } |
72 | + |
73 | + return ret; |
74 | +} |
75 | + |
76 | /* |
77 | * streaming DMA Mapping API... |
78 | * CPU accesses page via normal paddr, thus needs to explicitly made |
79 | @@ -193,6 +218,7 @@ static int arc_dma_supported(struct device *dev, u64 dma_mask) |
80 | struct dma_map_ops arc_dma_ops = { |
81 | .alloc = arc_dma_alloc, |
82 | .free = arc_dma_free, |
83 | + .mmap = arc_dma_mmap, |
84 | .map_page = arc_dma_map_page, |
85 | .map_sg = arc_dma_map_sg, |
86 | .sync_single_for_device = arc_dma_sync_single_for_device, |
87 | diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c |
88 | index 28f03ca60100..794bebb43d23 100644 |
89 | --- a/arch/s390/hypfs/hypfs_diag.c |
90 | +++ b/arch/s390/hypfs/hypfs_diag.c |
91 | @@ -363,11 +363,11 @@ out: |
92 | static int diag224_get_name_table(void) |
93 | { |
94 | /* memory must be below 2GB */ |
95 | - diag224_cpu_names = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA); |
96 | + diag224_cpu_names = (char *) __get_free_page(GFP_KERNEL | GFP_DMA); |
97 | if (!diag224_cpu_names) |
98 | return -ENOMEM; |
99 | if (diag224(diag224_cpu_names)) { |
100 | - kfree(diag224_cpu_names); |
101 | + free_page((unsigned long) diag224_cpu_names); |
102 | return -EOPNOTSUPP; |
103 | } |
104 | EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16); |
105 | @@ -376,7 +376,7 @@ static int diag224_get_name_table(void) |
106 | |
107 | static void diag224_delete_name_table(void) |
108 | { |
109 | - kfree(diag224_cpu_names); |
110 | + free_page((unsigned long) diag224_cpu_names); |
111 | } |
112 | |
113 | static int diag224_idx2name(int index, char *name) |
114 | diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h |
115 | index 03323175de30..602af692efdc 100644 |
116 | --- a/arch/s390/include/asm/processor.h |
117 | +++ b/arch/s390/include/asm/processor.h |
118 | @@ -192,7 +192,7 @@ struct task_struct; |
119 | struct mm_struct; |
120 | struct seq_file; |
121 | |
122 | -typedef int (*dump_trace_func_t)(void *data, unsigned long address); |
123 | +typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable); |
124 | void dump_trace(dump_trace_func_t func, void *data, |
125 | struct task_struct *task, unsigned long sp); |
126 | |
127 | diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c |
128 | index 6693383bc01b..518f615ad0a2 100644 |
129 | --- a/arch/s390/kernel/dumpstack.c |
130 | +++ b/arch/s390/kernel/dumpstack.c |
131 | @@ -38,10 +38,10 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp, |
132 | if (sp < low || sp > high - sizeof(*sf)) |
133 | return sp; |
134 | sf = (struct stack_frame *) sp; |
135 | + if (func(data, sf->gprs[8], 0)) |
136 | + return sp; |
137 | /* Follow the backchain. */ |
138 | while (1) { |
139 | - if (func(data, sf->gprs[8])) |
140 | - return sp; |
141 | low = sp; |
142 | sp = sf->back_chain; |
143 | if (!sp) |
144 | @@ -49,6 +49,8 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp, |
145 | if (sp <= low || sp > high - sizeof(*sf)) |
146 | return sp; |
147 | sf = (struct stack_frame *) sp; |
148 | + if (func(data, sf->gprs[8], 1)) |
149 | + return sp; |
150 | } |
151 | /* Zero backchain detected, check for interrupt frame. */ |
152 | sp = (unsigned long) (sf + 1); |
153 | @@ -56,7 +58,7 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp, |
154 | return sp; |
155 | regs = (struct pt_regs *) sp; |
156 | if (!user_mode(regs)) { |
157 | - if (func(data, regs->psw.addr)) |
158 | + if (func(data, regs->psw.addr, 1)) |
159 | return sp; |
160 | } |
161 | low = sp; |
162 | @@ -90,7 +92,7 @@ struct return_address_data { |
163 | int depth; |
164 | }; |
165 | |
166 | -static int __return_address(void *data, unsigned long address) |
167 | +static int __return_address(void *data, unsigned long address, int reliable) |
168 | { |
169 | struct return_address_data *rd = data; |
170 | |
171 | @@ -109,9 +111,12 @@ unsigned long return_address(int depth) |
172 | } |
173 | EXPORT_SYMBOL_GPL(return_address); |
174 | |
175 | -static int show_address(void *data, unsigned long address) |
176 | +static int show_address(void *data, unsigned long address, int reliable) |
177 | { |
178 | - printk("([<%016lx>] %pSR)\n", address, (void *)address); |
179 | + if (reliable) |
180 | + printk(" [<%016lx>] %pSR \n", address, (void *)address); |
181 | + else |
182 | + printk("([<%016lx>] %pSR)\n", address, (void *)address); |
183 | return 0; |
184 | } |
185 | |
186 | diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c |
187 | index 17431f63de00..955a7b6fa0a4 100644 |
188 | --- a/arch/s390/kernel/perf_event.c |
189 | +++ b/arch/s390/kernel/perf_event.c |
190 | @@ -222,7 +222,7 @@ static int __init service_level_perf_register(void) |
191 | } |
192 | arch_initcall(service_level_perf_register); |
193 | |
194 | -static int __perf_callchain_kernel(void *data, unsigned long address) |
195 | +static int __perf_callchain_kernel(void *data, unsigned long address, int reliable) |
196 | { |
197 | struct perf_callchain_entry_ctx *entry = data; |
198 | |
199 | diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c |
200 | index 44f84b23d4e5..355db9db8210 100644 |
201 | --- a/arch/s390/kernel/stacktrace.c |
202 | +++ b/arch/s390/kernel/stacktrace.c |
203 | @@ -27,12 +27,12 @@ static int __save_address(void *data, unsigned long address, int nosched) |
204 | return 1; |
205 | } |
206 | |
207 | -static int save_address(void *data, unsigned long address) |
208 | +static int save_address(void *data, unsigned long address, int reliable) |
209 | { |
210 | return __save_address(data, address, 0); |
211 | } |
212 | |
213 | -static int save_address_nosched(void *data, unsigned long address) |
214 | +static int save_address_nosched(void *data, unsigned long address, int reliable) |
215 | { |
216 | return __save_address(data, address, 1); |
217 | } |
218 | diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c |
219 | index 16f4c3960b87..9a4de4599c7b 100644 |
220 | --- a/arch/s390/oprofile/init.c |
221 | +++ b/arch/s390/oprofile/init.c |
222 | @@ -13,7 +13,7 @@ |
223 | #include <linux/init.h> |
224 | #include <asm/processor.h> |
225 | |
226 | -static int __s390_backtrace(void *data, unsigned long address) |
227 | +static int __s390_backtrace(void *data, unsigned long address, int reliable) |
228 | { |
229 | unsigned int *depth = data; |
230 | |
231 | diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile |
232 | index 77f28ce9c646..9976fcecd17e 100644 |
233 | --- a/arch/x86/entry/Makefile |
234 | +++ b/arch/x86/entry/Makefile |
235 | @@ -5,8 +5,8 @@ |
236 | OBJECT_FILES_NON_STANDARD_entry_$(BITS).o := y |
237 | OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y |
238 | |
239 | -CFLAGS_syscall_64.o += -Wno-override-init |
240 | -CFLAGS_syscall_32.o += -Wno-override-init |
241 | +CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,) |
242 | +CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,) |
243 | obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o |
244 | obj-y += common.o |
245 | |
246 | diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c |
247 | index fbd19444403f..d99ca57cbf60 100644 |
248 | --- a/arch/x86/kernel/acpi/boot.c |
249 | +++ b/arch/x86/kernel/acpi/boot.c |
250 | @@ -453,6 +453,7 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger, |
251 | polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK; |
252 | |
253 | mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); |
254 | + acpi_penalize_sci_irq(bus_irq, trigger, polarity); |
255 | |
256 | /* |
257 | * stash over-ride to indicate we've been here |
258 | diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c |
259 | index 60746ef904e4..caea575f25f8 100644 |
260 | --- a/drivers/acpi/apei/ghes.c |
261 | +++ b/drivers/acpi/apei/ghes.c |
262 | @@ -662,7 +662,7 @@ static int ghes_proc(struct ghes *ghes) |
263 | ghes_do_proc(ghes, ghes->estatus); |
264 | out: |
265 | ghes_clear_estatus(ghes); |
266 | - return 0; |
267 | + return rc; |
268 | } |
269 | |
270 | static void ghes_add_timer(struct ghes *ghes) |
271 | diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c |
272 | index c983bf733ad3..bc3d914dfc3e 100644 |
273 | --- a/drivers/acpi/pci_link.c |
274 | +++ b/drivers/acpi/pci_link.c |
275 | @@ -87,6 +87,7 @@ struct acpi_pci_link { |
276 | |
277 | static LIST_HEAD(acpi_link_list); |
278 | static DEFINE_MUTEX(acpi_link_lock); |
279 | +static int sci_irq = -1, sci_penalty; |
280 | |
281 | /* -------------------------------------------------------------------------- |
282 | PCI Link Device Management |
283 | @@ -496,25 +497,13 @@ static int acpi_irq_get_penalty(int irq) |
284 | { |
285 | int penalty = 0; |
286 | |
287 | - /* |
288 | - * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict |
289 | - * with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be |
290 | - * use for PCI IRQs. |
291 | - */ |
292 | - if (irq == acpi_gbl_FADT.sci_interrupt) { |
293 | - u32 type = irq_get_trigger_type(irq) & IRQ_TYPE_SENSE_MASK; |
294 | - |
295 | - if (type != IRQ_TYPE_LEVEL_LOW) |
296 | - penalty += PIRQ_PENALTY_ISA_ALWAYS; |
297 | - else |
298 | - penalty += PIRQ_PENALTY_PCI_USING; |
299 | - } |
300 | + if (irq == sci_irq) |
301 | + penalty += sci_penalty; |
302 | |
303 | if (irq < ACPI_MAX_ISA_IRQS) |
304 | return penalty + acpi_isa_irq_penalty[irq]; |
305 | |
306 | - penalty += acpi_irq_pci_sharing_penalty(irq); |
307 | - return penalty; |
308 | + return penalty + acpi_irq_pci_sharing_penalty(irq); |
309 | } |
310 | |
311 | int __init acpi_irq_penalty_init(void) |
312 | @@ -619,6 +608,10 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link) |
313 | acpi_device_bid(link->device)); |
314 | return -ENODEV; |
315 | } else { |
316 | + if (link->irq.active < ACPI_MAX_ISA_IRQS) |
317 | + acpi_isa_irq_penalty[link->irq.active] += |
318 | + PIRQ_PENALTY_PCI_USING; |
319 | + |
320 | printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n", |
321 | acpi_device_name(link->device), |
322 | acpi_device_bid(link->device), link->irq.active); |
323 | @@ -849,7 +842,7 @@ static int __init acpi_irq_penalty_update(char *str, int used) |
324 | continue; |
325 | |
326 | if (used) |
327 | - new_penalty = acpi_irq_get_penalty(irq) + |
328 | + new_penalty = acpi_isa_irq_penalty[irq] + |
329 | PIRQ_PENALTY_ISA_USED; |
330 | else |
331 | new_penalty = 0; |
332 | @@ -871,7 +864,7 @@ static int __init acpi_irq_penalty_update(char *str, int used) |
333 | void acpi_penalize_isa_irq(int irq, int active) |
334 | { |
335 | if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty))) |
336 | - acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) + |
337 | + acpi_isa_irq_penalty[irq] += |
338 | (active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING); |
339 | } |
340 | |
341 | @@ -881,6 +874,17 @@ bool acpi_isa_irq_available(int irq) |
342 | acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS); |
343 | } |
344 | |
345 | +void acpi_penalize_sci_irq(int irq, int trigger, int polarity) |
346 | +{ |
347 | + sci_irq = irq; |
348 | + |
349 | + if (trigger == ACPI_MADT_TRIGGER_LEVEL && |
350 | + polarity == ACPI_MADT_POLARITY_ACTIVE_LOW) |
351 | + sci_penalty = PIRQ_PENALTY_PCI_USING; |
352 | + else |
353 | + sci_penalty = PIRQ_PENALTY_ISA_ALWAYS; |
354 | +} |
355 | + |
356 | /* |
357 | * Over-ride default table to reserve additional IRQs for use by ISA |
358 | * e.g. acpi_irq_isa=5 |
359 | diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c |
360 | index 100be556e613..83482721bc01 100644 |
361 | --- a/drivers/block/drbd/drbd_main.c |
362 | +++ b/drivers/block/drbd/drbd_main.c |
363 | @@ -1871,7 +1871,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock, |
364 | drbd_update_congested(connection); |
365 | } |
366 | do { |
367 | - rv = kernel_sendmsg(sock, &msg, &iov, 1, size); |
368 | + rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); |
369 | if (rv == -EAGAIN) { |
370 | if (we_should_drop_the_connection(connection, sock)) |
371 | break; |
372 | diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c |
373 | index 44311296ec02..0f7d28a98b9a 100644 |
374 | --- a/drivers/char/agp/intel-gtt.c |
375 | +++ b/drivers/char/agp/intel-gtt.c |
376 | @@ -845,6 +845,8 @@ void intel_gtt_insert_page(dma_addr_t addr, |
377 | unsigned int flags) |
378 | { |
379 | intel_private.driver->write_entry(addr, pg, flags); |
380 | + if (intel_private.driver->chipset_flush) |
381 | + intel_private.driver->chipset_flush(); |
382 | } |
383 | EXPORT_SYMBOL(intel_gtt_insert_page); |
384 | |
385 | diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c |
386 | index 9203f2d130c0..340f96e44642 100644 |
387 | --- a/drivers/char/hw_random/core.c |
388 | +++ b/drivers/char/hw_random/core.c |
389 | @@ -84,14 +84,14 @@ static size_t rng_buffer_size(void) |
390 | |
391 | static void add_early_randomness(struct hwrng *rng) |
392 | { |
393 | - unsigned char bytes[16]; |
394 | int bytes_read; |
395 | + size_t size = min_t(size_t, 16, rng_buffer_size()); |
396 | |
397 | mutex_lock(&reading_mutex); |
398 | - bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1); |
399 | + bytes_read = rng_get_data(rng, rng_buffer, size, 1); |
400 | mutex_unlock(&reading_mutex); |
401 | if (bytes_read > 0) |
402 | - add_device_randomness(bytes, bytes_read); |
403 | + add_device_randomness(rng_buffer, bytes_read); |
404 | } |
405 | |
406 | static inline void cleanup_rng(struct kref *kref) |
407 | diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c |
408 | index 20b105584f82..80ae2a51452d 100644 |
409 | --- a/drivers/clk/clk-qoriq.c |
410 | +++ b/drivers/clk/clk-qoriq.c |
411 | @@ -700,6 +700,7 @@ static struct clk * __init create_mux_common(struct clockgen *cg, |
412 | struct mux_hwclock *hwc, |
413 | const struct clk_ops *ops, |
414 | unsigned long min_rate, |
415 | + unsigned long max_rate, |
416 | unsigned long pct80_rate, |
417 | const char *fmt, int idx) |
418 | { |
419 | @@ -728,6 +729,8 @@ static struct clk * __init create_mux_common(struct clockgen *cg, |
420 | continue; |
421 | if (rate < min_rate) |
422 | continue; |
423 | + if (rate > max_rate) |
424 | + continue; |
425 | |
426 | parent_names[j] = div->name; |
427 | hwc->parent_to_clksel[j] = i; |
428 | @@ -759,7 +762,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx) |
429 | struct mux_hwclock *hwc; |
430 | const struct clockgen_pll_div *div; |
431 | unsigned long plat_rate, min_rate; |
432 | - u64 pct80_rate; |
433 | + u64 max_rate, pct80_rate; |
434 | u32 clksel; |
435 | |
436 | hwc = kzalloc(sizeof(*hwc), GFP_KERNEL); |
437 | @@ -787,8 +790,8 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx) |
438 | return NULL; |
439 | } |
440 | |
441 | - pct80_rate = clk_get_rate(div->clk); |
442 | - pct80_rate *= 8; |
443 | + max_rate = clk_get_rate(div->clk); |
444 | + pct80_rate = max_rate * 8; |
445 | do_div(pct80_rate, 10); |
446 | |
447 | plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk); |
448 | @@ -798,7 +801,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx) |
449 | else |
450 | min_rate = plat_rate / 2; |
451 | |
452 | - return create_mux_common(cg, hwc, &cmux_ops, min_rate, |
453 | + return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate, |
454 | pct80_rate, "cg-cmux%d", idx); |
455 | } |
456 | |
457 | @@ -813,7 +816,7 @@ static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx) |
458 | hwc->reg = cg->regs + 0x20 * idx + 0x10; |
459 | hwc->info = cg->info.hwaccel[idx]; |
460 | |
461 | - return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0, |
462 | + return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0, |
463 | "cg-hwaccel%d", idx); |
464 | } |
465 | |
466 | diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c |
467 | index bdf8b971f332..0fa91f37879b 100644 |
468 | --- a/drivers/clk/samsung/clk-exynos-audss.c |
469 | +++ b/drivers/clk/samsung/clk-exynos-audss.c |
470 | @@ -82,6 +82,7 @@ static const struct of_device_id exynos_audss_clk_of_match[] = { |
471 | .data = (void *)TYPE_EXYNOS5420, }, |
472 | {}, |
473 | }; |
474 | +MODULE_DEVICE_TABLE(of, exynos_audss_clk_of_match); |
475 | |
476 | static void exynos_audss_clk_teardown(void) |
477 | { |
478 | diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c |
479 | index c184eb84101e..4f87f3e76d83 100644 |
480 | --- a/drivers/clocksource/timer-sun5i.c |
481 | +++ b/drivers/clocksource/timer-sun5i.c |
482 | @@ -152,6 +152,13 @@ static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id) |
483 | return IRQ_HANDLED; |
484 | } |
485 | |
486 | +static cycle_t sun5i_clksrc_read(struct clocksource *clksrc) |
487 | +{ |
488 | + struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc); |
489 | + |
490 | + return ~readl(cs->timer.base + TIMER_CNTVAL_LO_REG(1)); |
491 | +} |
492 | + |
493 | static int sun5i_rate_cb_clksrc(struct notifier_block *nb, |
494 | unsigned long event, void *data) |
495 | { |
496 | @@ -210,8 +217,13 @@ static int __init sun5i_setup_clocksource(struct device_node *node, |
497 | writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, |
498 | base + TIMER_CTL_REG(1)); |
499 | |
500 | - ret = clocksource_mmio_init(base + TIMER_CNTVAL_LO_REG(1), node->name, |
501 | - rate, 340, 32, clocksource_mmio_readl_down); |
502 | + cs->clksrc.name = node->name; |
503 | + cs->clksrc.rating = 340; |
504 | + cs->clksrc.read = sun5i_clksrc_read; |
505 | + cs->clksrc.mask = CLOCKSOURCE_MASK(32); |
506 | + cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS; |
507 | + |
508 | + ret = clocksource_register_hz(&cs->clksrc, rate); |
509 | if (ret) { |
510 | pr_err("Couldn't register clock source.\n"); |
511 | goto err_remove_notifier; |
512 | diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c |
513 | index cd5dc27320a2..1ed6132b993c 100644 |
514 | --- a/drivers/gpio/gpio-mvebu.c |
515 | +++ b/drivers/gpio/gpio-mvebu.c |
516 | @@ -293,10 +293,10 @@ static void mvebu_gpio_irq_ack(struct irq_data *d) |
517 | { |
518 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
519 | struct mvebu_gpio_chip *mvchip = gc->private; |
520 | - u32 mask = ~(1 << (d->irq - gc->irq_base)); |
521 | + u32 mask = d->mask; |
522 | |
523 | irq_gc_lock(gc); |
524 | - writel_relaxed(mask, mvebu_gpioreg_edge_cause(mvchip)); |
525 | + writel_relaxed(~mask, mvebu_gpioreg_edge_cause(mvchip)); |
526 | irq_gc_unlock(gc); |
527 | } |
528 | |
529 | @@ -305,7 +305,7 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d) |
530 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
531 | struct mvebu_gpio_chip *mvchip = gc->private; |
532 | struct irq_chip_type *ct = irq_data_get_chip_type(d); |
533 | - u32 mask = 1 << (d->irq - gc->irq_base); |
534 | + u32 mask = d->mask; |
535 | |
536 | irq_gc_lock(gc); |
537 | ct->mask_cache_priv &= ~mask; |
538 | @@ -319,8 +319,7 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d) |
539 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
540 | struct mvebu_gpio_chip *mvchip = gc->private; |
541 | struct irq_chip_type *ct = irq_data_get_chip_type(d); |
542 | - |
543 | - u32 mask = 1 << (d->irq - gc->irq_base); |
544 | + u32 mask = d->mask; |
545 | |
546 | irq_gc_lock(gc); |
547 | ct->mask_cache_priv |= mask; |
548 | @@ -333,8 +332,7 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d) |
549 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
550 | struct mvebu_gpio_chip *mvchip = gc->private; |
551 | struct irq_chip_type *ct = irq_data_get_chip_type(d); |
552 | - |
553 | - u32 mask = 1 << (d->irq - gc->irq_base); |
554 | + u32 mask = d->mask; |
555 | |
556 | irq_gc_lock(gc); |
557 | ct->mask_cache_priv &= ~mask; |
558 | @@ -347,8 +345,7 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d) |
559 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
560 | struct mvebu_gpio_chip *mvchip = gc->private; |
561 | struct irq_chip_type *ct = irq_data_get_chip_type(d); |
562 | - |
563 | - u32 mask = 1 << (d->irq - gc->irq_base); |
564 | + u32 mask = d->mask; |
565 | |
566 | irq_gc_lock(gc); |
567 | ct->mask_cache_priv |= mask; |
568 | @@ -462,7 +459,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc) |
569 | for (i = 0; i < mvchip->chip.ngpio; i++) { |
570 | int irq; |
571 | |
572 | - irq = mvchip->irqbase + i; |
573 | + irq = irq_find_mapping(mvchip->domain, i); |
574 | |
575 | if (!(cause & (1 << i))) |
576 | continue; |
577 | @@ -655,6 +652,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev) |
578 | struct irq_chip_type *ct; |
579 | struct clk *clk; |
580 | unsigned int ngpios; |
581 | + bool have_irqs; |
582 | int soc_variant; |
583 | int i, cpu, id; |
584 | int err; |
585 | @@ -665,6 +663,9 @@ static int mvebu_gpio_probe(struct platform_device *pdev) |
586 | else |
587 | soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION; |
588 | |
589 | + /* Some gpio controllers do not provide irq support */ |
590 | + have_irqs = of_irq_count(np) != 0; |
591 | + |
592 | mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip), |
593 | GFP_KERNEL); |
594 | if (!mvchip) |
595 | @@ -697,7 +698,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev) |
596 | mvchip->chip.get = mvebu_gpio_get; |
597 | mvchip->chip.direction_output = mvebu_gpio_direction_output; |
598 | mvchip->chip.set = mvebu_gpio_set; |
599 | - mvchip->chip.to_irq = mvebu_gpio_to_irq; |
600 | + if (have_irqs) |
601 | + mvchip->chip.to_irq = mvebu_gpio_to_irq; |
602 | mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK; |
603 | mvchip->chip.ngpio = ngpios; |
604 | mvchip->chip.can_sleep = false; |
605 | @@ -758,34 +760,30 @@ static int mvebu_gpio_probe(struct platform_device *pdev) |
606 | devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip); |
607 | |
608 | /* Some gpio controllers do not provide irq support */ |
609 | - if (!of_irq_count(np)) |
610 | + if (!have_irqs) |
611 | return 0; |
612 | |
613 | - /* Setup the interrupt handlers. Each chip can have up to 4 |
614 | - * interrupt handlers, with each handler dealing with 8 GPIO |
615 | - * pins. */ |
616 | - for (i = 0; i < 4; i++) { |
617 | - int irq = platform_get_irq(pdev, i); |
618 | - |
619 | - if (irq < 0) |
620 | - continue; |
621 | - irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler, |
622 | - mvchip); |
623 | - } |
624 | - |
625 | - mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1); |
626 | - if (mvchip->irqbase < 0) { |
627 | - dev_err(&pdev->dev, "no irqs\n"); |
628 | - return mvchip->irqbase; |
629 | + mvchip->domain = |
630 | + irq_domain_add_linear(np, ngpios, &irq_generic_chip_ops, NULL); |
631 | + if (!mvchip->domain) { |
632 | + dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n", |
633 | + mvchip->chip.label); |
634 | + return -ENODEV; |
635 | } |
636 | |
637 | - gc = irq_alloc_generic_chip("mvebu_gpio_irq", 2, mvchip->irqbase, |
638 | - mvchip->membase, handle_level_irq); |
639 | - if (!gc) { |
640 | - dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n"); |
641 | - return -ENOMEM; |
642 | + err = irq_alloc_domain_generic_chips( |
643 | + mvchip->domain, ngpios, 2, np->name, handle_level_irq, |
644 | + IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_LEVEL, 0, 0); |
645 | + if (err) { |
646 | + dev_err(&pdev->dev, "couldn't allocate irq chips %s (DT).\n", |
647 | + mvchip->chip.label); |
648 | + goto err_domain; |
649 | } |
650 | |
651 | + /* NOTE: The common accessors cannot be used because of the percpu |
652 | + * access to the mask registers |
653 | + */ |
654 | + gc = irq_get_domain_generic_chip(mvchip->domain, 0); |
655 | gc->private = mvchip; |
656 | ct = &gc->chip_types[0]; |
657 | ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW; |
658 | @@ -803,27 +801,23 @@ static int mvebu_gpio_probe(struct platform_device *pdev) |
659 | ct->handler = handle_edge_irq; |
660 | ct->chip.name = mvchip->chip.label; |
661 | |
662 | - irq_setup_generic_chip(gc, IRQ_MSK(ngpios), 0, |
663 | - IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE); |
664 | + /* Setup the interrupt handlers. Each chip can have up to 4 |
665 | + * interrupt handlers, with each handler dealing with 8 GPIO |
666 | + * pins. |
667 | + */ |
668 | + for (i = 0; i < 4; i++) { |
669 | + int irq = platform_get_irq(pdev, i); |
670 | |
671 | - /* Setup irq domain on top of the generic chip. */ |
672 | - mvchip->domain = irq_domain_add_simple(np, mvchip->chip.ngpio, |
673 | - mvchip->irqbase, |
674 | - &irq_domain_simple_ops, |
675 | - mvchip); |
676 | - if (!mvchip->domain) { |
677 | - dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n", |
678 | - mvchip->chip.label); |
679 | - err = -ENODEV; |
680 | - goto err_generic_chip; |
681 | + if (irq < 0) |
682 | + continue; |
683 | + irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler, |
684 | + mvchip); |
685 | } |
686 | |
687 | return 0; |
688 | |
689 | -err_generic_chip: |
690 | - irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST, |
691 | - IRQ_LEVEL | IRQ_NOPROBE); |
692 | - kfree(gc); |
693 | +err_domain: |
694 | + irq_domain_remove(mvchip->domain); |
695 | |
696 | return err; |
697 | } |
698 | diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c |
699 | index a28feb3edf33..e3fc90130855 100644 |
700 | --- a/drivers/gpio/gpiolib-of.c |
701 | +++ b/drivers/gpio/gpiolib-of.c |
702 | @@ -26,14 +26,18 @@ |
703 | |
704 | #include "gpiolib.h" |
705 | |
706 | -static int of_gpiochip_match_node(struct gpio_chip *chip, void *data) |
707 | +static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data) |
708 | { |
709 | - return chip->gpiodev->dev.of_node == data; |
710 | + struct of_phandle_args *gpiospec = data; |
711 | + |
712 | + return chip->gpiodev->dev.of_node == gpiospec->np && |
713 | + chip->of_xlate(chip, gpiospec, NULL) >= 0; |
714 | } |
715 | |
716 | -static struct gpio_chip *of_find_gpiochip_by_node(struct device_node *np) |
717 | +static struct gpio_chip *of_find_gpiochip_by_xlate( |
718 | + struct of_phandle_args *gpiospec) |
719 | { |
720 | - return gpiochip_find(np, of_gpiochip_match_node); |
721 | + return gpiochip_find(gpiospec, of_gpiochip_match_node_and_xlate); |
722 | } |
723 | |
724 | static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip, |
725 | @@ -79,7 +83,7 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np, |
726 | return ERR_PTR(ret); |
727 | } |
728 | |
729 | - chip = of_find_gpiochip_by_node(gpiospec.np); |
730 | + chip = of_find_gpiochip_by_xlate(&gpiospec); |
731 | if (!chip) { |
732 | desc = ERR_PTR(-EPROBE_DEFER); |
733 | goto out; |
734 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c |
735 | index 892d60fb225b..2057683f7b59 100644 |
736 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c |
737 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c |
738 | @@ -395,9 +395,12 @@ static int acp_hw_fini(void *handle) |
739 | { |
740 | int i, ret; |
741 | struct device *dev; |
742 | - |
743 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
744 | |
745 | + /* return early if no ACP */ |
746 | + if (!adev->acp.acp_genpd) |
747 | + return 0; |
748 | + |
749 | for (i = 0; i < ACP_DEVS ; i++) { |
750 | dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); |
751 | ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev); |
752 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c |
753 | index 9aa533cf4ad1..414a1600da54 100644 |
754 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c |
755 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c |
756 | @@ -605,6 +605,7 @@ static int __init amdgpu_init(void) |
757 | { |
758 | amdgpu_sync_init(); |
759 | amdgpu_fence_slab_init(); |
760 | + amd_sched_fence_slab_init(); |
761 | if (vgacon_text_force()) { |
762 | DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n"); |
763 | return -EINVAL; |
764 | @@ -624,6 +625,7 @@ static void __exit amdgpu_exit(void) |
765 | drm_pci_exit(driver, pdriver); |
766 | amdgpu_unregister_atpx_handler(); |
767 | amdgpu_sync_fini(); |
768 | + amd_sched_fence_slab_fini(); |
769 | amdgpu_fence_slab_fini(); |
770 | } |
771 | |
772 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c |
773 | index 0b109aebfec6..c82b95b838d0 100644 |
774 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c |
775 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c |
776 | @@ -68,6 +68,7 @@ int amdgpu_fence_slab_init(void) |
777 | |
778 | void amdgpu_fence_slab_fini(void) |
779 | { |
780 | + rcu_barrier(); |
781 | kmem_cache_destroy(amdgpu_fence_slab); |
782 | } |
783 | /* |
784 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |
785 | index e24a8af72d90..1ed64aedb2fe 100644 |
786 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |
787 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |
788 | @@ -99,6 +99,8 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) |
789 | |
790 | if ((amdgpu_runtime_pm != 0) && |
791 | amdgpu_has_atpx() && |
792 | + (amdgpu_is_atpx_hybrid() || |
793 | + amdgpu_has_atpx_dgpu_power_cntl()) && |
794 | ((flags & AMD_IS_APU) == 0)) |
795 | flags |= AMD_IS_PX; |
796 | |
797 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c |
798 | index 80120fa4092c..e86ca392a08c 100644 |
799 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c |
800 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c |
801 | @@ -1654,5 +1654,6 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev) |
802 | fence_put(adev->vm_manager.ids[i].first); |
803 | amdgpu_sync_free(&adev->vm_manager.ids[i].active); |
804 | fence_put(id->flushed_updates); |
805 | + fence_put(id->last_flush); |
806 | } |
807 | } |
808 | diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c |
809 | index 963a24d46a93..ffe1f85ce300 100644 |
810 | --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c |
811 | +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c |
812 | @@ -34,9 +34,6 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); |
813 | static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); |
814 | static void amd_sched_process_job(struct fence *f, struct fence_cb *cb); |
815 | |
816 | -struct kmem_cache *sched_fence_slab; |
817 | -atomic_t sched_fence_slab_ref = ATOMIC_INIT(0); |
818 | - |
819 | /* Initialize a given run queue struct */ |
820 | static void amd_sched_rq_init(struct amd_sched_rq *rq) |
821 | { |
822 | @@ -618,13 +615,6 @@ int amd_sched_init(struct amd_gpu_scheduler *sched, |
823 | INIT_LIST_HEAD(&sched->ring_mirror_list); |
824 | spin_lock_init(&sched->job_list_lock); |
825 | atomic_set(&sched->hw_rq_count, 0); |
826 | - if (atomic_inc_return(&sched_fence_slab_ref) == 1) { |
827 | - sched_fence_slab = kmem_cache_create( |
828 | - "amd_sched_fence", sizeof(struct amd_sched_fence), 0, |
829 | - SLAB_HWCACHE_ALIGN, NULL); |
830 | - if (!sched_fence_slab) |
831 | - return -ENOMEM; |
832 | - } |
833 | |
834 | /* Each scheduler will run on a seperate kernel thread */ |
835 | sched->thread = kthread_run(amd_sched_main, sched, sched->name); |
836 | @@ -645,6 +635,4 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched) |
837 | { |
838 | if (sched->thread) |
839 | kthread_stop(sched->thread); |
840 | - if (atomic_dec_and_test(&sched_fence_slab_ref)) |
841 | - kmem_cache_destroy(sched_fence_slab); |
842 | } |
843 | diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |
844 | index 7cbbbfb502ef..51068e6c3d9a 100644 |
845 | --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |
846 | +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |
847 | @@ -30,9 +30,6 @@ |
848 | struct amd_gpu_scheduler; |
849 | struct amd_sched_rq; |
850 | |
851 | -extern struct kmem_cache *sched_fence_slab; |
852 | -extern atomic_t sched_fence_slab_ref; |
853 | - |
854 | /** |
855 | * A scheduler entity is a wrapper around a job queue or a group |
856 | * of other entities. Entities take turns emitting jobs from their |
857 | @@ -145,6 +142,9 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, |
858 | struct amd_sched_entity *entity); |
859 | void amd_sched_entity_push_job(struct amd_sched_job *sched_job); |
860 | |
861 | +int amd_sched_fence_slab_init(void); |
862 | +void amd_sched_fence_slab_fini(void); |
863 | + |
864 | struct amd_sched_fence *amd_sched_fence_create( |
865 | struct amd_sched_entity *s_entity, void *owner); |
866 | void amd_sched_fence_scheduled(struct amd_sched_fence *fence); |
867 | diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c |
868 | index 6b63beaf7574..93ad2e1f8f57 100644 |
869 | --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c |
870 | +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c |
871 | @@ -27,6 +27,25 @@ |
872 | #include <drm/drmP.h> |
873 | #include "gpu_scheduler.h" |
874 | |
875 | +static struct kmem_cache *sched_fence_slab; |
876 | + |
877 | +int amd_sched_fence_slab_init(void) |
878 | +{ |
879 | + sched_fence_slab = kmem_cache_create( |
880 | + "amd_sched_fence", sizeof(struct amd_sched_fence), 0, |
881 | + SLAB_HWCACHE_ALIGN, NULL); |
882 | + if (!sched_fence_slab) |
883 | + return -ENOMEM; |
884 | + |
885 | + return 0; |
886 | +} |
887 | + |
888 | +void amd_sched_fence_slab_fini(void) |
889 | +{ |
890 | + rcu_barrier(); |
891 | + kmem_cache_destroy(sched_fence_slab); |
892 | +} |
893 | + |
894 | struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity, |
895 | void *owner) |
896 | { |
897 | diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c |
898 | index 5de36d8dcc68..d46fa2206722 100644 |
899 | --- a/drivers/gpu/drm/i915/i915_drv.c |
900 | +++ b/drivers/gpu/drm/i915/i915_drv.c |
901 | @@ -1490,8 +1490,6 @@ static int i915_drm_suspend(struct drm_device *dev) |
902 | |
903 | dev_priv->suspend_count++; |
904 | |
905 | - intel_display_set_init_power(dev_priv, false); |
906 | - |
907 | intel_csr_ucode_suspend(dev_priv); |
908 | |
909 | out: |
910 | @@ -1508,6 +1506,8 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) |
911 | |
912 | disable_rpm_wakeref_asserts(dev_priv); |
913 | |
914 | + intel_display_set_init_power(dev_priv, false); |
915 | + |
916 | fw_csr = !IS_BROXTON(dev_priv) && |
917 | suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload; |
918 | /* |
919 | diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c |
920 | index 63462f279187..e26f88965c58 100644 |
921 | --- a/drivers/gpu/drm/i915/intel_display.c |
922 | +++ b/drivers/gpu/drm/i915/intel_display.c |
923 | @@ -9737,6 +9737,29 @@ static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state) |
924 | bxt_set_cdclk(to_i915(dev), req_cdclk); |
925 | } |
926 | |
927 | +static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state, |
928 | + int pixel_rate) |
929 | +{ |
930 | + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); |
931 | + |
932 | + /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ |
933 | + if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) |
934 | + pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95); |
935 | + |
936 | + /* BSpec says "Do not use DisplayPort with CDCLK less than |
937 | + * 432 MHz, audio enabled, port width x4, and link rate |
938 | + * HBR2 (5.4 GHz), or else there may be audio corruption or |
939 | + * screen corruption." |
940 | + */ |
941 | + if (intel_crtc_has_dp_encoder(crtc_state) && |
942 | + crtc_state->has_audio && |
943 | + crtc_state->port_clock >= 540000 && |
944 | + crtc_state->lane_count == 4) |
945 | + pixel_rate = max(432000, pixel_rate); |
946 | + |
947 | + return pixel_rate; |
948 | +} |
949 | + |
950 | /* compute the max rate for new configuration */ |
951 | static int ilk_max_pixel_rate(struct drm_atomic_state *state) |
952 | { |
953 | @@ -9762,9 +9785,9 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state) |
954 | |
955 | pixel_rate = ilk_pipe_pixel_rate(crtc_state); |
956 | |
957 | - /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ |
958 | - if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) |
959 | - pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95); |
960 | + if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv)) |
961 | + pixel_rate = bdw_adjust_min_pipe_pixel_rate(crtc_state, |
962 | + pixel_rate); |
963 | |
964 | intel_state->min_pixclk[i] = pixel_rate; |
965 | } |
966 | diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c |
967 | index c3aa9e670d15..1421270071d2 100644 |
968 | --- a/drivers/gpu/drm/i915/intel_hdmi.c |
969 | +++ b/drivers/gpu/drm/i915/intel_hdmi.c |
970 | @@ -1759,6 +1759,50 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c |
971 | intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE; |
972 | } |
973 | |
974 | +static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv, |
975 | + enum port port) |
976 | +{ |
977 | + const struct ddi_vbt_port_info *info = |
978 | + &dev_priv->vbt.ddi_port_info[port]; |
979 | + u8 ddc_pin; |
980 | + |
981 | + if (info->alternate_ddc_pin) { |
982 | + DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n", |
983 | + info->alternate_ddc_pin, port_name(port)); |
984 | + return info->alternate_ddc_pin; |
985 | + } |
986 | + |
987 | + switch (port) { |
988 | + case PORT_B: |
989 | + if (IS_BROXTON(dev_priv)) |
990 | + ddc_pin = GMBUS_PIN_1_BXT; |
991 | + else |
992 | + ddc_pin = GMBUS_PIN_DPB; |
993 | + break; |
994 | + case PORT_C: |
995 | + if (IS_BROXTON(dev_priv)) |
996 | + ddc_pin = GMBUS_PIN_2_BXT; |
997 | + else |
998 | + ddc_pin = GMBUS_PIN_DPC; |
999 | + break; |
1000 | + case PORT_D: |
1001 | + if (IS_CHERRYVIEW(dev_priv)) |
1002 | + ddc_pin = GMBUS_PIN_DPD_CHV; |
1003 | + else |
1004 | + ddc_pin = GMBUS_PIN_DPD; |
1005 | + break; |
1006 | + default: |
1007 | + MISSING_CASE(port); |
1008 | + ddc_pin = GMBUS_PIN_DPB; |
1009 | + break; |
1010 | + } |
1011 | + |
1012 | + DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n", |
1013 | + ddc_pin, port_name(port)); |
1014 | + |
1015 | + return ddc_pin; |
1016 | +} |
1017 | + |
1018 | void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, |
1019 | struct intel_connector *intel_connector) |
1020 | { |
1021 | @@ -1768,7 +1812,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, |
1022 | struct drm_device *dev = intel_encoder->base.dev; |
1023 | struct drm_i915_private *dev_priv = to_i915(dev); |
1024 | enum port port = intel_dig_port->port; |
1025 | - uint8_t alternate_ddc_pin; |
1026 | |
1027 | DRM_DEBUG_KMS("Adding HDMI connector on port %c\n", |
1028 | port_name(port)); |
1029 | @@ -1786,12 +1829,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, |
1030 | connector->doublescan_allowed = 0; |
1031 | connector->stereo_allowed = 1; |
1032 | |
1033 | + intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port); |
1034 | + |
1035 | switch (port) { |
1036 | case PORT_B: |
1037 | - if (IS_BROXTON(dev_priv)) |
1038 | - intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT; |
1039 | - else |
1040 | - intel_hdmi->ddc_bus = GMBUS_PIN_DPB; |
1041 | /* |
1042 | * On BXT A0/A1, sw needs to activate DDIA HPD logic and |
1043 | * interrupts to check the external panel connection. |
1044 | @@ -1802,46 +1843,17 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, |
1045 | intel_encoder->hpd_pin = HPD_PORT_B; |
1046 | break; |
1047 | case PORT_C: |
1048 | - if (IS_BROXTON(dev_priv)) |
1049 | - intel_hdmi->ddc_bus = GMBUS_PIN_2_BXT; |
1050 | - else |
1051 | - intel_hdmi->ddc_bus = GMBUS_PIN_DPC; |
1052 | intel_encoder->hpd_pin = HPD_PORT_C; |
1053 | break; |
1054 | case PORT_D: |
1055 | - if (WARN_ON(IS_BROXTON(dev_priv))) |
1056 | - intel_hdmi->ddc_bus = GMBUS_PIN_DISABLED; |
1057 | - else if (IS_CHERRYVIEW(dev_priv)) |
1058 | - intel_hdmi->ddc_bus = GMBUS_PIN_DPD_CHV; |
1059 | - else |
1060 | - intel_hdmi->ddc_bus = GMBUS_PIN_DPD; |
1061 | intel_encoder->hpd_pin = HPD_PORT_D; |
1062 | break; |
1063 | case PORT_E: |
1064 | - /* On SKL PORT E doesn't have seperate GMBUS pin |
1065 | - * We rely on VBT to set a proper alternate GMBUS pin. */ |
1066 | - alternate_ddc_pin = |
1067 | - dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin; |
1068 | - switch (alternate_ddc_pin) { |
1069 | - case DDC_PIN_B: |
1070 | - intel_hdmi->ddc_bus = GMBUS_PIN_DPB; |
1071 | - break; |
1072 | - case DDC_PIN_C: |
1073 | - intel_hdmi->ddc_bus = GMBUS_PIN_DPC; |
1074 | - break; |
1075 | - case DDC_PIN_D: |
1076 | - intel_hdmi->ddc_bus = GMBUS_PIN_DPD; |
1077 | - break; |
1078 | - default: |
1079 | - MISSING_CASE(alternate_ddc_pin); |
1080 | - } |
1081 | intel_encoder->hpd_pin = HPD_PORT_E; |
1082 | break; |
1083 | - case PORT_A: |
1084 | - intel_encoder->hpd_pin = HPD_PORT_A; |
1085 | - /* Internal port only for eDP. */ |
1086 | default: |
1087 | - BUG(); |
1088 | + MISSING_CASE(port); |
1089 | + return; |
1090 | } |
1091 | |
1092 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
1093 | diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c |
1094 | index 554ca7115f98..edd2d0396290 100644 |
1095 | --- a/drivers/gpu/drm/radeon/radeon_device.c |
1096 | +++ b/drivers/gpu/drm/radeon/radeon_device.c |
1097 | @@ -104,6 +104,14 @@ static const char radeon_family_name[][16] = { |
1098 | "LAST", |
1099 | }; |
1100 | |
1101 | +#if defined(CONFIG_VGA_SWITCHEROO) |
1102 | +bool radeon_has_atpx_dgpu_power_cntl(void); |
1103 | +bool radeon_is_atpx_hybrid(void); |
1104 | +#else |
1105 | +static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; } |
1106 | +static inline bool radeon_is_atpx_hybrid(void) { return false; } |
1107 | +#endif |
1108 | + |
1109 | #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0) |
1110 | #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1) |
1111 | |
1112 | @@ -160,6 +168,11 @@ static void radeon_device_handle_px_quirks(struct radeon_device *rdev) |
1113 | |
1114 | if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX) |
1115 | rdev->flags &= ~RADEON_IS_PX; |
1116 | + |
1117 | + /* disable PX is the system doesn't support dGPU power control or hybrid gfx */ |
1118 | + if (!radeon_is_atpx_hybrid() && |
1119 | + !radeon_has_atpx_dgpu_power_cntl()) |
1120 | + rdev->flags &= ~RADEON_IS_PX; |
1121 | } |
1122 | |
1123 | /** |
1124 | diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c |
1125 | index da3fb069ec5c..ce69048c88e9 100644 |
1126 | --- a/drivers/iio/accel/st_accel_core.c |
1127 | +++ b/drivers/iio/accel/st_accel_core.c |
1128 | @@ -743,8 +743,8 @@ static int st_accel_read_raw(struct iio_dev *indio_dev, |
1129 | |
1130 | return IIO_VAL_INT; |
1131 | case IIO_CHAN_INFO_SCALE: |
1132 | - *val = 0; |
1133 | - *val2 = adata->current_fullscale->gain; |
1134 | + *val = adata->current_fullscale->gain / 1000000; |
1135 | + *val2 = adata->current_fullscale->gain % 1000000; |
1136 | return IIO_VAL_INT_PLUS_MICRO; |
1137 | case IIO_CHAN_INFO_SAMP_FREQ: |
1138 | *val = adata->odr; |
1139 | @@ -763,9 +763,13 @@ static int st_accel_write_raw(struct iio_dev *indio_dev, |
1140 | int err; |
1141 | |
1142 | switch (mask) { |
1143 | - case IIO_CHAN_INFO_SCALE: |
1144 | - err = st_sensors_set_fullscale_by_gain(indio_dev, val2); |
1145 | + case IIO_CHAN_INFO_SCALE: { |
1146 | + int gain; |
1147 | + |
1148 | + gain = val * 1000000 + val2; |
1149 | + err = st_sensors_set_fullscale_by_gain(indio_dev, gain); |
1150 | break; |
1151 | + } |
1152 | case IIO_CHAN_INFO_SAMP_FREQ: |
1153 | if (val2) |
1154 | return -EINVAL; |
1155 | diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c |
1156 | index dc33c1dd5191..b5beea53d6f6 100644 |
1157 | --- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c |
1158 | +++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c |
1159 | @@ -30,26 +30,26 @@ static struct { |
1160 | u32 usage_id; |
1161 | int unit; /* 0 for default others from HID sensor spec */ |
1162 | int scale_val0; /* scale, whole number */ |
1163 | - int scale_val1; /* scale, fraction in micros */ |
1164 | + int scale_val1; /* scale, fraction in nanos */ |
1165 | } unit_conversion[] = { |
1166 | - {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650}, |
1167 | + {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650000}, |
1168 | {HID_USAGE_SENSOR_ACCEL_3D, |
1169 | HID_USAGE_SENSOR_UNITS_METERS_PER_SEC_SQRD, 1, 0}, |
1170 | {HID_USAGE_SENSOR_ACCEL_3D, |
1171 | - HID_USAGE_SENSOR_UNITS_G, 9, 806650}, |
1172 | + HID_USAGE_SENSOR_UNITS_G, 9, 806650000}, |
1173 | |
1174 | - {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453}, |
1175 | + {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453293}, |
1176 | {HID_USAGE_SENSOR_GYRO_3D, |
1177 | HID_USAGE_SENSOR_UNITS_RADIANS_PER_SECOND, 1, 0}, |
1178 | {HID_USAGE_SENSOR_GYRO_3D, |
1179 | - HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453}, |
1180 | + HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453293}, |
1181 | |
1182 | - {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000}, |
1183 | + {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000000}, |
1184 | {HID_USAGE_SENSOR_COMPASS_3D, HID_USAGE_SENSOR_UNITS_GAUSS, 1, 0}, |
1185 | |
1186 | - {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453}, |
1187 | + {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453293}, |
1188 | {HID_USAGE_SENSOR_INCLINOMETER_3D, |
1189 | - HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453}, |
1190 | + HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453293}, |
1191 | {HID_USAGE_SENSOR_INCLINOMETER_3D, |
1192 | HID_USAGE_SENSOR_UNITS_RADIANS, 1, 0}, |
1193 | |
1194 | @@ -57,7 +57,7 @@ static struct { |
1195 | {HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0}, |
1196 | |
1197 | {HID_USAGE_SENSOR_PRESSURE, 0, 100, 0}, |
1198 | - {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000}, |
1199 | + {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000000}, |
1200 | }; |
1201 | |
1202 | static int pow_10(unsigned power) |
1203 | @@ -266,15 +266,15 @@ EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value); |
1204 | /* |
1205 | * This fuction applies the unit exponent to the scale. |
1206 | * For example: |
1207 | - * 9.806650 ->exp:2-> val0[980]val1[665000] |
1208 | - * 9.000806 ->exp:2-> val0[900]val1[80600] |
1209 | - * 0.174535 ->exp:2-> val0[17]val1[453500] |
1210 | - * 1.001745 ->exp:0-> val0[1]val1[1745] |
1211 | - * 1.001745 ->exp:2-> val0[100]val1[174500] |
1212 | - * 1.001745 ->exp:4-> val0[10017]val1[450000] |
1213 | - * 9.806650 ->exp:-2-> val0[0]val1[98066] |
1214 | + * 9.806650000 ->exp:2-> val0[980]val1[665000000] |
1215 | + * 9.000806000 ->exp:2-> val0[900]val1[80600000] |
1216 | + * 0.174535293 ->exp:2-> val0[17]val1[453529300] |
1217 | + * 1.001745329 ->exp:0-> val0[1]val1[1745329] |
1218 | + * 1.001745329 ->exp:2-> val0[100]val1[174532900] |
1219 | + * 1.001745329 ->exp:4-> val0[10017]val1[453290000] |
1220 | + * 9.806650000 ->exp:-2-> val0[0]val1[98066500] |
1221 | */ |
1222 | -static void adjust_exponent_micro(int *val0, int *val1, int scale0, |
1223 | +static void adjust_exponent_nano(int *val0, int *val1, int scale0, |
1224 | int scale1, int exp) |
1225 | { |
1226 | int i; |
1227 | @@ -285,32 +285,32 @@ static void adjust_exponent_micro(int *val0, int *val1, int scale0, |
1228 | if (exp > 0) { |
1229 | *val0 = scale0 * pow_10(exp); |
1230 | res = 0; |
1231 | - if (exp > 6) { |
1232 | + if (exp > 9) { |
1233 | *val1 = 0; |
1234 | return; |
1235 | } |
1236 | for (i = 0; i < exp; ++i) { |
1237 | - x = scale1 / pow_10(5 - i); |
1238 | + x = scale1 / pow_10(8 - i); |
1239 | res += (pow_10(exp - 1 - i) * x); |
1240 | - scale1 = scale1 % pow_10(5 - i); |
1241 | + scale1 = scale1 % pow_10(8 - i); |
1242 | } |
1243 | *val0 += res; |
1244 | *val1 = scale1 * pow_10(exp); |
1245 | } else if (exp < 0) { |
1246 | exp = abs(exp); |
1247 | - if (exp > 6) { |
1248 | + if (exp > 9) { |
1249 | *val0 = *val1 = 0; |
1250 | return; |
1251 | } |
1252 | *val0 = scale0 / pow_10(exp); |
1253 | rem = scale0 % pow_10(exp); |
1254 | res = 0; |
1255 | - for (i = 0; i < (6 - exp); ++i) { |
1256 | - x = scale1 / pow_10(5 - i); |
1257 | - res += (pow_10(5 - exp - i) * x); |
1258 | - scale1 = scale1 % pow_10(5 - i); |
1259 | + for (i = 0; i < (9 - exp); ++i) { |
1260 | + x = scale1 / pow_10(8 - i); |
1261 | + res += (pow_10(8 - exp - i) * x); |
1262 | + scale1 = scale1 % pow_10(8 - i); |
1263 | } |
1264 | - *val1 = rem * pow_10(6 - exp) + res; |
1265 | + *val1 = rem * pow_10(9 - exp) + res; |
1266 | } else { |
1267 | *val0 = scale0; |
1268 | *val1 = scale1; |
1269 | @@ -332,14 +332,14 @@ int hid_sensor_format_scale(u32 usage_id, |
1270 | unit_conversion[i].unit == attr_info->units) { |
1271 | exp = hid_sensor_convert_exponent( |
1272 | attr_info->unit_expo); |
1273 | - adjust_exponent_micro(val0, val1, |
1274 | + adjust_exponent_nano(val0, val1, |
1275 | unit_conversion[i].scale_val0, |
1276 | unit_conversion[i].scale_val1, exp); |
1277 | break; |
1278 | } |
1279 | } |
1280 | |
1281 | - return IIO_VAL_INT_PLUS_MICRO; |
1282 | + return IIO_VAL_INT_PLUS_NANO; |
1283 | } |
1284 | EXPORT_SYMBOL(hid_sensor_format_scale); |
1285 | |
1286 | diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c |
1287 | index 2d5282e05482..32a594675a54 100644 |
1288 | --- a/drivers/iio/common/st_sensors/st_sensors_core.c |
1289 | +++ b/drivers/iio/common/st_sensors/st_sensors_core.c |
1290 | @@ -619,7 +619,7 @@ EXPORT_SYMBOL(st_sensors_sysfs_sampling_frequency_avail); |
1291 | ssize_t st_sensors_sysfs_scale_avail(struct device *dev, |
1292 | struct device_attribute *attr, char *buf) |
1293 | { |
1294 | - int i, len = 0; |
1295 | + int i, len = 0, q, r; |
1296 | struct iio_dev *indio_dev = dev_get_drvdata(dev); |
1297 | struct st_sensor_data *sdata = iio_priv(indio_dev); |
1298 | |
1299 | @@ -628,8 +628,10 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev, |
1300 | if (sdata->sensor_settings->fs.fs_avl[i].num == 0) |
1301 | break; |
1302 | |
1303 | - len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ", |
1304 | - sdata->sensor_settings->fs.fs_avl[i].gain); |
1305 | + q = sdata->sensor_settings->fs.fs_avl[i].gain / 1000000; |
1306 | + r = sdata->sensor_settings->fs.fs_avl[i].gain % 1000000; |
1307 | + |
1308 | + len += scnprintf(buf + len, PAGE_SIZE - len, "%u.%06u ", q, r); |
1309 | } |
1310 | mutex_unlock(&indio_dev->mlock); |
1311 | buf[len - 1] = '\n'; |
1312 | diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c |
1313 | index b98b9d94d184..a97e802ca523 100644 |
1314 | --- a/drivers/iio/orientation/hid-sensor-rotation.c |
1315 | +++ b/drivers/iio/orientation/hid-sensor-rotation.c |
1316 | @@ -335,6 +335,7 @@ static struct platform_driver hid_dev_rot_platform_driver = { |
1317 | .id_table = hid_dev_rot_ids, |
1318 | .driver = { |
1319 | .name = KBUILD_MODNAME, |
1320 | + .pm = &hid_sensor_pm_ops, |
1321 | }, |
1322 | .probe = hid_dev_rot_probe, |
1323 | .remove = hid_dev_rot_remove, |
1324 | diff --git a/drivers/input/rmi4/rmi_i2c.c b/drivers/input/rmi4/rmi_i2c.c |
1325 | index 6f2e0e4f0296..1ebc2c1debae 100644 |
1326 | --- a/drivers/input/rmi4/rmi_i2c.c |
1327 | +++ b/drivers/input/rmi4/rmi_i2c.c |
1328 | @@ -221,6 +221,21 @@ static const struct of_device_id rmi_i2c_of_match[] = { |
1329 | MODULE_DEVICE_TABLE(of, rmi_i2c_of_match); |
1330 | #endif |
1331 | |
1332 | +static void rmi_i2c_regulator_bulk_disable(void *data) |
1333 | +{ |
1334 | + struct rmi_i2c_xport *rmi_i2c = data; |
1335 | + |
1336 | + regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies), |
1337 | + rmi_i2c->supplies); |
1338 | +} |
1339 | + |
1340 | +static void rmi_i2c_unregister_transport(void *data) |
1341 | +{ |
1342 | + struct rmi_i2c_xport *rmi_i2c = data; |
1343 | + |
1344 | + rmi_unregister_transport_device(&rmi_i2c->xport); |
1345 | +} |
1346 | + |
1347 | static int rmi_i2c_probe(struct i2c_client *client, |
1348 | const struct i2c_device_id *id) |
1349 | { |
1350 | @@ -264,6 +279,12 @@ static int rmi_i2c_probe(struct i2c_client *client, |
1351 | if (retval < 0) |
1352 | return retval; |
1353 | |
1354 | + retval = devm_add_action_or_reset(&client->dev, |
1355 | + rmi_i2c_regulator_bulk_disable, |
1356 | + rmi_i2c); |
1357 | + if (retval) |
1358 | + return retval; |
1359 | + |
1360 | of_property_read_u32(client->dev.of_node, "syna,startup-delay-ms", |
1361 | &rmi_i2c->startup_delay); |
1362 | |
1363 | @@ -294,6 +315,11 @@ static int rmi_i2c_probe(struct i2c_client *client, |
1364 | client->addr); |
1365 | return retval; |
1366 | } |
1367 | + retval = devm_add_action_or_reset(&client->dev, |
1368 | + rmi_i2c_unregister_transport, |
1369 | + rmi_i2c); |
1370 | + if (retval) |
1371 | + return retval; |
1372 | |
1373 | retval = rmi_i2c_init_irq(client); |
1374 | if (retval < 0) |
1375 | @@ -304,17 +330,6 @@ static int rmi_i2c_probe(struct i2c_client *client, |
1376 | return 0; |
1377 | } |
1378 | |
1379 | -static int rmi_i2c_remove(struct i2c_client *client) |
1380 | -{ |
1381 | - struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client); |
1382 | - |
1383 | - rmi_unregister_transport_device(&rmi_i2c->xport); |
1384 | - regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies), |
1385 | - rmi_i2c->supplies); |
1386 | - |
1387 | - return 0; |
1388 | -} |
1389 | - |
1390 | #ifdef CONFIG_PM_SLEEP |
1391 | static int rmi_i2c_suspend(struct device *dev) |
1392 | { |
1393 | @@ -431,7 +446,6 @@ static struct i2c_driver rmi_i2c_driver = { |
1394 | }, |
1395 | .id_table = rmi_id, |
1396 | .probe = rmi_i2c_probe, |
1397 | - .remove = rmi_i2c_remove, |
1398 | }; |
1399 | |
1400 | module_i2c_driver(rmi_i2c_driver); |
1401 | diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c |
1402 | index 55bd1b34970c..4ebef607e214 100644 |
1403 | --- a/drivers/input/rmi4/rmi_spi.c |
1404 | +++ b/drivers/input/rmi4/rmi_spi.c |
1405 | @@ -396,6 +396,13 @@ static inline int rmi_spi_of_probe(struct spi_device *spi, |
1406 | } |
1407 | #endif |
1408 | |
1409 | +static void rmi_spi_unregister_transport(void *data) |
1410 | +{ |
1411 | + struct rmi_spi_xport *rmi_spi = data; |
1412 | + |
1413 | + rmi_unregister_transport_device(&rmi_spi->xport); |
1414 | +} |
1415 | + |
1416 | static int rmi_spi_probe(struct spi_device *spi) |
1417 | { |
1418 | struct rmi_spi_xport *rmi_spi; |
1419 | @@ -464,6 +471,11 @@ static int rmi_spi_probe(struct spi_device *spi) |
1420 | dev_err(&spi->dev, "failed to register transport.\n"); |
1421 | return retval; |
1422 | } |
1423 | + retval = devm_add_action_or_reset(&spi->dev, |
1424 | + rmi_spi_unregister_transport, |
1425 | + rmi_spi); |
1426 | + if (retval) |
1427 | + return retval; |
1428 | |
1429 | retval = rmi_spi_init_irq(spi); |
1430 | if (retval < 0) |
1431 | @@ -473,15 +485,6 @@ static int rmi_spi_probe(struct spi_device *spi) |
1432 | return 0; |
1433 | } |
1434 | |
1435 | -static int rmi_spi_remove(struct spi_device *spi) |
1436 | -{ |
1437 | - struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi); |
1438 | - |
1439 | - rmi_unregister_transport_device(&rmi_spi->xport); |
1440 | - |
1441 | - return 0; |
1442 | -} |
1443 | - |
1444 | #ifdef CONFIG_PM_SLEEP |
1445 | static int rmi_spi_suspend(struct device *dev) |
1446 | { |
1447 | @@ -577,7 +580,6 @@ static struct spi_driver rmi_spi_driver = { |
1448 | }, |
1449 | .id_table = rmi_id, |
1450 | .probe = rmi_spi_probe, |
1451 | - .remove = rmi_spi_remove, |
1452 | }; |
1453 | |
1454 | module_spi_driver(rmi_spi_driver); |
1455 | diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c |
1456 | index 96de97a46079..822fc4afad3c 100644 |
1457 | --- a/drivers/iommu/amd_iommu.c |
1458 | +++ b/drivers/iommu/amd_iommu.c |
1459 | @@ -1654,6 +1654,9 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom) |
1460 | |
1461 | free_pagetable(&dom->domain); |
1462 | |
1463 | + if (dom->domain.id) |
1464 | + domain_id_free(dom->domain.id); |
1465 | + |
1466 | kfree(dom); |
1467 | } |
1468 | |
1469 | diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c |
1470 | index ebb5bf3ddbd9..1257b0b80296 100644 |
1471 | --- a/drivers/iommu/intel-iommu.c |
1472 | +++ b/drivers/iommu/intel-iommu.c |
1473 | @@ -1711,6 +1711,7 @@ static void disable_dmar_iommu(struct intel_iommu *iommu) |
1474 | if (!iommu->domains || !iommu->domain_ids) |
1475 | return; |
1476 | |
1477 | +again: |
1478 | spin_lock_irqsave(&device_domain_lock, flags); |
1479 | list_for_each_entry_safe(info, tmp, &device_domain_list, global) { |
1480 | struct dmar_domain *domain; |
1481 | @@ -1723,10 +1724,19 @@ static void disable_dmar_iommu(struct intel_iommu *iommu) |
1482 | |
1483 | domain = info->domain; |
1484 | |
1485 | - dmar_remove_one_dev_info(domain, info->dev); |
1486 | + __dmar_remove_one_dev_info(info); |
1487 | |
1488 | - if (!domain_type_is_vm_or_si(domain)) |
1489 | + if (!domain_type_is_vm_or_si(domain)) { |
1490 | + /* |
1491 | + * The domain_exit() function can't be called under |
1492 | + * device_domain_lock, as it takes this lock itself. |
1493 | + * So release the lock here and re-run the loop |
1494 | + * afterwards. |
1495 | + */ |
1496 | + spin_unlock_irqrestore(&device_domain_lock, flags); |
1497 | domain_exit(domain); |
1498 | + goto again; |
1499 | + } |
1500 | } |
1501 | spin_unlock_irqrestore(&device_domain_lock, flags); |
1502 | |
1503 | diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c |
1504 | index def8ca1c982d..f50e51c1a9c8 100644 |
1505 | --- a/drivers/iommu/io-pgtable-arm-v7s.c |
1506 | +++ b/drivers/iommu/io-pgtable-arm-v7s.c |
1507 | @@ -633,6 +633,10 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, |
1508 | { |
1509 | struct arm_v7s_io_pgtable *data; |
1510 | |
1511 | +#ifdef PHYS_OFFSET |
1512 | + if (upper_32_bits(PHYS_OFFSET)) |
1513 | + return NULL; |
1514 | +#endif |
1515 | if (cfg->ias > ARM_V7S_ADDR_BITS || cfg->oas > ARM_V7S_ADDR_BITS) |
1516 | return NULL; |
1517 | |
1518 | diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c |
1519 | index bf890c3d9cda..f73e108dc980 100644 |
1520 | --- a/drivers/media/usb/dvb-usb/dib0700_core.c |
1521 | +++ b/drivers/media/usb/dvb-usb/dib0700_core.c |
1522 | @@ -677,7 +677,7 @@ static void dib0700_rc_urb_completion(struct urb *purb) |
1523 | struct dvb_usb_device *d = purb->context; |
1524 | struct dib0700_rc_response *poll_reply; |
1525 | enum rc_type protocol; |
1526 | - u32 uninitialized_var(keycode); |
1527 | + u32 keycode; |
1528 | u8 toggle; |
1529 | |
1530 | deb_info("%s()\n", __func__); |
1531 | @@ -719,7 +719,8 @@ static void dib0700_rc_urb_completion(struct urb *purb) |
1532 | poll_reply->nec.data == 0x00 && |
1533 | poll_reply->nec.not_data == 0xff) { |
1534 | poll_reply->data_state = 2; |
1535 | - break; |
1536 | + rc_repeat(d->rc_dev); |
1537 | + goto resubmit; |
1538 | } |
1539 | |
1540 | if ((poll_reply->nec.data ^ poll_reply->nec.not_data) != 0xff) { |
1541 | diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c |
1542 | index e9e6ea3ab73c..75b9d4ac8b1e 100644 |
1543 | --- a/drivers/misc/mei/bus-fixup.c |
1544 | +++ b/drivers/misc/mei/bus-fixup.c |
1545 | @@ -178,7 +178,7 @@ static int mei_nfc_if_version(struct mei_cl *cl, |
1546 | |
1547 | ret = 0; |
1548 | bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length); |
1549 | - if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { |
1550 | + if (bytes_recv < if_version_length) { |
1551 | dev_err(bus->dev, "Could not read IF version\n"); |
1552 | ret = -EIO; |
1553 | goto err; |
1554 | diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c |
1555 | index c57eb32dc075..6ef1e3c731f8 100644 |
1556 | --- a/drivers/mmc/core/mmc.c |
1557 | +++ b/drivers/mmc/core/mmc.c |
1558 | @@ -26,6 +26,8 @@ |
1559 | #include "mmc_ops.h" |
1560 | #include "sd_ops.h" |
1561 | |
1562 | +#define DEFAULT_CMD6_TIMEOUT_MS 500 |
1563 | + |
1564 | static const unsigned int tran_exp[] = { |
1565 | 10000, 100000, 1000000, 10000000, |
1566 | 0, 0, 0, 0 |
1567 | @@ -571,6 +573,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) |
1568 | card->erased_byte = 0x0; |
1569 | |
1570 | /* eMMC v4.5 or later */ |
1571 | + card->ext_csd.generic_cmd6_time = DEFAULT_CMD6_TIMEOUT_MS; |
1572 | if (card->ext_csd.rev >= 6) { |
1573 | card->ext_csd.feature_support |= MMC_DISCARD_FEATURE; |
1574 | |
1575 | diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c |
1576 | index d839147e591d..44ecebd1ea8c 100644 |
1577 | --- a/drivers/mmc/host/mxs-mmc.c |
1578 | +++ b/drivers/mmc/host/mxs-mmc.c |
1579 | @@ -661,13 +661,13 @@ static int mxs_mmc_probe(struct platform_device *pdev) |
1580 | |
1581 | platform_set_drvdata(pdev, mmc); |
1582 | |
1583 | + spin_lock_init(&host->lock); |
1584 | + |
1585 | ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0, |
1586 | dev_name(&pdev->dev), host); |
1587 | if (ret) |
1588 | goto out_free_dma; |
1589 | |
1590 | - spin_lock_init(&host->lock); |
1591 | - |
1592 | ret = mmc_add_host(mmc); |
1593 | if (ret) |
1594 | goto out_free_dma; |
1595 | diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c |
1596 | index 8ef44a2a2fd9..90ed2e12d345 100644 |
1597 | --- a/drivers/mmc/host/sdhci-msm.c |
1598 | +++ b/drivers/mmc/host/sdhci-msm.c |
1599 | @@ -647,6 +647,7 @@ static int sdhci_msm_probe(struct platform_device *pdev) |
1600 | if (msm_host->pwr_irq < 0) { |
1601 | dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n", |
1602 | msm_host->pwr_irq); |
1603 | + ret = msm_host->pwr_irq; |
1604 | goto clk_disable; |
1605 | } |
1606 | |
1607 | diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c |
1608 | index a8a022a7358f..6eb8f0705c65 100644 |
1609 | --- a/drivers/mmc/host/sdhci.c |
1610 | +++ b/drivers/mmc/host/sdhci.c |
1611 | @@ -2269,10 +2269,8 @@ static bool sdhci_request_done(struct sdhci_host *host) |
1612 | |
1613 | for (i = 0; i < SDHCI_MAX_MRQS; i++) { |
1614 | mrq = host->mrqs_done[i]; |
1615 | - if (mrq) { |
1616 | - host->mrqs_done[i] = NULL; |
1617 | + if (mrq) |
1618 | break; |
1619 | - } |
1620 | } |
1621 | |
1622 | if (!mrq) { |
1623 | @@ -2303,6 +2301,17 @@ static bool sdhci_request_done(struct sdhci_host *host) |
1624 | * upon error conditions. |
1625 | */ |
1626 | if (sdhci_needs_reset(host, mrq)) { |
1627 | + /* |
1628 | + * Do not finish until command and data lines are available for |
1629 | + * reset. Note there can only be one other mrq, so it cannot |
1630 | + * also be in mrqs_done, otherwise host->cmd and host->data_cmd |
1631 | + * would both be null. |
1632 | + */ |
1633 | + if (host->cmd || host->data_cmd) { |
1634 | + spin_unlock_irqrestore(&host->lock, flags); |
1635 | + return true; |
1636 | + } |
1637 | + |
1638 | /* Some controllers need this kick or reset won't work here */ |
1639 | if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) |
1640 | /* This is to force an update */ |
1641 | @@ -2310,10 +2319,8 @@ static bool sdhci_request_done(struct sdhci_host *host) |
1642 | |
1643 | /* Spec says we should do both at the same time, but Ricoh |
1644 | controllers do not like that. */ |
1645 | - if (!host->cmd) |
1646 | - sdhci_do_reset(host, SDHCI_RESET_CMD); |
1647 | - if (!host->data_cmd) |
1648 | - sdhci_do_reset(host, SDHCI_RESET_DATA); |
1649 | + sdhci_do_reset(host, SDHCI_RESET_CMD); |
1650 | + sdhci_do_reset(host, SDHCI_RESET_DATA); |
1651 | |
1652 | host->pending_reset = false; |
1653 | } |
1654 | @@ -2321,6 +2328,8 @@ static bool sdhci_request_done(struct sdhci_host *host) |
1655 | if (!sdhci_has_requests(host)) |
1656 | sdhci_led_deactivate(host); |
1657 | |
1658 | + host->mrqs_done[i] = NULL; |
1659 | + |
1660 | mmiowb(); |
1661 | spin_unlock_irqrestore(&host->lock, flags); |
1662 | |
1663 | @@ -2500,9 +2509,6 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) |
1664 | if (!host->data) { |
1665 | struct mmc_command *data_cmd = host->data_cmd; |
1666 | |
1667 | - if (data_cmd) |
1668 | - host->data_cmd = NULL; |
1669 | - |
1670 | /* |
1671 | * The "data complete" interrupt is also used to |
1672 | * indicate that a busy state has ended. See comment |
1673 | @@ -2510,11 +2516,13 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) |
1674 | */ |
1675 | if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { |
1676 | if (intmask & SDHCI_INT_DATA_TIMEOUT) { |
1677 | + host->data_cmd = NULL; |
1678 | data_cmd->error = -ETIMEDOUT; |
1679 | sdhci_finish_mrq(host, data_cmd->mrq); |
1680 | return; |
1681 | } |
1682 | if (intmask & SDHCI_INT_DATA_END) { |
1683 | + host->data_cmd = NULL; |
1684 | /* |
1685 | * Some cards handle busy-end interrupt |
1686 | * before the command completed, so make |
1687 | diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c |
1688 | index c74d16409941..6b46a37ba139 100644 |
1689 | --- a/drivers/net/ethernet/intel/i40e/i40e_main.c |
1690 | +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c |
1691 | @@ -9001,7 +9001,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
1692 | return 0; |
1693 | |
1694 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, |
1695 | - nlflags, 0, 0, filter_mask, NULL); |
1696 | + 0, 0, nlflags, filter_mask, NULL); |
1697 | } |
1698 | |
1699 | /* Hardware supports L4 tunnel length of 128B (=2^7) which includes |
1700 | diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c |
1701 | index 83deda4bb4d6..6f9563a96488 100644 |
1702 | --- a/drivers/nfc/mei_phy.c |
1703 | +++ b/drivers/nfc/mei_phy.c |
1704 | @@ -133,7 +133,7 @@ static int mei_nfc_if_version(struct nfc_mei_phy *phy) |
1705 | return -ENOMEM; |
1706 | |
1707 | bytes_recv = mei_cldev_recv(phy->cldev, (u8 *)reply, if_version_length); |
1708 | - if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { |
1709 | + if (bytes_recv < 0 || bytes_recv < if_version_length) { |
1710 | pr_err("Could not read IF version\n"); |
1711 | r = -EIO; |
1712 | goto err; |
1713 | diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c |
1714 | index 60f7eab11865..da134a0df7d8 100644 |
1715 | --- a/drivers/nvme/host/pci.c |
1716 | +++ b/drivers/nvme/host/pci.c |
1717 | @@ -1531,9 +1531,9 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) |
1718 | return 0; |
1719 | } |
1720 | |
1721 | -static void nvme_disable_io_queues(struct nvme_dev *dev) |
1722 | +static void nvme_disable_io_queues(struct nvme_dev *dev, int queues) |
1723 | { |
1724 | - int pass, queues = dev->online_queues - 1; |
1725 | + int pass; |
1726 | unsigned long timeout; |
1727 | u8 opcode = nvme_admin_delete_sq; |
1728 | |
1729 | @@ -1678,7 +1678,7 @@ static void nvme_pci_disable(struct nvme_dev *dev) |
1730 | |
1731 | static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) |
1732 | { |
1733 | - int i; |
1734 | + int i, queues; |
1735 | u32 csts = -1; |
1736 | |
1737 | del_timer_sync(&dev->watchdog_timer); |
1738 | @@ -1689,6 +1689,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) |
1739 | csts = readl(dev->bar + NVME_REG_CSTS); |
1740 | } |
1741 | |
1742 | + queues = dev->online_queues - 1; |
1743 | for (i = dev->queue_count - 1; i > 0; i--) |
1744 | nvme_suspend_queue(dev->queues[i]); |
1745 | |
1746 | @@ -1700,7 +1701,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) |
1747 | if (dev->queue_count) |
1748 | nvme_suspend_queue(dev->queues[0]); |
1749 | } else { |
1750 | - nvme_disable_io_queues(dev); |
1751 | + nvme_disable_io_queues(dev, queues); |
1752 | nvme_disable_admin_queue(dev, shutdown); |
1753 | } |
1754 | nvme_pci_disable(dev); |
1755 | diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c |
1756 | index 66c4d8f42233..9526e341988b 100644 |
1757 | --- a/drivers/pci/setup-res.c |
1758 | +++ b/drivers/pci/setup-res.c |
1759 | @@ -121,6 +121,14 @@ int pci_claim_resource(struct pci_dev *dev, int resource) |
1760 | return -EINVAL; |
1761 | } |
1762 | |
1763 | + /* |
1764 | + * If we have a shadow copy in RAM, the PCI device doesn't respond |
1765 | + * to the shadow range, so we don't need to claim it, and upstream |
1766 | + * bridges don't need to route the range to the device. |
1767 | + */ |
1768 | + if (res->flags & IORESOURCE_ROM_SHADOW) |
1769 | + return 0; |
1770 | + |
1771 | root = pci_find_parent_resource(dev, res); |
1772 | if (!root) { |
1773 | dev_info(&dev->dev, "can't claim BAR %d %pR: no compatible bridge window\n", |
1774 | diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c |
1775 | index 7f7700716398..5d1e505c3c63 100644 |
1776 | --- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c |
1777 | +++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c |
1778 | @@ -844,6 +844,6 @@ static struct platform_driver iproc_gpio_driver = { |
1779 | |
1780 | static int __init iproc_gpio_init(void) |
1781 | { |
1782 | - return platform_driver_probe(&iproc_gpio_driver, iproc_gpio_probe); |
1783 | + return platform_driver_register(&iproc_gpio_driver); |
1784 | } |
1785 | arch_initcall_sync(iproc_gpio_init); |
1786 | diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c |
1787 | index 35783db1c10b..c8deb8be1da7 100644 |
1788 | --- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c |
1789 | +++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c |
1790 | @@ -741,6 +741,6 @@ static struct platform_driver nsp_gpio_driver = { |
1791 | |
1792 | static int __init nsp_gpio_init(void) |
1793 | { |
1794 | - return platform_driver_probe(&nsp_gpio_driver, nsp_gpio_probe); |
1795 | + return platform_driver_register(&nsp_gpio_driver); |
1796 | } |
1797 | arch_initcall_sync(nsp_gpio_init); |
1798 | diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c |
1799 | index 0fe8fad25e4d..bc3150428d89 100644 |
1800 | --- a/drivers/pinctrl/intel/pinctrl-cherryview.c |
1801 | +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c |
1802 | @@ -1634,12 +1634,15 @@ static int chv_pinctrl_remove(struct platform_device *pdev) |
1803 | } |
1804 | |
1805 | #ifdef CONFIG_PM_SLEEP |
1806 | -static int chv_pinctrl_suspend(struct device *dev) |
1807 | +static int chv_pinctrl_suspend_noirq(struct device *dev) |
1808 | { |
1809 | struct platform_device *pdev = to_platform_device(dev); |
1810 | struct chv_pinctrl *pctrl = platform_get_drvdata(pdev); |
1811 | + unsigned long flags; |
1812 | int i; |
1813 | |
1814 | + raw_spin_lock_irqsave(&chv_lock, flags); |
1815 | + |
1816 | pctrl->saved_intmask = readl(pctrl->regs + CHV_INTMASK); |
1817 | |
1818 | for (i = 0; i < pctrl->community->npins; i++) { |
1819 | @@ -1660,15 +1663,20 @@ static int chv_pinctrl_suspend(struct device *dev) |
1820 | ctx->padctrl1 = readl(reg); |
1821 | } |
1822 | |
1823 | + raw_spin_unlock_irqrestore(&chv_lock, flags); |
1824 | + |
1825 | return 0; |
1826 | } |
1827 | |
1828 | -static int chv_pinctrl_resume(struct device *dev) |
1829 | +static int chv_pinctrl_resume_noirq(struct device *dev) |
1830 | { |
1831 | struct platform_device *pdev = to_platform_device(dev); |
1832 | struct chv_pinctrl *pctrl = platform_get_drvdata(pdev); |
1833 | + unsigned long flags; |
1834 | int i; |
1835 | |
1836 | + raw_spin_lock_irqsave(&chv_lock, flags); |
1837 | + |
1838 | /* |
1839 | * Mask all interrupts before restoring per-pin configuration |
1840 | * registers because we don't know in which state BIOS left them |
1841 | @@ -1713,12 +1721,15 @@ static int chv_pinctrl_resume(struct device *dev) |
1842 | chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); |
1843 | chv_writel(pctrl->saved_intmask, pctrl->regs + CHV_INTMASK); |
1844 | |
1845 | + raw_spin_unlock_irqrestore(&chv_lock, flags); |
1846 | + |
1847 | return 0; |
1848 | } |
1849 | #endif |
1850 | |
1851 | static const struct dev_pm_ops chv_pinctrl_pm_ops = { |
1852 | - SET_LATE_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend, chv_pinctrl_resume) |
1853 | + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend_noirq, |
1854 | + chv_pinctrl_resume_noirq) |
1855 | }; |
1856 | |
1857 | static const struct acpi_device_id chv_pinctrl_acpi_match[] = { |
1858 | diff --git a/drivers/platform/x86/toshiba-wmi.c b/drivers/platform/x86/toshiba-wmi.c |
1859 | index feac4576b837..2df07ee8f3c3 100644 |
1860 | --- a/drivers/platform/x86/toshiba-wmi.c |
1861 | +++ b/drivers/platform/x86/toshiba-wmi.c |
1862 | @@ -24,14 +24,15 @@ |
1863 | #include <linux/acpi.h> |
1864 | #include <linux/input.h> |
1865 | #include <linux/input/sparse-keymap.h> |
1866 | +#include <linux/dmi.h> |
1867 | |
1868 | MODULE_AUTHOR("Azael Avalos"); |
1869 | MODULE_DESCRIPTION("Toshiba WMI Hotkey Driver"); |
1870 | MODULE_LICENSE("GPL"); |
1871 | |
1872 | -#define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100" |
1873 | +#define WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100" |
1874 | |
1875 | -MODULE_ALIAS("wmi:"TOSHIBA_WMI_EVENT_GUID); |
1876 | +MODULE_ALIAS("wmi:"WMI_EVENT_GUID); |
1877 | |
1878 | static struct input_dev *toshiba_wmi_input_dev; |
1879 | |
1880 | @@ -63,6 +64,16 @@ static void toshiba_wmi_notify(u32 value, void *context) |
1881 | kfree(response.pointer); |
1882 | } |
1883 | |
1884 | +static struct dmi_system_id toshiba_wmi_dmi_table[] __initdata = { |
1885 | + { |
1886 | + .ident = "Toshiba laptop", |
1887 | + .matches = { |
1888 | + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), |
1889 | + }, |
1890 | + }, |
1891 | + {} |
1892 | +}; |
1893 | + |
1894 | static int __init toshiba_wmi_input_setup(void) |
1895 | { |
1896 | acpi_status status; |
1897 | @@ -81,7 +92,7 @@ static int __init toshiba_wmi_input_setup(void) |
1898 | if (err) |
1899 | goto err_free_dev; |
1900 | |
1901 | - status = wmi_install_notify_handler(TOSHIBA_WMI_EVENT_GUID, |
1902 | + status = wmi_install_notify_handler(WMI_EVENT_GUID, |
1903 | toshiba_wmi_notify, NULL); |
1904 | if (ACPI_FAILURE(status)) { |
1905 | err = -EIO; |
1906 | @@ -95,7 +106,7 @@ static int __init toshiba_wmi_input_setup(void) |
1907 | return 0; |
1908 | |
1909 | err_remove_notifier: |
1910 | - wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID); |
1911 | + wmi_remove_notify_handler(WMI_EVENT_GUID); |
1912 | err_free_keymap: |
1913 | sparse_keymap_free(toshiba_wmi_input_dev); |
1914 | err_free_dev: |
1915 | @@ -105,7 +116,7 @@ static int __init toshiba_wmi_input_setup(void) |
1916 | |
1917 | static void toshiba_wmi_input_destroy(void) |
1918 | { |
1919 | - wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID); |
1920 | + wmi_remove_notify_handler(WMI_EVENT_GUID); |
1921 | sparse_keymap_free(toshiba_wmi_input_dev); |
1922 | input_unregister_device(toshiba_wmi_input_dev); |
1923 | } |
1924 | @@ -114,7 +125,8 @@ static int __init toshiba_wmi_init(void) |
1925 | { |
1926 | int ret; |
1927 | |
1928 | - if (!wmi_has_guid(TOSHIBA_WMI_EVENT_GUID)) |
1929 | + if (!wmi_has_guid(WMI_EVENT_GUID) || |
1930 | + !dmi_check_system(toshiba_wmi_dmi_table)) |
1931 | return -ENODEV; |
1932 | |
1933 | ret = toshiba_wmi_input_setup(); |
1934 | @@ -130,7 +142,7 @@ static int __init toshiba_wmi_init(void) |
1935 | |
1936 | static void __exit toshiba_wmi_exit(void) |
1937 | { |
1938 | - if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID)) |
1939 | + if (wmi_has_guid(WMI_EVENT_GUID)) |
1940 | toshiba_wmi_input_destroy(); |
1941 | } |
1942 | |
1943 | diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c |
1944 | index b4478cc92b55..8895f77726e8 100644 |
1945 | --- a/drivers/rtc/rtc-pcf2123.c |
1946 | +++ b/drivers/rtc/rtc-pcf2123.c |
1947 | @@ -182,7 +182,8 @@ static ssize_t pcf2123_show(struct device *dev, struct device_attribute *attr, |
1948 | } |
1949 | |
1950 | static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr, |
1951 | - const char *buffer, size_t count) { |
1952 | + const char *buffer, size_t count) |
1953 | +{ |
1954 | struct pcf2123_sysfs_reg *r; |
1955 | unsigned long reg; |
1956 | unsigned long val; |
1957 | @@ -199,7 +200,7 @@ static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr, |
1958 | if (ret) |
1959 | return ret; |
1960 | |
1961 | - pcf2123_write_reg(dev, reg, val); |
1962 | + ret = pcf2123_write_reg(dev, reg, val); |
1963 | if (ret < 0) |
1964 | return -EIO; |
1965 | return count; |
1966 | diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c |
1967 | index 752b5c9d1ab2..920c42151e92 100644 |
1968 | --- a/drivers/scsi/device_handler/scsi_dh_alua.c |
1969 | +++ b/drivers/scsi/device_handler/scsi_dh_alua.c |
1970 | @@ -792,6 +792,7 @@ static void alua_rtpg_work(struct work_struct *work) |
1971 | WARN_ON(pg->flags & ALUA_PG_RUN_RTPG); |
1972 | WARN_ON(pg->flags & ALUA_PG_RUN_STPG); |
1973 | spin_unlock_irqrestore(&pg->lock, flags); |
1974 | + kref_put(&pg->kref, release_port_group); |
1975 | return; |
1976 | } |
1977 | if (pg->flags & ALUA_SYNC_STPG) |
1978 | @@ -889,6 +890,7 @@ static void alua_rtpg_queue(struct alua_port_group *pg, |
1979 | /* Do not queue if the worker is already running */ |
1980 | if (!(pg->flags & ALUA_PG_RUNNING)) { |
1981 | kref_get(&pg->kref); |
1982 | + sdev = NULL; |
1983 | start_queue = 1; |
1984 | } |
1985 | } |
1986 | @@ -900,7 +902,8 @@ static void alua_rtpg_queue(struct alua_port_group *pg, |
1987 | if (start_queue && |
1988 | !queue_delayed_work(alua_wq, &pg->rtpg_work, |
1989 | msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) { |
1990 | - scsi_device_put(sdev); |
1991 | + if (sdev) |
1992 | + scsi_device_put(sdev); |
1993 | kref_put(&pg->kref, release_port_group); |
1994 | } |
1995 | } |
1996 | diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c |
1997 | index 4cb79902e7a8..46c0f5ecd99d 100644 |
1998 | --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c |
1999 | +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c |
2000 | @@ -1273,9 +1273,9 @@ scsih_target_alloc(struct scsi_target *starget) |
2001 | sas_target_priv_data->handle = raid_device->handle; |
2002 | sas_target_priv_data->sas_address = raid_device->wwid; |
2003 | sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME; |
2004 | - sas_target_priv_data->raid_device = raid_device; |
2005 | if (ioc->is_warpdrive) |
2006 | - raid_device->starget = starget; |
2007 | + sas_target_priv_data->raid_device = raid_device; |
2008 | + raid_device->starget = starget; |
2009 | } |
2010 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); |
2011 | return 0; |
2012 | diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c |
2013 | index 2674f4c16bc3..e46e2c53871a 100644 |
2014 | --- a/drivers/scsi/qla2xxx/qla_os.c |
2015 | +++ b/drivers/scsi/qla2xxx/qla_os.c |
2016 | @@ -2341,6 +2341,8 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) |
2017 | { |
2018 | scsi_qla_host_t *vha = shost_priv(shost); |
2019 | |
2020 | + if (test_bit(UNLOADING, &vha->dpc_flags)) |
2021 | + return 1; |
2022 | if (!vha->host) |
2023 | return 1; |
2024 | if (time > vha->hw->loop_reset_delay * HZ) |
2025 | diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c |
2026 | index 7043eb0543f6..5ab49a798164 100644 |
2027 | --- a/drivers/staging/comedi/drivers/ni_tio.c |
2028 | +++ b/drivers/staging/comedi/drivers/ni_tio.c |
2029 | @@ -207,7 +207,8 @@ static int ni_tio_clock_period_ps(const struct ni_gpct *counter, |
2030 | * clock period is specified by user with prescaling |
2031 | * already taken into account. |
2032 | */ |
2033 | - return counter->clock_period_ps; |
2034 | + *period_ps = counter->clock_period_ps; |
2035 | + return 0; |
2036 | } |
2037 | |
2038 | switch (generic_clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) { |
2039 | diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c |
2040 | index 24c348d2f5bb..98d947338e01 100644 |
2041 | --- a/drivers/staging/iio/impedance-analyzer/ad5933.c |
2042 | +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c |
2043 | @@ -655,6 +655,7 @@ static void ad5933_work(struct work_struct *work) |
2044 | __be16 buf[2]; |
2045 | int val[2]; |
2046 | unsigned char status; |
2047 | + int ret; |
2048 | |
2049 | mutex_lock(&indio_dev->mlock); |
2050 | if (st->state == AD5933_CTRL_INIT_START_FREQ) { |
2051 | @@ -662,19 +663,22 @@ static void ad5933_work(struct work_struct *work) |
2052 | ad5933_cmd(st, AD5933_CTRL_START_SWEEP); |
2053 | st->state = AD5933_CTRL_START_SWEEP; |
2054 | schedule_delayed_work(&st->work, st->poll_time_jiffies); |
2055 | - mutex_unlock(&indio_dev->mlock); |
2056 | - return; |
2057 | + goto out; |
2058 | } |
2059 | |
2060 | - ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status); |
2061 | + ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status); |
2062 | + if (ret) |
2063 | + goto out; |
2064 | |
2065 | if (status & AD5933_STAT_DATA_VALID) { |
2066 | int scan_count = bitmap_weight(indio_dev->active_scan_mask, |
2067 | indio_dev->masklength); |
2068 | - ad5933_i2c_read(st->client, |
2069 | + ret = ad5933_i2c_read(st->client, |
2070 | test_bit(1, indio_dev->active_scan_mask) ? |
2071 | AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA, |
2072 | scan_count * 2, (u8 *)buf); |
2073 | + if (ret) |
2074 | + goto out; |
2075 | |
2076 | if (scan_count == 2) { |
2077 | val[0] = be16_to_cpu(buf[0]); |
2078 | @@ -686,8 +690,7 @@ static void ad5933_work(struct work_struct *work) |
2079 | } else { |
2080 | /* no data available - try again later */ |
2081 | schedule_delayed_work(&st->work, st->poll_time_jiffies); |
2082 | - mutex_unlock(&indio_dev->mlock); |
2083 | - return; |
2084 | + goto out; |
2085 | } |
2086 | |
2087 | if (status & AD5933_STAT_SWEEP_DONE) { |
2088 | @@ -700,7 +703,7 @@ static void ad5933_work(struct work_struct *work) |
2089 | ad5933_cmd(st, AD5933_CTRL_INC_FREQ); |
2090 | schedule_delayed_work(&st->work, st->poll_time_jiffies); |
2091 | } |
2092 | - |
2093 | +out: |
2094 | mutex_unlock(&indio_dev->mlock); |
2095 | } |
2096 | |
2097 | diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c |
2098 | index a324322ee0ad..499952c8ef39 100644 |
2099 | --- a/drivers/staging/nvec/nvec_ps2.c |
2100 | +++ b/drivers/staging/nvec/nvec_ps2.c |
2101 | @@ -106,13 +106,12 @@ static int nvec_mouse_probe(struct platform_device *pdev) |
2102 | { |
2103 | struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent); |
2104 | struct serio *ser_dev; |
2105 | - char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 }; |
2106 | |
2107 | - ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL); |
2108 | + ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL); |
2109 | if (!ser_dev) |
2110 | return -ENOMEM; |
2111 | |
2112 | - ser_dev->id.type = SERIO_PS_PSTHRU; |
2113 | + ser_dev->id.type = SERIO_8042; |
2114 | ser_dev->write = ps2_sendcommand; |
2115 | ser_dev->start = ps2_startstreaming; |
2116 | ser_dev->stop = ps2_stopstreaming; |
2117 | @@ -127,9 +126,6 @@ static int nvec_mouse_probe(struct platform_device *pdev) |
2118 | |
2119 | serio_register_port(ser_dev); |
2120 | |
2121 | - /* mouse reset */ |
2122 | - nvec_write_async(nvec, mouse_reset, sizeof(mouse_reset)); |
2123 | - |
2124 | return 0; |
2125 | } |
2126 | |
2127 | diff --git a/drivers/staging/sm750fb/ddk750_reg.h b/drivers/staging/sm750fb/ddk750_reg.h |
2128 | index 955247979aaa..4ed6d8d7712a 100644 |
2129 | --- a/drivers/staging/sm750fb/ddk750_reg.h |
2130 | +++ b/drivers/staging/sm750fb/ddk750_reg.h |
2131 | @@ -601,13 +601,13 @@ |
2132 | |
2133 | #define PANEL_PLANE_TL 0x08001C |
2134 | #define PANEL_PLANE_TL_TOP_SHIFT 16 |
2135 | -#define PANEL_PLANE_TL_TOP_MASK (0xeff << 16) |
2136 | -#define PANEL_PLANE_TL_LEFT_MASK 0xeff |
2137 | +#define PANEL_PLANE_TL_TOP_MASK (0x7ff << 16) |
2138 | +#define PANEL_PLANE_TL_LEFT_MASK 0x7ff |
2139 | |
2140 | #define PANEL_PLANE_BR 0x080020 |
2141 | #define PANEL_PLANE_BR_BOTTOM_SHIFT 16 |
2142 | -#define PANEL_PLANE_BR_BOTTOM_MASK (0xeff << 16) |
2143 | -#define PANEL_PLANE_BR_RIGHT_MASK 0xeff |
2144 | +#define PANEL_PLANE_BR_BOTTOM_MASK (0x7ff << 16) |
2145 | +#define PANEL_PLANE_BR_RIGHT_MASK 0x7ff |
2146 | |
2147 | #define PANEL_HORIZONTAL_TOTAL 0x080024 |
2148 | #define PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT 16 |
2149 | diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c |
2150 | index 8bbde52db376..21aeac59df48 100644 |
2151 | --- a/drivers/tty/serial/atmel_serial.c |
2152 | +++ b/drivers/tty/serial/atmel_serial.c |
2153 | @@ -2026,6 +2026,7 @@ static void atmel_serial_pm(struct uart_port *port, unsigned int state, |
2154 | static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, |
2155 | struct ktermios *old) |
2156 | { |
2157 | + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); |
2158 | unsigned long flags; |
2159 | unsigned int old_mode, mode, imr, quot, baud; |
2160 | |
2161 | @@ -2129,11 +2130,29 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, |
2162 | mode |= ATMEL_US_USMODE_RS485; |
2163 | } else if (termios->c_cflag & CRTSCTS) { |
2164 | /* RS232 with hardware handshake (RTS/CTS) */ |
2165 | - if (atmel_use_dma_rx(port) && !atmel_use_fifo(port)) { |
2166 | - dev_info(port->dev, "not enabling hardware flow control because DMA is used"); |
2167 | - termios->c_cflag &= ~CRTSCTS; |
2168 | - } else { |
2169 | + if (atmel_use_fifo(port) && |
2170 | + !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) { |
2171 | + /* |
2172 | + * with ATMEL_US_USMODE_HWHS set, the controller will |
2173 | + * be able to drive the RTS pin high/low when the RX |
2174 | + * FIFO is above RXFTHRES/below RXFTHRES2. |
2175 | + * It will also disable the transmitter when the CTS |
2176 | + * pin is high. |
2177 | + * This mode is not activated if CTS pin is a GPIO |
2178 | + * because in this case, the transmitter is always |
2179 | + * disabled (there must be an internal pull-up |
2180 | + * responsible for this behaviour). |
2181 | + * If the RTS pin is a GPIO, the controller won't be |
2182 | + * able to drive it according to the FIFO thresholds, |
2183 | + * but it will be handled by the driver. |
2184 | + */ |
2185 | mode |= ATMEL_US_USMODE_HWHS; |
2186 | + } else { |
2187 | + /* |
2188 | + * For platforms without FIFO, the flow control is |
2189 | + * handled by the driver. |
2190 | + */ |
2191 | + mode |= ATMEL_US_USMODE_NORMAL; |
2192 | } |
2193 | } else { |
2194 | /* RS232 without hadware handshake */ |
2195 | diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c |
2196 | index 0f3f62e81e5b..3ca9fdb0a271 100644 |
2197 | --- a/drivers/usb/class/cdc-acm.c |
2198 | +++ b/drivers/usb/class/cdc-acm.c |
2199 | @@ -946,8 +946,6 @@ static int wait_serial_change(struct acm *acm, unsigned long arg) |
2200 | DECLARE_WAITQUEUE(wait, current); |
2201 | struct async_icount old, new; |
2202 | |
2203 | - if (arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD)) |
2204 | - return -EINVAL; |
2205 | do { |
2206 | spin_lock_irq(&acm->read_lock); |
2207 | old = acm->oldcount; |
2208 | @@ -1175,6 +1173,8 @@ static int acm_probe(struct usb_interface *intf, |
2209 | if (quirks == IGNORE_DEVICE) |
2210 | return -ENODEV; |
2211 | |
2212 | + memset(&h, 0x00, sizeof(struct usb_cdc_parsed_header)); |
2213 | + |
2214 | num_rx_buf = (quirks == SINGLE_RX_URB) ? 1 : ACM_NR; |
2215 | |
2216 | /* handle quirks deadly to normal probing*/ |
2217 | diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c |
2218 | index 35d092456bec..2d47010e55af 100644 |
2219 | --- a/drivers/usb/dwc3/core.c |
2220 | +++ b/drivers/usb/dwc3/core.c |
2221 | @@ -669,15 +669,14 @@ static int dwc3_core_init(struct dwc3 *dwc) |
2222 | return 0; |
2223 | |
2224 | err4: |
2225 | - phy_power_off(dwc->usb2_generic_phy); |
2226 | + phy_power_off(dwc->usb3_generic_phy); |
2227 | |
2228 | err3: |
2229 | - phy_power_off(dwc->usb3_generic_phy); |
2230 | + phy_power_off(dwc->usb2_generic_phy); |
2231 | |
2232 | err2: |
2233 | usb_phy_set_suspend(dwc->usb2_phy, 1); |
2234 | usb_phy_set_suspend(dwc->usb3_phy, 1); |
2235 | - dwc3_core_exit(dwc); |
2236 | |
2237 | err1: |
2238 | usb_phy_shutdown(dwc->usb2_phy); |
2239 | diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c |
2240 | index 9b9e71f2c66e..f590adaaba8e 100644 |
2241 | --- a/drivers/usb/gadget/function/u_ether.c |
2242 | +++ b/drivers/usb/gadget/function/u_ether.c |
2243 | @@ -585,14 +585,6 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, |
2244 | |
2245 | req->length = length; |
2246 | |
2247 | - /* throttle high/super speed IRQ rate back slightly */ |
2248 | - if (gadget_is_dualspeed(dev->gadget)) |
2249 | - req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH || |
2250 | - dev->gadget->speed == USB_SPEED_SUPER)) && |
2251 | - !list_empty(&dev->tx_reqs)) |
2252 | - ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0) |
2253 | - : 0; |
2254 | - |
2255 | retval = usb_ep_queue(in, req, GFP_ATOMIC); |
2256 | switch (retval) { |
2257 | default: |
2258 | diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c |
2259 | index 6abb83cd7681..74265b2f806c 100644 |
2260 | --- a/drivers/watchdog/watchdog_core.c |
2261 | +++ b/drivers/watchdog/watchdog_core.c |
2262 | @@ -349,7 +349,7 @@ int devm_watchdog_register_device(struct device *dev, |
2263 | struct watchdog_device **rcwdd; |
2264 | int ret; |
2265 | |
2266 | - rcwdd = devres_alloc(devm_watchdog_unregister_device, sizeof(*wdd), |
2267 | + rcwdd = devres_alloc(devm_watchdog_unregister_device, sizeof(*rcwdd), |
2268 | GFP_KERNEL); |
2269 | if (!rcwdd) |
2270 | return -ENOMEM; |
2271 | diff --git a/fs/coredump.c b/fs/coredump.c |
2272 | index 281b768000e6..eb9c92c9b20f 100644 |
2273 | --- a/fs/coredump.c |
2274 | +++ b/fs/coredump.c |
2275 | @@ -1,6 +1,7 @@ |
2276 | #include <linux/slab.h> |
2277 | #include <linux/file.h> |
2278 | #include <linux/fdtable.h> |
2279 | +#include <linux/freezer.h> |
2280 | #include <linux/mm.h> |
2281 | #include <linux/stat.h> |
2282 | #include <linux/fcntl.h> |
2283 | @@ -423,7 +424,9 @@ static int coredump_wait(int exit_code, struct core_state *core_state) |
2284 | if (core_waiters > 0) { |
2285 | struct core_thread *ptr; |
2286 | |
2287 | + freezer_do_not_count(); |
2288 | wait_for_completion(&core_state->startup); |
2289 | + freezer_count(); |
2290 | /* |
2291 | * Wait for all the threads to become inactive, so that |
2292 | * all the thread context (extended register state, like |
2293 | diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c |
2294 | index b62973045a3e..150c5a1879bf 100644 |
2295 | --- a/fs/nfs/nfs4session.c |
2296 | +++ b/fs/nfs/nfs4session.c |
2297 | @@ -178,12 +178,14 @@ static int nfs4_slot_get_seqid(struct nfs4_slot_table *tbl, u32 slotid, |
2298 | __must_hold(&tbl->slot_tbl_lock) |
2299 | { |
2300 | struct nfs4_slot *slot; |
2301 | + int ret; |
2302 | |
2303 | slot = nfs4_lookup_slot(tbl, slotid); |
2304 | - if (IS_ERR(slot)) |
2305 | - return PTR_ERR(slot); |
2306 | - *seq_nr = slot->seq_nr; |
2307 | - return 0; |
2308 | + ret = PTR_ERR_OR_ZERO(slot); |
2309 | + if (!ret) |
2310 | + *seq_nr = slot->seq_nr; |
2311 | + |
2312 | + return ret; |
2313 | } |
2314 | |
2315 | /* |
2316 | diff --git a/include/linux/acpi.h b/include/linux/acpi.h |
2317 | index c5eaf2f80a4c..67d1d3ebb4b2 100644 |
2318 | --- a/include/linux/acpi.h |
2319 | +++ b/include/linux/acpi.h |
2320 | @@ -318,6 +318,7 @@ struct pci_dev; |
2321 | int acpi_pci_irq_enable (struct pci_dev *dev); |
2322 | void acpi_penalize_isa_irq(int irq, int active); |
2323 | bool acpi_isa_irq_available(int irq); |
2324 | +void acpi_penalize_sci_irq(int irq, int trigger, int polarity); |
2325 | void acpi_pci_irq_disable (struct pci_dev *dev); |
2326 | |
2327 | extern int ec_read(u8 addr, u8 *val); |
2328 | diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h |
2329 | index c46d2aa16d81..1d18af034554 100644 |
2330 | --- a/include/linux/frontswap.h |
2331 | +++ b/include/linux/frontswap.h |
2332 | @@ -106,8 +106,9 @@ static inline void frontswap_invalidate_area(unsigned type) |
2333 | |
2334 | static inline void frontswap_init(unsigned type, unsigned long *map) |
2335 | { |
2336 | - if (frontswap_enabled()) |
2337 | - __frontswap_init(type, map); |
2338 | +#ifdef CONFIG_FRONTSWAP |
2339 | + __frontswap_init(type, map); |
2340 | +#endif |
2341 | } |
2342 | |
2343 | #endif /* _LINUX_FRONTSWAP_H */ |
2344 | diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h |
2345 | index d6917b896d3a..3584bc8864c4 100644 |
2346 | --- a/include/linux/sunrpc/svc_rdma.h |
2347 | +++ b/include/linux/sunrpc/svc_rdma.h |
2348 | @@ -86,6 +86,7 @@ struct svc_rdma_op_ctxt { |
2349 | unsigned long flags; |
2350 | enum dma_data_direction direction; |
2351 | int count; |
2352 | + unsigned int mapped_sges; |
2353 | struct ib_sge sge[RPCSVC_MAXPAGES]; |
2354 | struct page *pages[RPCSVC_MAXPAGES]; |
2355 | }; |
2356 | @@ -193,6 +194,14 @@ struct svcxprt_rdma { |
2357 | |
2358 | #define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD |
2359 | |
2360 | +/* Track DMA maps for this transport and context */ |
2361 | +static inline void svc_rdma_count_mappings(struct svcxprt_rdma *rdma, |
2362 | + struct svc_rdma_op_ctxt *ctxt) |
2363 | +{ |
2364 | + ctxt->mapped_sges++; |
2365 | + atomic_inc(&rdma->sc_dma_used); |
2366 | +} |
2367 | + |
2368 | /* svc_rdma_backchannel.c */ |
2369 | extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, |
2370 | struct rpcrdma_msg *rmsgp, |
2371 | diff --git a/lib/genalloc.c b/lib/genalloc.c |
2372 | index 0a1139644d32..144fe6b1a03e 100644 |
2373 | --- a/lib/genalloc.c |
2374 | +++ b/lib/genalloc.c |
2375 | @@ -292,7 +292,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size, |
2376 | struct gen_pool_chunk *chunk; |
2377 | unsigned long addr = 0; |
2378 | int order = pool->min_alloc_order; |
2379 | - int nbits, start_bit = 0, end_bit, remain; |
2380 | + int nbits, start_bit, end_bit, remain; |
2381 | |
2382 | #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG |
2383 | BUG_ON(in_nmi()); |
2384 | @@ -307,6 +307,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size, |
2385 | if (size > atomic_read(&chunk->avail)) |
2386 | continue; |
2387 | |
2388 | + start_bit = 0; |
2389 | end_bit = chunk_size(chunk) >> order; |
2390 | retry: |
2391 | start_bit = algo(chunk->bits, end_bit, start_bit, |
2392 | diff --git a/mm/hugetlb.c b/mm/hugetlb.c |
2393 | index 770d83eb3f48..0ddce6a1cdf7 100644 |
2394 | --- a/mm/hugetlb.c |
2395 | +++ b/mm/hugetlb.c |
2396 | @@ -1826,11 +1826,17 @@ static void return_unused_surplus_pages(struct hstate *h, |
2397 | * is not the case is if a reserve map was changed between calls. It |
2398 | * is the responsibility of the caller to notice the difference and |
2399 | * take appropriate action. |
2400 | + * |
2401 | + * vma_add_reservation is used in error paths where a reservation must |
2402 | + * be restored when a newly allocated huge page must be freed. It is |
2403 | + * to be called after calling vma_needs_reservation to determine if a |
2404 | + * reservation exists. |
2405 | */ |
2406 | enum vma_resv_mode { |
2407 | VMA_NEEDS_RESV, |
2408 | VMA_COMMIT_RESV, |
2409 | VMA_END_RESV, |
2410 | + VMA_ADD_RESV, |
2411 | }; |
2412 | static long __vma_reservation_common(struct hstate *h, |
2413 | struct vm_area_struct *vma, unsigned long addr, |
2414 | @@ -1856,6 +1862,14 @@ static long __vma_reservation_common(struct hstate *h, |
2415 | region_abort(resv, idx, idx + 1); |
2416 | ret = 0; |
2417 | break; |
2418 | + case VMA_ADD_RESV: |
2419 | + if (vma->vm_flags & VM_MAYSHARE) |
2420 | + ret = region_add(resv, idx, idx + 1); |
2421 | + else { |
2422 | + region_abort(resv, idx, idx + 1); |
2423 | + ret = region_del(resv, idx, idx + 1); |
2424 | + } |
2425 | + break; |
2426 | default: |
2427 | BUG(); |
2428 | } |
2429 | @@ -1903,6 +1917,56 @@ static void vma_end_reservation(struct hstate *h, |
2430 | (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); |
2431 | } |
2432 | |
2433 | +static long vma_add_reservation(struct hstate *h, |
2434 | + struct vm_area_struct *vma, unsigned long addr) |
2435 | +{ |
2436 | + return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); |
2437 | +} |
2438 | + |
2439 | +/* |
2440 | + * This routine is called to restore a reservation on error paths. In the |
2441 | + * specific error paths, a huge page was allocated (via alloc_huge_page) |
2442 | + * and is about to be freed. If a reservation for the page existed, |
2443 | + * alloc_huge_page would have consumed the reservation and set PagePrivate |
2444 | + * in the newly allocated page. When the page is freed via free_huge_page, |
2445 | + * the global reservation count will be incremented if PagePrivate is set. |
2446 | + * However, free_huge_page can not adjust the reserve map. Adjust the |
2447 | + * reserve map here to be consistent with global reserve count adjustments |
2448 | + * to be made by free_huge_page. |
2449 | + */ |
2450 | +static void restore_reserve_on_error(struct hstate *h, |
2451 | + struct vm_area_struct *vma, unsigned long address, |
2452 | + struct page *page) |
2453 | +{ |
2454 | + if (unlikely(PagePrivate(page))) { |
2455 | + long rc = vma_needs_reservation(h, vma, address); |
2456 | + |
2457 | + if (unlikely(rc < 0)) { |
2458 | + /* |
2459 | + * Rare out of memory condition in reserve map |
2460 | + * manipulation. Clear PagePrivate so that |
2461 | + * global reserve count will not be incremented |
2462 | + * by free_huge_page. This will make it appear |
2463 | + * as though the reservation for this page was |
2464 | + * consumed. This may prevent the task from |
2465 | + * faulting in the page at a later time. This |
2466 | + * is better than inconsistent global huge page |
2467 | + * accounting of reserve counts. |
2468 | + */ |
2469 | + ClearPagePrivate(page); |
2470 | + } else if (rc) { |
2471 | + rc = vma_add_reservation(h, vma, address); |
2472 | + if (unlikely(rc < 0)) |
2473 | + /* |
2474 | + * See above comment about rare out of |
2475 | + * memory condition. |
2476 | + */ |
2477 | + ClearPagePrivate(page); |
2478 | + } else |
2479 | + vma_end_reservation(h, vma, address); |
2480 | + } |
2481 | +} |
2482 | + |
2483 | struct page *alloc_huge_page(struct vm_area_struct *vma, |
2484 | unsigned long addr, int avoid_reserve) |
2485 | { |
2486 | @@ -3498,6 +3562,7 @@ retry_avoidcopy: |
2487 | spin_unlock(ptl); |
2488 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); |
2489 | out_release_all: |
2490 | + restore_reserve_on_error(h, vma, address, new_page); |
2491 | put_page(new_page); |
2492 | out_release_old: |
2493 | put_page(old_page); |
2494 | @@ -3680,6 +3745,7 @@ backout: |
2495 | spin_unlock(ptl); |
2496 | backout_unlocked: |
2497 | unlock_page(page); |
2498 | + restore_reserve_on_error(h, vma, address, page); |
2499 | put_page(page); |
2500 | goto out; |
2501 | } |
2502 | diff --git a/mm/memory-failure.c b/mm/memory-failure.c |
2503 | index de88f33519c0..19e796d36a62 100644 |
2504 | --- a/mm/memory-failure.c |
2505 | +++ b/mm/memory-failure.c |
2506 | @@ -1112,10 +1112,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags) |
2507 | } |
2508 | |
2509 | if (!PageHuge(p) && PageTransHuge(hpage)) { |
2510 | - lock_page(hpage); |
2511 | - if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) { |
2512 | - unlock_page(hpage); |
2513 | - if (!PageAnon(hpage)) |
2514 | + lock_page(p); |
2515 | + if (!PageAnon(p) || unlikely(split_huge_page(p))) { |
2516 | + unlock_page(p); |
2517 | + if (!PageAnon(p)) |
2518 | pr_err("Memory failure: %#lx: non anonymous thp\n", |
2519 | pfn); |
2520 | else |
2521 | @@ -1126,9 +1126,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags) |
2522 | put_hwpoison_page(p); |
2523 | return -EBUSY; |
2524 | } |
2525 | - unlock_page(hpage); |
2526 | - get_hwpoison_page(p); |
2527 | - put_hwpoison_page(hpage); |
2528 | + unlock_page(p); |
2529 | VM_BUG_ON_PAGE(!page_count(p), p); |
2530 | hpage = compound_head(p); |
2531 | } |
2532 | diff --git a/mm/shmem.c b/mm/shmem.c |
2533 | index 971fc83e6402..38aa5e0a955f 100644 |
2534 | --- a/mm/shmem.c |
2535 | +++ b/mm/shmem.c |
2536 | @@ -1483,6 +1483,8 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, |
2537 | copy_highpage(newpage, oldpage); |
2538 | flush_dcache_page(newpage); |
2539 | |
2540 | + __SetPageLocked(newpage); |
2541 | + __SetPageSwapBacked(newpage); |
2542 | SetPageUptodate(newpage); |
2543 | set_page_private(newpage, swap_index); |
2544 | SetPageSwapCache(newpage); |
2545 | diff --git a/mm/slab_common.c b/mm/slab_common.c |
2546 | index 71f0b28a1bec..329b03843863 100644 |
2547 | --- a/mm/slab_common.c |
2548 | +++ b/mm/slab_common.c |
2549 | @@ -533,8 +533,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg, |
2550 | |
2551 | s = create_cache(cache_name, root_cache->object_size, |
2552 | root_cache->size, root_cache->align, |
2553 | - root_cache->flags, root_cache->ctor, |
2554 | - memcg, root_cache); |
2555 | + root_cache->flags & CACHE_CREATE_MASK, |
2556 | + root_cache->ctor, memcg, root_cache); |
2557 | /* |
2558 | * If we could not create a memcg cache, do not complain, because |
2559 | * that's not critical at all as we can always proceed with the root |
2560 | diff --git a/mm/swapfile.c b/mm/swapfile.c |
2561 | index 2657accc6e2b..bf262e494f68 100644 |
2562 | --- a/mm/swapfile.c |
2563 | +++ b/mm/swapfile.c |
2564 | @@ -2218,6 +2218,8 @@ static unsigned long read_swap_header(struct swap_info_struct *p, |
2565 | swab32s(&swap_header->info.version); |
2566 | swab32s(&swap_header->info.last_page); |
2567 | swab32s(&swap_header->info.nr_badpages); |
2568 | + if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) |
2569 | + return 0; |
2570 | for (i = 0; i < swap_header->info.nr_badpages; i++) |
2571 | swab32s(&swap_header->info.badpages[i]); |
2572 | } |
2573 | diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c |
2574 | index 3940b5d24421..3e9667e467c3 100644 |
2575 | --- a/net/batman-adv/originator.c |
2576 | +++ b/net/batman-adv/originator.c |
2577 | @@ -537,7 +537,7 @@ batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface, |
2578 | if (bat_priv->algo_ops->neigh.hardif_init) |
2579 | bat_priv->algo_ops->neigh.hardif_init(hardif_neigh); |
2580 | |
2581 | - hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list); |
2582 | + hlist_add_head_rcu(&hardif_neigh->list, &hard_iface->neigh_list); |
2583 | |
2584 | out: |
2585 | spin_unlock_bh(&hard_iface->neigh_list_lock); |
2586 | diff --git a/net/ceph/ceph_fs.c b/net/ceph/ceph_fs.c |
2587 | index 7d54e944de5e..dcbe67ff3e2b 100644 |
2588 | --- a/net/ceph/ceph_fs.c |
2589 | +++ b/net/ceph/ceph_fs.c |
2590 | @@ -34,7 +34,8 @@ void ceph_file_layout_from_legacy(struct ceph_file_layout *fl, |
2591 | fl->stripe_count = le32_to_cpu(legacy->fl_stripe_count); |
2592 | fl->object_size = le32_to_cpu(legacy->fl_object_size); |
2593 | fl->pool_id = le32_to_cpu(legacy->fl_pg_pool); |
2594 | - if (fl->pool_id == 0) |
2595 | + if (fl->pool_id == 0 && fl->stripe_unit == 0 && |
2596 | + fl->stripe_count == 0 && fl->object_size == 0) |
2597 | fl->pool_id = -1; |
2598 | } |
2599 | EXPORT_SYMBOL(ceph_file_layout_from_legacy); |
2600 | diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c |
2601 | index aa5847a16713..1df2c8dac7c5 100644 |
2602 | --- a/net/netfilter/nf_log.c |
2603 | +++ b/net/netfilter/nf_log.c |
2604 | @@ -420,7 +420,7 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write, |
2605 | char buf[NFLOGGER_NAME_LEN]; |
2606 | int r = 0; |
2607 | int tindex = (unsigned long)table->extra1; |
2608 | - struct net *net = current->nsproxy->net_ns; |
2609 | + struct net *net = table->extra2; |
2610 | |
2611 | if (write) { |
2612 | struct ctl_table tmp = *table; |
2613 | @@ -474,7 +474,6 @@ static int netfilter_log_sysctl_init(struct net *net) |
2614 | 3, "%d", i); |
2615 | nf_log_sysctl_table[i].procname = |
2616 | nf_log_sysctl_fnames[i]; |
2617 | - nf_log_sysctl_table[i].data = NULL; |
2618 | nf_log_sysctl_table[i].maxlen = NFLOGGER_NAME_LEN; |
2619 | nf_log_sysctl_table[i].mode = 0644; |
2620 | nf_log_sysctl_table[i].proc_handler = |
2621 | @@ -484,6 +483,9 @@ static int netfilter_log_sysctl_init(struct net *net) |
2622 | } |
2623 | } |
2624 | |
2625 | + for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) |
2626 | + table[i].extra2 = net; |
2627 | + |
2628 | net->nf.nf_log_dir_header = register_net_sysctl(net, |
2629 | "net/netfilter/nf_log", |
2630 | table); |
2631 | diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c |
2632 | index 892b5e1d9b09..2761377dcc17 100644 |
2633 | --- a/net/sunrpc/xprtrdma/frwr_ops.c |
2634 | +++ b/net/sunrpc/xprtrdma/frwr_ops.c |
2635 | @@ -44,18 +44,20 @@ |
2636 | * being done. |
2637 | * |
2638 | * When the underlying transport disconnects, MRs are left in one of |
2639 | - * three states: |
2640 | + * four states: |
2641 | * |
2642 | * INVALID: The MR was not in use before the QP entered ERROR state. |
2643 | - * (Or, the LOCAL_INV WR has not completed or flushed yet). |
2644 | - * |
2645 | - * STALE: The MR was being registered or unregistered when the QP |
2646 | - * entered ERROR state, and the pending WR was flushed. |
2647 | * |
2648 | * VALID: The MR was registered before the QP entered ERROR state. |
2649 | * |
2650 | - * When frwr_op_map encounters STALE and VALID MRs, they are recovered |
2651 | - * with ib_dereg_mr and then are re-initialized. Beause MR recovery |
2652 | + * FLUSHED_FR: The MR was being registered when the QP entered ERROR |
2653 | + * state, and the pending WR was flushed. |
2654 | + * |
2655 | + * FLUSHED_LI: The MR was being invalidated when the QP entered ERROR |
2656 | + * state, and the pending WR was flushed. |
2657 | + * |
2658 | + * When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered |
2659 | + * with ib_dereg_mr and then are re-initialized. Because MR recovery |
2660 | * allocates fresh resources, it is deferred to a workqueue, and the |
2661 | * recovered MRs are placed back on the rb_mws list when recovery is |
2662 | * complete. frwr_op_map allocates another MR for the current RPC while |
2663 | @@ -175,12 +177,15 @@ __frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r) |
2664 | static void |
2665 | frwr_op_recover_mr(struct rpcrdma_mw *mw) |
2666 | { |
2667 | + enum rpcrdma_frmr_state state = mw->frmr.fr_state; |
2668 | struct rpcrdma_xprt *r_xprt = mw->mw_xprt; |
2669 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; |
2670 | int rc; |
2671 | |
2672 | rc = __frwr_reset_mr(ia, mw); |
2673 | - ib_dma_unmap_sg(ia->ri_device, mw->mw_sg, mw->mw_nents, mw->mw_dir); |
2674 | + if (state != FRMR_FLUSHED_LI) |
2675 | + ib_dma_unmap_sg(ia->ri_device, |
2676 | + mw->mw_sg, mw->mw_nents, mw->mw_dir); |
2677 | if (rc) |
2678 | goto out_release; |
2679 | |
2680 | @@ -261,10 +266,8 @@ frwr_op_maxpages(struct rpcrdma_xprt *r_xprt) |
2681 | } |
2682 | |
2683 | static void |
2684 | -__frwr_sendcompletion_flush(struct ib_wc *wc, struct rpcrdma_frmr *frmr, |
2685 | - const char *wr) |
2686 | +__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr) |
2687 | { |
2688 | - frmr->fr_state = FRMR_IS_STALE; |
2689 | if (wc->status != IB_WC_WR_FLUSH_ERR) |
2690 | pr_err("rpcrdma: %s: %s (%u/0x%x)\n", |
2691 | wr, ib_wc_status_msg(wc->status), |
2692 | @@ -287,7 +290,8 @@ frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc) |
2693 | if (wc->status != IB_WC_SUCCESS) { |
2694 | cqe = wc->wr_cqe; |
2695 | frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe); |
2696 | - __frwr_sendcompletion_flush(wc, frmr, "fastreg"); |
2697 | + frmr->fr_state = FRMR_FLUSHED_FR; |
2698 | + __frwr_sendcompletion_flush(wc, "fastreg"); |
2699 | } |
2700 | } |
2701 | |
2702 | @@ -307,7 +311,8 @@ frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc) |
2703 | if (wc->status != IB_WC_SUCCESS) { |
2704 | cqe = wc->wr_cqe; |
2705 | frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe); |
2706 | - __frwr_sendcompletion_flush(wc, frmr, "localinv"); |
2707 | + frmr->fr_state = FRMR_FLUSHED_LI; |
2708 | + __frwr_sendcompletion_flush(wc, "localinv"); |
2709 | } |
2710 | } |
2711 | |
2712 | @@ -327,9 +332,11 @@ frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc) |
2713 | /* WARNING: Only wr_cqe and status are reliable at this point */ |
2714 | cqe = wc->wr_cqe; |
2715 | frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe); |
2716 | - if (wc->status != IB_WC_SUCCESS) |
2717 | - __frwr_sendcompletion_flush(wc, frmr, "localinv"); |
2718 | - complete_all(&frmr->fr_linv_done); |
2719 | + if (wc->status != IB_WC_SUCCESS) { |
2720 | + frmr->fr_state = FRMR_FLUSHED_LI; |
2721 | + __frwr_sendcompletion_flush(wc, "localinv"); |
2722 | + } |
2723 | + complete(&frmr->fr_linv_done); |
2724 | } |
2725 | |
2726 | /* Post a REG_MR Work Request to register a memory region |
2727 | diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c |
2728 | index a2a7519b0f23..cd0c5581498c 100644 |
2729 | --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c |
2730 | +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c |
2731 | @@ -129,7 +129,7 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma, |
2732 | ret = -EIO; |
2733 | goto out_unmap; |
2734 | } |
2735 | - atomic_inc(&rdma->sc_dma_used); |
2736 | + svc_rdma_count_mappings(rdma, ctxt); |
2737 | |
2738 | memset(&send_wr, 0, sizeof(send_wr)); |
2739 | ctxt->cqe.done = svc_rdma_wc_send; |
2740 | diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c |
2741 | index 2c25606f2561..ad1df979b3f0 100644 |
2742 | --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c |
2743 | +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c |
2744 | @@ -159,7 +159,7 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt, |
2745 | ctxt->sge[pno].addr); |
2746 | if (ret) |
2747 | goto err; |
2748 | - atomic_inc(&xprt->sc_dma_used); |
2749 | + svc_rdma_count_mappings(xprt, ctxt); |
2750 | |
2751 | ctxt->sge[pno].lkey = xprt->sc_pd->local_dma_lkey; |
2752 | ctxt->sge[pno].length = len; |
2753 | diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c |
2754 | index 54d533300620..3b95b19fcf72 100644 |
2755 | --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c |
2756 | +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c |
2757 | @@ -280,7 +280,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, |
2758 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, |
2759 | sge[sge_no].addr)) |
2760 | goto err; |
2761 | - atomic_inc(&xprt->sc_dma_used); |
2762 | + svc_rdma_count_mappings(xprt, ctxt); |
2763 | sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey; |
2764 | ctxt->count++; |
2765 | sge_off = 0; |
2766 | @@ -489,7 +489,7 @@ static int send_reply(struct svcxprt_rdma *rdma, |
2767 | ctxt->sge[0].length, DMA_TO_DEVICE); |
2768 | if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr)) |
2769 | goto err; |
2770 | - atomic_inc(&rdma->sc_dma_used); |
2771 | + svc_rdma_count_mappings(rdma, ctxt); |
2772 | |
2773 | ctxt->direction = DMA_TO_DEVICE; |
2774 | |
2775 | @@ -505,7 +505,7 @@ static int send_reply(struct svcxprt_rdma *rdma, |
2776 | if (ib_dma_mapping_error(rdma->sc_cm_id->device, |
2777 | ctxt->sge[sge_no].addr)) |
2778 | goto err; |
2779 | - atomic_inc(&rdma->sc_dma_used); |
2780 | + svc_rdma_count_mappings(rdma, ctxt); |
2781 | ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey; |
2782 | ctxt->sge[sge_no].length = sge_bytes; |
2783 | } |
2784 | @@ -523,23 +523,9 @@ static int send_reply(struct svcxprt_rdma *rdma, |
2785 | ctxt->pages[page_no+1] = rqstp->rq_respages[page_no]; |
2786 | ctxt->count++; |
2787 | rqstp->rq_respages[page_no] = NULL; |
2788 | - /* |
2789 | - * If there are more pages than SGE, terminate SGE |
2790 | - * list so that svc_rdma_unmap_dma doesn't attempt to |
2791 | - * unmap garbage. |
2792 | - */ |
2793 | - if (page_no+1 >= sge_no) |
2794 | - ctxt->sge[page_no+1].length = 0; |
2795 | } |
2796 | rqstp->rq_next_page = rqstp->rq_respages + 1; |
2797 | |
2798 | - /* The loop above bumps sc_dma_used for each sge. The |
2799 | - * xdr_buf.tail gets a separate sge, but resides in the |
2800 | - * same page as xdr_buf.head. Don't count it twice. |
2801 | - */ |
2802 | - if (sge_no > ctxt->count) |
2803 | - atomic_dec(&rdma->sc_dma_used); |
2804 | - |
2805 | if (sge_no > rdma->sc_max_sge) { |
2806 | pr_err("svcrdma: Too many sges (%d)\n", sge_no); |
2807 | goto err; |
2808 | @@ -635,7 +621,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) |
2809 | ret = send_reply(rdma, rqstp, res_page, rdma_resp, vec, |
2810 | inline_bytes); |
2811 | if (ret < 0) |
2812 | - goto err1; |
2813 | + goto err0; |
2814 | |
2815 | svc_rdma_put_req_map(rdma, vec); |
2816 | dprintk("svcrdma: send_reply returns %d\n", ret); |
2817 | @@ -692,7 +678,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, |
2818 | svc_rdma_put_context(ctxt, 1); |
2819 | return; |
2820 | } |
2821 | - atomic_inc(&xprt->sc_dma_used); |
2822 | + svc_rdma_count_mappings(xprt, ctxt); |
2823 | |
2824 | /* Prepare SEND WR */ |
2825 | memset(&err_wr, 0, sizeof(err_wr)); |
2826 | diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c |
2827 | index dd9440137834..924271c9ef3e 100644 |
2828 | --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c |
2829 | +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c |
2830 | @@ -198,6 +198,7 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt) |
2831 | |
2832 | out: |
2833 | ctxt->count = 0; |
2834 | + ctxt->mapped_sges = 0; |
2835 | ctxt->frmr = NULL; |
2836 | return ctxt; |
2837 | |
2838 | @@ -221,22 +222,27 @@ out_empty: |
2839 | void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt) |
2840 | { |
2841 | struct svcxprt_rdma *xprt = ctxt->xprt; |
2842 | - int i; |
2843 | - for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) { |
2844 | + struct ib_device *device = xprt->sc_cm_id->device; |
2845 | + u32 lkey = xprt->sc_pd->local_dma_lkey; |
2846 | + unsigned int i, count; |
2847 | + |
2848 | + for (count = 0, i = 0; i < ctxt->mapped_sges; i++) { |
2849 | /* |
2850 | * Unmap the DMA addr in the SGE if the lkey matches |
2851 | * the local_dma_lkey, otherwise, ignore it since it is |
2852 | * an FRMR lkey and will be unmapped later when the |
2853 | * last WR that uses it completes. |
2854 | */ |
2855 | - if (ctxt->sge[i].lkey == xprt->sc_pd->local_dma_lkey) { |
2856 | - atomic_dec(&xprt->sc_dma_used); |
2857 | - ib_dma_unmap_page(xprt->sc_cm_id->device, |
2858 | + if (ctxt->sge[i].lkey == lkey) { |
2859 | + count++; |
2860 | + ib_dma_unmap_page(device, |
2861 | ctxt->sge[i].addr, |
2862 | ctxt->sge[i].length, |
2863 | ctxt->direction); |
2864 | } |
2865 | } |
2866 | + ctxt->mapped_sges = 0; |
2867 | + atomic_sub(count, &xprt->sc_dma_used); |
2868 | } |
2869 | |
2870 | void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages) |
2871 | @@ -600,7 +606,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags) |
2872 | DMA_FROM_DEVICE); |
2873 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa)) |
2874 | goto err_put_ctxt; |
2875 | - atomic_inc(&xprt->sc_dma_used); |
2876 | + svc_rdma_count_mappings(xprt, ctxt); |
2877 | ctxt->sge[sge_no].addr = pa; |
2878 | ctxt->sge[sge_no].length = PAGE_SIZE; |
2879 | ctxt->sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey; |
2880 | diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h |
2881 | index a71b0f5897d8..edc03445beed 100644 |
2882 | --- a/net/sunrpc/xprtrdma/xprt_rdma.h |
2883 | +++ b/net/sunrpc/xprtrdma/xprt_rdma.h |
2884 | @@ -207,7 +207,8 @@ struct rpcrdma_rep { |
2885 | enum rpcrdma_frmr_state { |
2886 | FRMR_IS_INVALID, /* ready to be used */ |
2887 | FRMR_IS_VALID, /* in use */ |
2888 | - FRMR_IS_STALE, /* failed completion */ |
2889 | + FRMR_FLUSHED_FR, /* flushed FASTREG WR */ |
2890 | + FRMR_FLUSHED_LI, /* flushed LOCALINV WR */ |
2891 | }; |
2892 | |
2893 | struct rpcrdma_frmr { |
2894 | diff --git a/sound/core/info.c b/sound/core/info.c |
2895 | index 895362a696c9..8ab72e0f5932 100644 |
2896 | --- a/sound/core/info.c |
2897 | +++ b/sound/core/info.c |
2898 | @@ -325,10 +325,15 @@ static ssize_t snd_info_text_entry_write(struct file *file, |
2899 | size_t next; |
2900 | int err = 0; |
2901 | |
2902 | + if (!entry->c.text.write) |
2903 | + return -EIO; |
2904 | pos = *offset; |
2905 | if (!valid_pos(pos, count)) |
2906 | return -EIO; |
2907 | next = pos + count; |
2908 | + /* don't handle too large text inputs */ |
2909 | + if (next > 16 * 1024) |
2910 | + return -EIO; |
2911 | mutex_lock(&entry->access); |
2912 | buf = data->wbuffer; |
2913 | if (!buf) { |
2914 | @@ -366,7 +371,9 @@ static int snd_info_seq_show(struct seq_file *seq, void *p) |
2915 | struct snd_info_private_data *data = seq->private; |
2916 | struct snd_info_entry *entry = data->entry; |
2917 | |
2918 | - if (entry->c.text.read) { |
2919 | + if (!entry->c.text.read) { |
2920 | + return -EIO; |
2921 | + } else { |
2922 | data->rbuffer->buffer = (char *)seq; /* XXX hack! */ |
2923 | entry->c.text.read(entry, data->rbuffer); |
2924 | } |
2925 | diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c |
2926 | index e07807d96b68..3670086b9227 100644 |
2927 | --- a/sound/soc/codecs/cs4270.c |
2928 | +++ b/sound/soc/codecs/cs4270.c |
2929 | @@ -148,11 +148,11 @@ SND_SOC_DAPM_OUTPUT("AOUTR"), |
2930 | }; |
2931 | |
2932 | static const struct snd_soc_dapm_route cs4270_dapm_routes[] = { |
2933 | - { "Capture", NULL, "AINA" }, |
2934 | - { "Capture", NULL, "AINB" }, |
2935 | + { "Capture", NULL, "AINL" }, |
2936 | + { "Capture", NULL, "AINR" }, |
2937 | |
2938 | - { "AOUTA", NULL, "Playback" }, |
2939 | - { "AOUTB", NULL, "Playback" }, |
2940 | + { "AOUTL", NULL, "Playback" }, |
2941 | + { "AOUTR", NULL, "Playback" }, |
2942 | }; |
2943 | |
2944 | /** |
2945 | diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c |
2946 | index e3e764167765..7b7a380b1245 100644 |
2947 | --- a/sound/soc/intel/skylake/skl.c |
2948 | +++ b/sound/soc/intel/skylake/skl.c |
2949 | @@ -785,8 +785,7 @@ static void skl_remove(struct pci_dev *pci) |
2950 | |
2951 | release_firmware(skl->tplg); |
2952 | |
2953 | - if (pci_dev_run_wake(pci)) |
2954 | - pm_runtime_get_noresume(&pci->dev); |
2955 | + pm_runtime_get_noresume(&pci->dev); |
2956 | |
2957 | /* codec removal, invoke bus_device_remove */ |
2958 | snd_hdac_ext_bus_device_remove(ebus); |
2959 | diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c |
2960 | index 44f170c73b06..03c18db96741 100644 |
2961 | --- a/sound/soc/sunxi/sun4i-codec.c |
2962 | +++ b/sound/soc/sunxi/sun4i-codec.c |
2963 | @@ -738,11 +738,11 @@ static struct snd_soc_card *sun4i_codec_create_card(struct device *dev) |
2964 | |
2965 | card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); |
2966 | if (!card) |
2967 | - return NULL; |
2968 | + return ERR_PTR(-ENOMEM); |
2969 | |
2970 | card->dai_link = sun4i_codec_create_link(dev, &card->num_links); |
2971 | if (!card->dai_link) |
2972 | - return NULL; |
2973 | + return ERR_PTR(-ENOMEM); |
2974 | |
2975 | card->dev = dev; |
2976 | card->name = "sun4i-codec"; |
2977 | @@ -842,7 +842,8 @@ static int sun4i_codec_probe(struct platform_device *pdev) |
2978 | } |
2979 | |
2980 | card = sun4i_codec_create_card(&pdev->dev); |
2981 | - if (!card) { |
2982 | + if (IS_ERR(card)) { |
2983 | + ret = PTR_ERR(card); |
2984 | dev_err(&pdev->dev, "Failed to create our card\n"); |
2985 | goto err_unregister_codec; |
2986 | } |
2987 | diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c |
2988 | index 7aee954b307f..4ad1eac23f66 100644 |
2989 | --- a/tools/perf/ui/browsers/hists.c |
2990 | +++ b/tools/perf/ui/browsers/hists.c |
2991 | @@ -595,7 +595,8 @@ int hist_browser__run(struct hist_browser *browser, const char *help) |
2992 | u64 nr_entries; |
2993 | hbt->timer(hbt->arg); |
2994 | |
2995 | - if (hist_browser__has_filter(browser)) |
2996 | + if (hist_browser__has_filter(browser) || |
2997 | + symbol_conf.report_hierarchy) |
2998 | hist_browser__update_nr_entries(browser); |
2999 | |
3000 | nr_entries = hist_browser__nr_entries(browser); |
3001 | diff --git a/tools/power/cpupower/utils/cpufreq-set.c b/tools/power/cpupower/utils/cpufreq-set.c |
3002 | index b4bf76971dc9..1eef0aed6423 100644 |
3003 | --- a/tools/power/cpupower/utils/cpufreq-set.c |
3004 | +++ b/tools/power/cpupower/utils/cpufreq-set.c |
3005 | @@ -296,7 +296,7 @@ int cmd_freq_set(int argc, char **argv) |
3006 | struct cpufreq_affected_cpus *cpus; |
3007 | |
3008 | if (!bitmask_isbitset(cpus_chosen, cpu) || |
3009 | - cpupower_is_cpu_online(cpu)) |
3010 | + cpupower_is_cpu_online(cpu) != 1) |
3011 | continue; |
3012 | |
3013 | cpus = cpufreq_get_related_cpus(cpu); |
3014 | @@ -316,10 +316,7 @@ int cmd_freq_set(int argc, char **argv) |
3015 | cpu <= bitmask_last(cpus_chosen); cpu++) { |
3016 | |
3017 | if (!bitmask_isbitset(cpus_chosen, cpu) || |
3018 | - cpupower_is_cpu_online(cpu)) |
3019 | - continue; |
3020 | - |
3021 | - if (cpupower_is_cpu_online(cpu) != 1) |
3022 | + cpupower_is_cpu_online(cpu) != 1) |
3023 | continue; |
3024 | |
3025 | printf(_("Setting cpu: %d\n"), cpu); |
3026 | diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c |
3027 | index 3bad3c5ed431..d1b080ca8dc9 100644 |
3028 | --- a/virt/kvm/arm/vgic/vgic-mmio.c |
3029 | +++ b/virt/kvm/arm/vgic/vgic-mmio.c |
3030 | @@ -453,17 +453,33 @@ struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev) |
3031 | return container_of(dev, struct vgic_io_device, dev); |
3032 | } |
3033 | |
3034 | -static bool check_region(const struct vgic_register_region *region, |
3035 | +static bool check_region(const struct kvm *kvm, |
3036 | + const struct vgic_register_region *region, |
3037 | gpa_t addr, int len) |
3038 | { |
3039 | - if ((region->access_flags & VGIC_ACCESS_8bit) && len == 1) |
3040 | - return true; |
3041 | - if ((region->access_flags & VGIC_ACCESS_32bit) && |
3042 | - len == sizeof(u32) && !(addr & 3)) |
3043 | - return true; |
3044 | - if ((region->access_flags & VGIC_ACCESS_64bit) && |
3045 | - len == sizeof(u64) && !(addr & 7)) |
3046 | - return true; |
3047 | + int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS; |
3048 | + |
3049 | + switch (len) { |
3050 | + case sizeof(u8): |
3051 | + flags = VGIC_ACCESS_8bit; |
3052 | + break; |
3053 | + case sizeof(u32): |
3054 | + flags = VGIC_ACCESS_32bit; |
3055 | + break; |
3056 | + case sizeof(u64): |
3057 | + flags = VGIC_ACCESS_64bit; |
3058 | + break; |
3059 | + default: |
3060 | + return false; |
3061 | + } |
3062 | + |
3063 | + if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) { |
3064 | + if (!region->bits_per_irq) |
3065 | + return true; |
3066 | + |
3067 | + /* Do we access a non-allocated IRQ? */ |
3068 | + return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs; |
3069 | + } |
3070 | |
3071 | return false; |
3072 | } |
3073 | @@ -477,7 +493,7 @@ static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, |
3074 | |
3075 | region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions, |
3076 | addr - iodev->base_addr); |
3077 | - if (!region || !check_region(region, addr, len)) { |
3078 | + if (!region || !check_region(vcpu->kvm, region, addr, len)) { |
3079 | memset(val, 0, len); |
3080 | return 0; |
3081 | } |
3082 | @@ -510,10 +526,7 @@ static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, |
3083 | |
3084 | region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions, |
3085 | addr - iodev->base_addr); |
3086 | - if (!region) |
3087 | - return 0; |
3088 | - |
3089 | - if (!check_region(region, addr, len)) |
3090 | + if (!region || !check_region(vcpu->kvm, region, addr, len)) |
3091 | return 0; |
3092 | |
3093 | switch (iodev->iodev_type) { |
3094 | diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h |
3095 | index 0b3ecf9d100e..ba63d91d2619 100644 |
3096 | --- a/virt/kvm/arm/vgic/vgic-mmio.h |
3097 | +++ b/virt/kvm/arm/vgic/vgic-mmio.h |
3098 | @@ -50,15 +50,15 @@ extern struct kvm_io_device_ops kvm_io_gic_ops; |
3099 | #define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1) |
3100 | |
3101 | /* |
3102 | - * (addr & mask) gives us the byte offset for the INT ID, so we want to |
3103 | - * divide this with 'bytes per irq' to get the INT ID, which is given |
3104 | - * by '(bits) / 8'. But we do this with fixed-point-arithmetic and |
3105 | - * take advantage of the fact that division by a fraction equals |
3106 | - * multiplication with the inverted fraction, and scale up both the |
3107 | - * numerator and denominator with 8 to support at most 64 bits per IRQ: |
3108 | + * (addr & mask) gives us the _byte_ offset for the INT ID. |
3109 | + * We multiply this by 8 the get the _bit_ offset, then divide this by |
3110 | + * the number of bits to learn the actual INT ID. |
3111 | + * But instead of a division (which requires a "long long div" implementation), |
3112 | + * we shift by the binary logarithm of <bits>. |
3113 | + * This assumes that <bits> is a power of two. |
3114 | */ |
3115 | #define VGIC_ADDR_TO_INTID(addr, bits) (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \ |
3116 | - 64 / (bits) / 8) |
3117 | + 8 >> ilog2(bits)) |
3118 | |
3119 | /* |
3120 | * Some VGIC registers store per-IRQ information, with a different number |