Contents of /trunk/kernel26-alx/patches-3.10/0148-3.10.49-all-fixes.patch
Revision 2672
Tue Jul 21 16:46:35 2015 UTC (9 years, 2 months ago) by niro
File size: 53568 byte(s)
-3.10.84-alx-r1
1 | diff --git a/Makefile b/Makefile |
2 | index f7e5680740f9..b8b8d33eab55 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 3 |
7 | PATCHLEVEL = 10 |
8 | -SUBLEVEL = 48 |
9 | +SUBLEVEL = 49 |
10 | EXTRAVERSION = |
11 | NAME = TOSSUG Baby Fish |
12 | |
13 | diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h |
14 | index 381f556b664e..bf0838323896 100644 |
15 | --- a/arch/arm64/include/asm/memory.h |
16 | +++ b/arch/arm64/include/asm/memory.h |
17 | @@ -51,6 +51,8 @@ |
18 | #define TASK_SIZE_32 UL(0x100000000) |
19 | #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ |
20 | TASK_SIZE_32 : TASK_SIZE_64) |
21 | +#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \ |
22 | + TASK_SIZE_32 : TASK_SIZE_64) |
23 | #else |
24 | #define TASK_SIZE TASK_SIZE_64 |
25 | #endif /* CONFIG_COMPAT */ |
26 | diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c |
27 | index 872275659d98..c22c3d84e28b 100644 |
28 | --- a/arch/parisc/kernel/hardware.c |
29 | +++ b/arch/parisc/kernel/hardware.c |
30 | @@ -1205,7 +1205,8 @@ static struct hp_hardware hp_hardware_list[] = { |
31 | {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, |
32 | {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, |
33 | {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, |
34 | - {HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"}, |
35 | + {HPHW_FIO, 0x076, 0x000AD, 0x0, "Crestone Peak Core RS-232"}, |
36 | + {HPHW_FIO, 0x077, 0x000AD, 0x0, "Crestone Peak Fast? Core RS-232"}, |
37 | {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, |
38 | {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, |
39 | {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, |
40 | diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h |
41 | index f265049dd7d6..960bf64788a3 100644 |
42 | --- a/arch/powerpc/include/asm/perf_event_server.h |
43 | +++ b/arch/powerpc/include/asm/perf_event_server.h |
44 | @@ -59,7 +59,7 @@ struct power_pmu { |
45 | #define PPMU_SIAR_VALID 0x00000010 /* Processor has SIAR Valid bit */ |
46 | #define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */ |
47 | #define PPMU_HAS_SIER 0x00000040 /* Has SIER */ |
48 | -#define PPMU_BHRB 0x00000080 /* has BHRB feature enabled */ |
49 | +#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */ |
50 | |
51 | /* |
52 | * Values for flags to get_alternatives() |
53 | diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c |
54 | index d3ee2e50a3a6..846861a20b07 100644 |
55 | --- a/arch/powerpc/perf/core-book3s.c |
56 | +++ b/arch/powerpc/perf/core-book3s.c |
57 | @@ -749,7 +749,22 @@ static void power_pmu_read(struct perf_event *event) |
58 | } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); |
59 | |
60 | local64_add(delta, &event->count); |
61 | - local64_sub(delta, &event->hw.period_left); |
62 | + |
63 | + /* |
64 | + * A number of places program the PMC with (0x80000000 - period_left). |
65 | + * We never want period_left to be less than 1 because we will program |
66 | + * the PMC with a value >= 0x800000000 and an edge detected PMC will |
67 | + * roll around to 0 before taking an exception. We have seen this |
68 | + * on POWER8. |
69 | + * |
70 | + * To fix this, clamp the minimum value of period_left to 1. |
71 | + */ |
72 | + do { |
73 | + prev = local64_read(&event->hw.period_left); |
74 | + val = prev - delta; |
75 | + if (val < 1) |
76 | + val = 1; |
77 | + } while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev); |
78 | } |
79 | |
80 | /* |
81 | @@ -1327,6 +1342,9 @@ static int can_go_on_limited_pmc(struct perf_event *event, u64 ev, |
82 | if (ppmu->limited_pmc_event(ev)) |
83 | return 1; |
84 | |
85 | + if (ppmu->flags & PPMU_ARCH_207S) |
86 | + mtspr(SPRN_MMCR2, 0); |
87 | + |
88 | /* |
89 | * The requested event_id isn't on a limited PMC already; |
90 | * see if any alternative code goes on a limited PMC. |
91 | @@ -1421,7 +1439,7 @@ static int power_pmu_event_init(struct perf_event *event) |
92 | |
93 | if (has_branch_stack(event)) { |
94 | /* PMU has BHRB enabled */ |
95 | - if (!(ppmu->flags & PPMU_BHRB)) |
96 | + if (!(ppmu->flags & PPMU_ARCH_207S)) |
97 | return -EOPNOTSUPP; |
98 | } |
99 | |
100 | diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c |
101 | index 9aefaebedef1..ee3b4048ab4d 100644 |
102 | --- a/arch/powerpc/perf/power8-pmu.c |
103 | +++ b/arch/powerpc/perf/power8-pmu.c |
104 | @@ -592,7 +592,7 @@ static struct power_pmu power8_pmu = { |
105 | .get_constraint = power8_get_constraint, |
106 | .get_alternatives = power8_get_alternatives, |
107 | .disable_pmc = power8_disable_pmc, |
108 | - .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB, |
109 | + .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S, |
110 | .n_generic = ARRAY_SIZE(power8_generic_events), |
111 | .generic_events = power8_generic_events, |
112 | .attr_groups = power8_pmu_attr_groups, |
113 | diff --git a/arch/score/Kconfig b/arch/score/Kconfig |
114 | index c8def8bc9020..91182e95b887 100644 |
115 | --- a/arch/score/Kconfig |
116 | +++ b/arch/score/Kconfig |
117 | @@ -109,3 +109,6 @@ source "security/Kconfig" |
118 | source "crypto/Kconfig" |
119 | |
120 | source "lib/Kconfig" |
121 | + |
122 | +config NO_IOMEM |
123 | + def_bool y |
124 | diff --git a/arch/score/Makefile b/arch/score/Makefile |
125 | index 974aefe86123..9e3e060290e0 100644 |
126 | --- a/arch/score/Makefile |
127 | +++ b/arch/score/Makefile |
128 | @@ -20,8 +20,8 @@ cflags-y += -G0 -pipe -mel -mnhwloop -D__SCOREEL__ \ |
129 | # |
130 | KBUILD_AFLAGS += $(cflags-y) |
131 | KBUILD_CFLAGS += $(cflags-y) |
132 | -KBUILD_AFLAGS_MODULE += -mlong-calls |
133 | -KBUILD_CFLAGS_MODULE += -mlong-calls |
134 | +KBUILD_AFLAGS_MODULE += |
135 | +KBUILD_CFLAGS_MODULE += |
136 | LDFLAGS += --oformat elf32-littlescore |
137 | LDFLAGS_vmlinux += -G0 -static -nostdlib |
138 | |
139 | diff --git a/arch/score/include/asm/checksum.h b/arch/score/include/asm/checksum.h |
140 | index f909ac3144a4..961bd64015a8 100644 |
141 | --- a/arch/score/include/asm/checksum.h |
142 | +++ b/arch/score/include/asm/checksum.h |
143 | @@ -184,48 +184,57 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, |
144 | __wsum sum) |
145 | { |
146 | __asm__ __volatile__( |
147 | - ".set\tnoreorder\t\t\t# csum_ipv6_magic\n\t" |
148 | - ".set\tnoat\n\t" |
149 | - "addu\t%0, %5\t\t\t# proto (long in network byte order)\n\t" |
150 | - "sltu\t$1, %0, %5\n\t" |
151 | - "addu\t%0, $1\n\t" |
152 | - "addu\t%0, %6\t\t\t# csum\n\t" |
153 | - "sltu\t$1, %0, %6\n\t" |
154 | - "lw\t%1, 0(%2)\t\t\t# four words source address\n\t" |
155 | - "addu\t%0, $1\n\t" |
156 | - "addu\t%0, %1\n\t" |
157 | - "sltu\t$1, %0, %1\n\t" |
158 | - "lw\t%1, 4(%2)\n\t" |
159 | - "addu\t%0, $1\n\t" |
160 | - "addu\t%0, %1\n\t" |
161 | - "sltu\t$1, %0, %1\n\t" |
162 | - "lw\t%1, 8(%2)\n\t" |
163 | - "addu\t%0, $1\n\t" |
164 | - "addu\t%0, %1\n\t" |
165 | - "sltu\t$1, %0, %1\n\t" |
166 | - "lw\t%1, 12(%2)\n\t" |
167 | - "addu\t%0, $1\n\t" |
168 | - "addu\t%0, %1\n\t" |
169 | - "sltu\t$1, %0, %1\n\t" |
170 | - "lw\t%1, 0(%3)\n\t" |
171 | - "addu\t%0, $1\n\t" |
172 | - "addu\t%0, %1\n\t" |
173 | - "sltu\t$1, %0, %1\n\t" |
174 | - "lw\t%1, 4(%3)\n\t" |
175 | - "addu\t%0, $1\n\t" |
176 | - "addu\t%0, %1\n\t" |
177 | - "sltu\t$1, %0, %1\n\t" |
178 | - "lw\t%1, 8(%3)\n\t" |
179 | - "addu\t%0, $1\n\t" |
180 | - "addu\t%0, %1\n\t" |
181 | - "sltu\t$1, %0, %1\n\t" |
182 | - "lw\t%1, 12(%3)\n\t" |
183 | - "addu\t%0, $1\n\t" |
184 | - "addu\t%0, %1\n\t" |
185 | - "sltu\t$1, %0, %1\n\t" |
186 | - "addu\t%0, $1\t\t\t# Add final carry\n\t" |
187 | - ".set\tnoat\n\t" |
188 | - ".set\tnoreorder" |
189 | + ".set\tvolatile\t\t\t# csum_ipv6_magic\n\t" |
190 | + "add\t%0, %0, %5\t\t\t# proto (long in network byte order)\n\t" |
191 | + "cmp.c\t%5, %0\n\t" |
192 | + "bleu 1f\n\t" |
193 | + "addi\t%0, 0x1\n\t" |
194 | + "1:add\t%0, %0, %6\t\t\t# csum\n\t" |
195 | + "cmp.c\t%6, %0\n\t" |
196 | + "lw\t%1, [%2, 0]\t\t\t# four words source address\n\t" |
197 | + "bleu 1f\n\t" |
198 | + "addi\t%0, 0x1\n\t" |
199 | + "1:add\t%0, %0, %1\n\t" |
200 | + "cmp.c\t%1, %0\n\t" |
201 | + "1:lw\t%1, [%2, 4]\n\t" |
202 | + "bleu 1f\n\t" |
203 | + "addi\t%0, 0x1\n\t" |
204 | + "1:add\t%0, %0, %1\n\t" |
205 | + "cmp.c\t%1, %0\n\t" |
206 | + "lw\t%1, [%2,8]\n\t" |
207 | + "bleu 1f\n\t" |
208 | + "addi\t%0, 0x1\n\t" |
209 | + "1:add\t%0, %0, %1\n\t" |
210 | + "cmp.c\t%1, %0\n\t" |
211 | + "lw\t%1, [%2, 12]\n\t" |
212 | + "bleu 1f\n\t" |
213 | + "addi\t%0, 0x1\n\t" |
214 | + "1:add\t%0, %0,%1\n\t" |
215 | + "cmp.c\t%1, %0\n\t" |
216 | + "lw\t%1, [%3, 0]\n\t" |
217 | + "bleu 1f\n\t" |
218 | + "addi\t%0, 0x1\n\t" |
219 | + "1:add\t%0, %0, %1\n\t" |
220 | + "cmp.c\t%1, %0\n\t" |
221 | + "lw\t%1, [%3, 4]\n\t" |
222 | + "bleu 1f\n\t" |
223 | + "addi\t%0, 0x1\n\t" |
224 | + "1:add\t%0, %0, %1\n\t" |
225 | + "cmp.c\t%1, %0\n\t" |
226 | + "lw\t%1, [%3, 8]\n\t" |
227 | + "bleu 1f\n\t" |
228 | + "addi\t%0, 0x1\n\t" |
229 | + "1:add\t%0, %0, %1\n\t" |
230 | + "cmp.c\t%1, %0\n\t" |
231 | + "lw\t%1, [%3, 12]\n\t" |
232 | + "bleu 1f\n\t" |
233 | + "addi\t%0, 0x1\n\t" |
234 | + "1:add\t%0, %0, %1\n\t" |
235 | + "cmp.c\t%1, %0\n\t" |
236 | + "bleu 1f\n\t" |
237 | + "addi\t%0, 0x1\n\t" |
238 | + "1:\n\t" |
239 | + ".set\toptimize" |
240 | : "=r" (sum), "=r" (proto) |
241 | : "r" (saddr), "r" (daddr), |
242 | "0" (htonl(len)), "1" (htonl(proto)), "r" (sum)); |
243 | diff --git a/arch/score/include/asm/io.h b/arch/score/include/asm/io.h |
244 | index fbbfd7132e3b..574c8827abe2 100644 |
245 | --- a/arch/score/include/asm/io.h |
246 | +++ b/arch/score/include/asm/io.h |
247 | @@ -5,5 +5,4 @@ |
248 | |
249 | #define virt_to_bus virt_to_phys |
250 | #define bus_to_virt phys_to_virt |
251 | - |
252 | #endif /* _ASM_SCORE_IO_H */ |
253 | diff --git a/arch/score/include/asm/pgalloc.h b/arch/score/include/asm/pgalloc.h |
254 | index 059a61b7071b..716b3fd1d863 100644 |
255 | --- a/arch/score/include/asm/pgalloc.h |
256 | +++ b/arch/score/include/asm/pgalloc.h |
257 | @@ -2,7 +2,7 @@ |
258 | #define _ASM_SCORE_PGALLOC_H |
259 | |
260 | #include <linux/mm.h> |
261 | - |
262 | +#include <linux/highmem.h> |
263 | static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, |
264 | pte_t *pte) |
265 | { |
266 | diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S |
267 | index 7234ed09b7b7..befb87d30a89 100644 |
268 | --- a/arch/score/kernel/entry.S |
269 | +++ b/arch/score/kernel/entry.S |
270 | @@ -264,7 +264,7 @@ resume_kernel: |
271 | disable_irq |
272 | lw r8, [r28, TI_PRE_COUNT] |
273 | cmpz.c r8 |
274 | - bne r8, restore_all |
275 | + bne restore_all |
276 | need_resched: |
277 | lw r8, [r28, TI_FLAGS] |
278 | andri.c r9, r8, _TIF_NEED_RESCHED |
279 | @@ -415,7 +415,7 @@ ENTRY(handle_sys) |
280 | sw r9, [r0, PT_EPC] |
281 | |
282 | cmpi.c r27, __NR_syscalls # check syscall number |
283 | - bgeu illegal_syscall |
284 | + bcs illegal_syscall |
285 | |
286 | slli r8, r27, 2 # get syscall routine |
287 | la r11, sys_call_table |
288 | diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c |
289 | index f4c6d02421d3..a1519ad3d49d 100644 |
290 | --- a/arch/score/kernel/process.c |
291 | +++ b/arch/score/kernel/process.c |
292 | @@ -78,8 +78,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, |
293 | p->thread.reg0 = (unsigned long) childregs; |
294 | if (unlikely(p->flags & PF_KTHREAD)) { |
295 | memset(childregs, 0, sizeof(struct pt_regs)); |
296 | - p->thread->reg12 = usp; |
297 | - p->thread->reg13 = arg; |
298 | + p->thread.reg12 = usp; |
299 | + p->thread.reg13 = arg; |
300 | p->thread.reg3 = (unsigned long) ret_from_kernel_thread; |
301 | } else { |
302 | *childregs = *current_pt_regs(); |
303 | diff --git a/arch/score/kernel/vmlinux.lds.S b/arch/score/kernel/vmlinux.lds.S |
304 | index eebcbaa4e978..7274b5c4287e 100644 |
305 | --- a/arch/score/kernel/vmlinux.lds.S |
306 | +++ b/arch/score/kernel/vmlinux.lds.S |
307 | @@ -49,6 +49,7 @@ SECTIONS |
308 | } |
309 | |
310 | . = ALIGN(16); |
311 | + _sdata = .; /* Start of data section */ |
312 | RODATA |
313 | |
314 | EXCEPTION_TABLE(16) |
315 | diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c |
316 | index 6cbd8df348d2..9f5e71f06671 100644 |
317 | --- a/arch/x86/crypto/sha512_ssse3_glue.c |
318 | +++ b/arch/x86/crypto/sha512_ssse3_glue.c |
319 | @@ -141,7 +141,7 @@ static int sha512_ssse3_final(struct shash_desc *desc, u8 *out) |
320 | |
321 | /* save number of bits */ |
322 | bits[1] = cpu_to_be64(sctx->count[0] << 3); |
323 | - bits[0] = cpu_to_be64(sctx->count[1] << 3) | sctx->count[0] >> 61; |
324 | + bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); |
325 | |
326 | /* Pad out to 112 mod 128 and append length */ |
327 | index = sctx->count[0] & 0x7f; |
328 | diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c |
329 | index 9a1e6583910c..86c758de4b34 100644 |
330 | --- a/arch/x86/mm/ioremap.c |
331 | +++ b/arch/x86/mm/ioremap.c |
332 | @@ -50,6 +50,21 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size, |
333 | return err; |
334 | } |
335 | |
336 | +static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, |
337 | + void *arg) |
338 | +{ |
339 | + unsigned long i; |
340 | + |
341 | + for (i = 0; i < nr_pages; ++i) |
342 | + if (pfn_valid(start_pfn + i) && |
343 | + !PageReserved(pfn_to_page(start_pfn + i))) |
344 | + return 1; |
345 | + |
346 | + WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn); |
347 | + |
348 | + return 0; |
349 | +} |
350 | + |
351 | /* |
352 | * Remap an arbitrary physical address space into the kernel virtual |
353 | * address space. Needed when the kernel wants to access high addresses |
354 | @@ -93,14 +108,11 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, |
355 | /* |
356 | * Don't allow anybody to remap normal RAM that we're using.. |
357 | */ |
358 | + pfn = phys_addr >> PAGE_SHIFT; |
359 | last_pfn = last_addr >> PAGE_SHIFT; |
360 | - for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) { |
361 | - int is_ram = page_is_ram(pfn); |
362 | - |
363 | - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn))) |
364 | - return NULL; |
365 | - WARN_ON_ONCE(is_ram); |
366 | - } |
367 | + if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, |
368 | + __ioremap_check_ram) == 1) |
369 | + return NULL; |
370 | |
371 | /* |
372 | * Mappings have to be page-aligned |
373 | diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c |
374 | index 99427d7307af..7ae5ebd1e70e 100644 |
375 | --- a/drivers/acpi/battery.c |
376 | +++ b/drivers/acpi/battery.c |
377 | @@ -34,6 +34,7 @@ |
378 | #include <linux/dmi.h> |
379 | #include <linux/slab.h> |
380 | #include <linux/suspend.h> |
381 | +#include <linux/delay.h> |
382 | #include <asm/unaligned.h> |
383 | |
384 | #ifdef CONFIG_ACPI_PROCFS_POWER |
385 | @@ -1081,6 +1082,28 @@ static struct dmi_system_id bat_dmi_table[] = { |
386 | {}, |
387 | }; |
388 | |
389 | +/* |
390 | + * Some machines'(E,G Lenovo Z480) ECs are not stable |
391 | + * during boot up and this causes battery driver fails to be |
392 | + * probed due to failure of getting battery information |
393 | + * from EC sometimes. After several retries, the operation |
394 | + * may work. So add retry code here and 20ms sleep between |
395 | + * every retries. |
396 | + */ |
397 | +static int acpi_battery_update_retry(struct acpi_battery *battery) |
398 | +{ |
399 | + int retry, ret; |
400 | + |
401 | + for (retry = 5; retry; retry--) { |
402 | + ret = acpi_battery_update(battery); |
403 | + if (!ret) |
404 | + break; |
405 | + |
406 | + msleep(20); |
407 | + } |
408 | + return ret; |
409 | +} |
410 | + |
411 | static int acpi_battery_add(struct acpi_device *device) |
412 | { |
413 | int result = 0; |
414 | @@ -1100,9 +1123,11 @@ static int acpi_battery_add(struct acpi_device *device) |
415 | if (ACPI_SUCCESS(acpi_get_handle(battery->device->handle, |
416 | "_BIX", &handle))) |
417 | set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags); |
418 | - result = acpi_battery_update(battery); |
419 | + |
420 | + result = acpi_battery_update_retry(battery); |
421 | if (result) |
422 | goto fail; |
423 | + |
424 | #ifdef CONFIG_ACPI_PROCFS_POWER |
425 | result = acpi_battery_add_fs(device); |
426 | #endif |
427 | diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c |
428 | index c2dd598e25a2..b9cfaf1d94d8 100644 |
429 | --- a/drivers/acpi/resource.c |
430 | +++ b/drivers/acpi/resource.c |
431 | @@ -77,7 +77,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) |
432 | switch (ares->type) { |
433 | case ACPI_RESOURCE_TYPE_MEMORY24: |
434 | memory24 = &ares->data.memory24; |
435 | - if (!memory24->address_length) |
436 | + if (!memory24->minimum && !memory24->address_length) |
437 | return false; |
438 | acpi_dev_get_memresource(res, memory24->minimum, |
439 | memory24->address_length, |
440 | @@ -85,7 +85,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) |
441 | break; |
442 | case ACPI_RESOURCE_TYPE_MEMORY32: |
443 | memory32 = &ares->data.memory32; |
444 | - if (!memory32->address_length) |
445 | + if (!memory32->minimum && !memory32->address_length) |
446 | return false; |
447 | acpi_dev_get_memresource(res, memory32->minimum, |
448 | memory32->address_length, |
449 | @@ -93,7 +93,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) |
450 | break; |
451 | case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: |
452 | fixed_memory32 = &ares->data.fixed_memory32; |
453 | - if (!fixed_memory32->address_length) |
454 | + if (!fixed_memory32->address && !fixed_memory32->address_length) |
455 | return false; |
456 | acpi_dev_get_memresource(res, fixed_memory32->address, |
457 | fixed_memory32->address_length, |
458 | @@ -150,7 +150,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res) |
459 | switch (ares->type) { |
460 | case ACPI_RESOURCE_TYPE_IO: |
461 | io = &ares->data.io; |
462 | - if (!io->address_length) |
463 | + if (!io->minimum && !io->address_length) |
464 | return false; |
465 | acpi_dev_get_ioresource(res, io->minimum, |
466 | io->address_length, |
467 | @@ -158,7 +158,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res) |
468 | break; |
469 | case ACPI_RESOURCE_TYPE_FIXED_IO: |
470 | fixed_io = &ares->data.fixed_io; |
471 | - if (!fixed_io->address_length) |
472 | + if (!fixed_io->address && !fixed_io->address_length) |
473 | return false; |
474 | acpi_dev_get_ioresource(res, fixed_io->address, |
475 | fixed_io->address_length, |
476 | diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c |
477 | index 080c3c5e33f6..1fe259021747 100644 |
478 | --- a/drivers/clk/spear/spear3xx_clock.c |
479 | +++ b/drivers/clk/spear/spear3xx_clock.c |
480 | @@ -211,7 +211,7 @@ static inline void spear310_clk_init(void) { } |
481 | /* array of all spear 320 clock lookups */ |
482 | #ifdef CONFIG_MACH_SPEAR320 |
483 | |
484 | -#define SPEAR320_CONTROL_REG (soc_config_base + 0x0000) |
485 | +#define SPEAR320_CONTROL_REG (soc_config_base + 0x0010) |
486 | #define SPEAR320_EXT_CTRL_REG (soc_config_base + 0x0018) |
487 | |
488 | #define SPEAR320_UARTX_PCLK_MASK 0x1 |
489 | diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile |
490 | index 315b9231feb1..3b95322fec5e 100644 |
491 | --- a/drivers/cpufreq/Makefile |
492 | +++ b/drivers/cpufreq/Makefile |
493 | @@ -50,7 +50,7 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o |
494 | # LITTLE drivers, so that it is probed last. |
495 | obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o |
496 | |
497 | -obj-$(CONFIG_ARCH_DAVINCI_DA850) += davinci-cpufreq.o |
498 | +obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o |
499 | obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o |
500 | obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o |
501 | obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o |
502 | diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c |
503 | index 93e26339051d..e62a9ce3e4dc 100644 |
504 | --- a/drivers/gpu/drm/radeon/evergreen.c |
505 | +++ b/drivers/gpu/drm/radeon/evergreen.c |
506 | @@ -94,7 +94,7 @@ static const u32 evergreen_golden_registers[] = |
507 | 0x8c1c, 0xffffffff, 0x00001010, |
508 | 0x28350, 0xffffffff, 0x00000000, |
509 | 0xa008, 0xffffffff, 0x00010000, |
510 | - 0x5cc, 0xffffffff, 0x00000001, |
511 | + 0x5c4, 0xffffffff, 0x00000001, |
512 | 0x9508, 0xffffffff, 0x00000002, |
513 | 0x913c, 0x0000000f, 0x0000000a |
514 | }; |
515 | @@ -381,7 +381,7 @@ static const u32 cedar_golden_registers[] = |
516 | 0x8c1c, 0xffffffff, 0x00001010, |
517 | 0x28350, 0xffffffff, 0x00000000, |
518 | 0xa008, 0xffffffff, 0x00010000, |
519 | - 0x5cc, 0xffffffff, 0x00000001, |
520 | + 0x5c4, 0xffffffff, 0x00000001, |
521 | 0x9508, 0xffffffff, 0x00000002 |
522 | }; |
523 | |
524 | @@ -540,7 +540,7 @@ static const u32 juniper_mgcg_init[] = |
525 | static const u32 supersumo_golden_registers[] = |
526 | { |
527 | 0x5eb4, 0xffffffff, 0x00000002, |
528 | - 0x5cc, 0xffffffff, 0x00000001, |
529 | + 0x5c4, 0xffffffff, 0x00000001, |
530 | 0x7030, 0xffffffff, 0x00000011, |
531 | 0x7c30, 0xffffffff, 0x00000011, |
532 | 0x6104, 0x01000300, 0x00000000, |
533 | @@ -624,7 +624,7 @@ static const u32 sumo_golden_registers[] = |
534 | static const u32 wrestler_golden_registers[] = |
535 | { |
536 | 0x5eb4, 0xffffffff, 0x00000002, |
537 | - 0x5cc, 0xffffffff, 0x00000001, |
538 | + 0x5c4, 0xffffffff, 0x00000001, |
539 | 0x7030, 0xffffffff, 0x00000011, |
540 | 0x7c30, 0xffffffff, 0x00000011, |
541 | 0x6104, 0x01000300, 0x00000000, |
542 | diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c |
543 | index 670b555d2ca2..ae813fef0818 100644 |
544 | --- a/drivers/gpu/drm/radeon/rs600.c |
545 | +++ b/drivers/gpu/drm/radeon/rs600.c |
546 | @@ -582,8 +582,10 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
547 | return -EINVAL; |
548 | } |
549 | addr = addr & 0xFFFFFFFFFFFFF000ULL; |
550 | - addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED; |
551 | - addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE; |
552 | + if (addr != rdev->dummy_page.addr) |
553 | + addr |= R600_PTE_VALID | R600_PTE_READABLE | |
554 | + R600_PTE_WRITEABLE; |
555 | + addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED; |
556 | writeq(addr, ptr + (i * 8)); |
557 | return 0; |
558 | } |
559 | diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c |
560 | index fd02cb79a99c..b9f5d295cbec 100644 |
561 | --- a/drivers/hv/connection.c |
562 | +++ b/drivers/hv/connection.c |
563 | @@ -304,9 +304,13 @@ static void process_chn_event(u32 relid) |
564 | */ |
565 | |
566 | do { |
567 | - hv_begin_read(&channel->inbound); |
568 | + if (read_state) |
569 | + hv_begin_read(&channel->inbound); |
570 | channel->onchannel_callback(arg); |
571 | - bytes_to_read = hv_end_read(&channel->inbound); |
572 | + if (read_state) |
573 | + bytes_to_read = hv_end_read(&channel->inbound); |
574 | + else |
575 | + bytes_to_read = 0; |
576 | } while (read_state && (bytes_to_read != 0)); |
577 | } else { |
578 | pr_err("no channel callback for relid - %u\n", relid); |
579 | diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c |
580 | index f920619cd6da..27ad7fb06572 100644 |
581 | --- a/drivers/hwmon/adm1021.c |
582 | +++ b/drivers/hwmon/adm1021.c |
583 | @@ -185,7 +185,7 @@ static ssize_t set_temp_max(struct device *dev, |
584 | struct i2c_client *client = to_i2c_client(dev); |
585 | struct adm1021_data *data = i2c_get_clientdata(client); |
586 | long temp; |
587 | - int err; |
588 | + int reg_val, err; |
589 | |
590 | err = kstrtol(buf, 10, &temp); |
591 | if (err) |
592 | @@ -193,10 +193,11 @@ static ssize_t set_temp_max(struct device *dev, |
593 | temp /= 1000; |
594 | |
595 | mutex_lock(&data->update_lock); |
596 | - data->temp_max[index] = clamp_val(temp, -128, 127); |
597 | + reg_val = clamp_val(temp, -128, 127); |
598 | + data->temp_max[index] = reg_val * 1000; |
599 | if (!read_only) |
600 | i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index), |
601 | - data->temp_max[index]); |
602 | + reg_val); |
603 | mutex_unlock(&data->update_lock); |
604 | |
605 | return count; |
606 | @@ -210,7 +211,7 @@ static ssize_t set_temp_min(struct device *dev, |
607 | struct i2c_client *client = to_i2c_client(dev); |
608 | struct adm1021_data *data = i2c_get_clientdata(client); |
609 | long temp; |
610 | - int err; |
611 | + int reg_val, err; |
612 | |
613 | err = kstrtol(buf, 10, &temp); |
614 | if (err) |
615 | @@ -218,10 +219,11 @@ static ssize_t set_temp_min(struct device *dev, |
616 | temp /= 1000; |
617 | |
618 | mutex_lock(&data->update_lock); |
619 | - data->temp_min[index] = clamp_val(temp, -128, 127); |
620 | + reg_val = clamp_val(temp, -128, 127); |
621 | + data->temp_min[index] = reg_val * 1000; |
622 | if (!read_only) |
623 | i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index), |
624 | - data->temp_min[index]); |
625 | + reg_val); |
626 | mutex_unlock(&data->update_lock); |
627 | |
628 | return count; |
629 | diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c |
630 | index 9ee5e066423b..39441e5d922c 100644 |
631 | --- a/drivers/hwmon/adm1029.c |
632 | +++ b/drivers/hwmon/adm1029.c |
633 | @@ -232,6 +232,9 @@ static ssize_t set_fan_div(struct device *dev, |
634 | /* Update the value */ |
635 | reg = (reg & 0x3F) | (val << 6); |
636 | |
637 | + /* Update the cache */ |
638 | + data->fan_div[attr->index] = reg; |
639 | + |
640 | /* Write value */ |
641 | i2c_smbus_write_byte_data(client, |
642 | ADM1029_REG_FAN_DIV[attr->index], reg); |
643 | diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c |
644 | index 253ea396106d..bdceca0d7e22 100644 |
645 | --- a/drivers/hwmon/adm1031.c |
646 | +++ b/drivers/hwmon/adm1031.c |
647 | @@ -365,6 +365,7 @@ set_auto_temp_min(struct device *dev, struct device_attribute *attr, |
648 | if (ret) |
649 | return ret; |
650 | |
651 | + val = clamp_val(val, 0, 127000); |
652 | mutex_lock(&data->update_lock); |
653 | data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]); |
654 | adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr), |
655 | @@ -394,6 +395,7 @@ set_auto_temp_max(struct device *dev, struct device_attribute *attr, |
656 | if (ret) |
657 | return ret; |
658 | |
659 | + val = clamp_val(val, 0, 127000); |
660 | mutex_lock(&data->update_lock); |
661 | data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr], |
662 | data->pwm[nr]); |
663 | @@ -696,7 +698,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr, |
664 | if (ret) |
665 | return ret; |
666 | |
667 | - val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); |
668 | + val = clamp_val(val, -55000, 127000); |
669 | mutex_lock(&data->update_lock); |
670 | data->temp_min[nr] = TEMP_TO_REG(val); |
671 | adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr), |
672 | @@ -717,7 +719,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr, |
673 | if (ret) |
674 | return ret; |
675 | |
676 | - val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); |
677 | + val = clamp_val(val, -55000, 127000); |
678 | mutex_lock(&data->update_lock); |
679 | data->temp_max[nr] = TEMP_TO_REG(val); |
680 | adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr), |
681 | @@ -738,7 +740,7 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr, |
682 | if (ret) |
683 | return ret; |
684 | |
685 | - val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); |
686 | + val = clamp_val(val, -55000, 127000); |
687 | mutex_lock(&data->update_lock); |
688 | data->temp_crit[nr] = TEMP_TO_REG(val); |
689 | adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr), |
690 | diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c |
691 | index 4fe49d2bfe1d..09d2d78d482b 100644 |
692 | --- a/drivers/hwmon/amc6821.c |
693 | +++ b/drivers/hwmon/amc6821.c |
694 | @@ -707,7 +707,7 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, |
695 | get_temp_alarm, NULL, IDX_TEMP1_MAX); |
696 | static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, |
697 | get_temp_alarm, NULL, IDX_TEMP1_CRIT); |
698 | -static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO | S_IWUSR, |
699 | +static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, |
700 | get_temp, NULL, IDX_TEMP2_INPUT); |
701 | static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp, |
702 | set_temp, IDX_TEMP2_MIN); |
703 | diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c |
704 | index ea49834377c8..d1de1626a9d2 100644 |
705 | --- a/drivers/md/dm-io.c |
706 | +++ b/drivers/md/dm-io.c |
707 | @@ -10,6 +10,7 @@ |
708 | #include <linux/device-mapper.h> |
709 | |
710 | #include <linux/bio.h> |
711 | +#include <linux/completion.h> |
712 | #include <linux/mempool.h> |
713 | #include <linux/module.h> |
714 | #include <linux/sched.h> |
715 | @@ -34,7 +35,7 @@ struct dm_io_client { |
716 | struct io { |
717 | unsigned long error_bits; |
718 | atomic_t count; |
719 | - struct task_struct *sleeper; |
720 | + struct completion *wait; |
721 | struct dm_io_client *client; |
722 | io_notify_fn callback; |
723 | void *context; |
724 | @@ -122,8 +123,8 @@ static void dec_count(struct io *io, unsigned int region, int error) |
725 | invalidate_kernel_vmap_range(io->vma_invalidate_address, |
726 | io->vma_invalidate_size); |
727 | |
728 | - if (io->sleeper) |
729 | - wake_up_process(io->sleeper); |
730 | + if (io->wait) |
731 | + complete(io->wait); |
732 | |
733 | else { |
734 | unsigned long r = io->error_bits; |
735 | @@ -386,6 +387,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, |
736 | */ |
737 | volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1]; |
738 | struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io)); |
739 | + DECLARE_COMPLETION_ONSTACK(wait); |
740 | |
741 | if (num_regions > 1 && (rw & RW_MASK) != WRITE) { |
742 | WARN_ON(1); |
743 | @@ -394,7 +396,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, |
744 | |
745 | io->error_bits = 0; |
746 | atomic_set(&io->count, 1); /* see dispatch_io() */ |
747 | - io->sleeper = current; |
748 | + io->wait = &wait; |
749 | io->client = client; |
750 | |
751 | io->vma_invalidate_address = dp->vma_invalidate_address; |
752 | @@ -402,15 +404,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, |
753 | |
754 | dispatch_io(rw, num_regions, where, dp, io, 1); |
755 | |
756 | - while (1) { |
757 | - set_current_state(TASK_UNINTERRUPTIBLE); |
758 | - |
759 | - if (!atomic_read(&io->count)) |
760 | - break; |
761 | - |
762 | - io_schedule(); |
763 | - } |
764 | - set_current_state(TASK_RUNNING); |
765 | + wait_for_completion_io(&wait); |
766 | |
767 | if (error_bits) |
768 | *error_bits = io->error_bits; |
769 | @@ -433,7 +427,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions, |
770 | io = mempool_alloc(client->pool, GFP_NOIO); |
771 | io->error_bits = 0; |
772 | atomic_set(&io->count, 1); /* see dispatch_io() */ |
773 | - io->sleeper = NULL; |
774 | + io->wait = NULL; |
775 | io->client = client; |
776 | io->callback = fn; |
777 | io->context = context; |
778 | diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c |
779 | index ded751ca104a..b14379659e35 100644 |
780 | --- a/drivers/usb/serial/cp210x.c |
781 | +++ b/drivers/usb/serial/cp210x.c |
782 | @@ -153,6 +153,7 @@ static const struct usb_device_id id_table[] = { |
783 | { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ |
784 | { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ |
785 | { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ |
786 | + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ |
787 | { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ |
788 | { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ |
789 | { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ |
790 | diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c |
791 | index 3e315de9bbd4..9e75e3eaea4f 100644 |
792 | --- a/drivers/usb/serial/ftdi_sio.c |
793 | +++ b/drivers/usb/serial/ftdi_sio.c |
794 | @@ -723,7 +723,8 @@ static struct usb_device_id id_table_combined [] = { |
795 | { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) }, |
796 | { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) }, |
797 | { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) }, |
798 | - { USB_DEVICE(TESTO_VID, TESTO_USB_INTERFACE_PID) }, |
799 | + { USB_DEVICE(TESTO_VID, TESTO_1_PID) }, |
800 | + { USB_DEVICE(TESTO_VID, TESTO_3_PID) }, |
801 | { USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) }, |
802 | { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) }, |
803 | { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) }, |
804 | @@ -947,6 +948,8 @@ static struct usb_device_id id_table_combined [] = { |
805 | { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) }, |
806 | { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) }, |
807 | { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) }, |
808 | + /* Infineon Devices */ |
809 | + { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, |
810 | { }, /* Optional parameter entry */ |
811 | { } /* Terminating entry */ |
812 | }; |
813 | diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h |
814 | index 500474c48f4b..c4777bc6aee0 100644 |
815 | --- a/drivers/usb/serial/ftdi_sio_ids.h |
816 | +++ b/drivers/usb/serial/ftdi_sio_ids.h |
817 | @@ -584,6 +584,12 @@ |
818 | #define RATOC_PRODUCT_ID_USB60F 0xb020 |
819 | |
820 | /* |
821 | + * Infineon Technologies |
822 | + */ |
823 | +#define INFINEON_VID 0x058b |
824 | +#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */ |
825 | + |
826 | +/* |
827 | * Acton Research Corp. |
828 | */ |
829 | #define ACTON_VID 0x0647 /* Vendor ID */ |
830 | @@ -798,7 +804,8 @@ |
831 | * Submitted by Colin Leroy |
832 | */ |
833 | #define TESTO_VID 0x128D |
834 | -#define TESTO_USB_INTERFACE_PID 0x0001 |
835 | +#define TESTO_1_PID 0x0001 |
836 | +#define TESTO_3_PID 0x0003 |
837 | |
838 | /* |
839 | * Mobility Electronics products. |
840 | diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
841 | index e25e8ca09fe2..9da566a3f5c8 100644 |
842 | --- a/drivers/usb/serial/option.c |
843 | +++ b/drivers/usb/serial/option.c |
844 | @@ -1487,6 +1487,8 @@ static const struct usb_device_id option_ids[] = { |
845 | .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, |
846 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */ |
847 | .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, |
848 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */ |
849 | + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, |
850 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, |
851 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, |
852 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, |
853 | diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c |
854 | index 3da3bf1b2cd0..1ecd3a8c2444 100644 |
855 | --- a/fs/ext4/ialloc.c |
856 | +++ b/fs/ext4/ialloc.c |
857 | @@ -780,6 +780,13 @@ got: |
858 | goto out; |
859 | } |
860 | |
861 | + BUFFER_TRACE(group_desc_bh, "get_write_access"); |
862 | + err = ext4_journal_get_write_access(handle, group_desc_bh); |
863 | + if (err) { |
864 | + ext4_std_error(sb, err); |
865 | + goto out; |
866 | + } |
867 | + |
868 | /* We may have to initialize the block bitmap if it isn't already */ |
869 | if (ext4_has_group_desc_csum(sb) && |
870 | gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { |
871 | @@ -816,13 +823,6 @@ got: |
872 | } |
873 | } |
874 | |
875 | - BUFFER_TRACE(group_desc_bh, "get_write_access"); |
876 | - err = ext4_journal_get_write_access(handle, group_desc_bh); |
877 | - if (err) { |
878 | - ext4_std_error(sb, err); |
879 | - goto out; |
880 | - } |
881 | - |
882 | /* Update the relevant bg descriptor fields */ |
883 | if (ext4_has_group_desc_csum(sb)) { |
884 | int free; |
885 | diff --git a/fs/ext4/super.c b/fs/ext4/super.c |
886 | index a7a5f7ea74db..1fc14f7a08b2 100644 |
887 | --- a/fs/ext4/super.c |
888 | +++ b/fs/ext4/super.c |
889 | @@ -1483,8 +1483,6 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, |
890 | arg = JBD2_DEFAULT_MAX_COMMIT_AGE; |
891 | sbi->s_commit_interval = HZ * arg; |
892 | } else if (token == Opt_max_batch_time) { |
893 | - if (arg == 0) |
894 | - arg = EXT4_DEF_MAX_BATCH_TIME; |
895 | sbi->s_max_batch_time = arg; |
896 | } else if (token == Opt_min_batch_time) { |
897 | sbi->s_min_batch_time = arg; |
898 | @@ -2687,10 +2685,11 @@ static void print_daily_error_info(unsigned long arg) |
899 | es = sbi->s_es; |
900 | |
901 | if (es->s_error_count) |
902 | - ext4_msg(sb, KERN_NOTICE, "error count: %u", |
903 | + /* fsck newer than v1.41.13 is needed to clean this condition. */ |
904 | + ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u", |
905 | le32_to_cpu(es->s_error_count)); |
906 | if (es->s_first_error_time) { |
907 | - printk(KERN_NOTICE "EXT4-fs (%s): initial error at %u: %.*s:%d", |
908 | + printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d", |
909 | sb->s_id, le32_to_cpu(es->s_first_error_time), |
910 | (int) sizeof(es->s_first_error_func), |
911 | es->s_first_error_func, |
912 | @@ -2704,7 +2703,7 @@ static void print_daily_error_info(unsigned long arg) |
913 | printk("\n"); |
914 | } |
915 | if (es->s_last_error_time) { |
916 | - printk(KERN_NOTICE "EXT4-fs (%s): last error at %u: %.*s:%d", |
917 | + printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d", |
918 | sb->s_id, le32_to_cpu(es->s_last_error_time), |
919 | (int) sizeof(es->s_last_error_func), |
920 | es->s_last_error_func, |
921 | diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c |
922 | index a6917125f215..ec34e11d6854 100644 |
923 | --- a/fs/jbd2/transaction.c |
924 | +++ b/fs/jbd2/transaction.c |
925 | @@ -1442,9 +1442,12 @@ int jbd2_journal_stop(handle_t *handle) |
926 | * to perform a synchronous write. We do this to detect the |
927 | * case where a single process is doing a stream of sync |
928 | * writes. No point in waiting for joiners in that case. |
929 | + * |
930 | + * Setting max_batch_time to 0 disables this completely. |
931 | */ |
932 | pid = current->pid; |
933 | - if (handle->h_sync && journal->j_last_sync_writer != pid) { |
934 | + if (handle->h_sync && journal->j_last_sync_writer != pid && |
935 | + journal->j_max_batch_time) { |
936 | u64 commit_time, trans_time; |
937 | |
938 | journal->j_last_sync_writer = pid; |
939 | diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h |
940 | index d69cf637a15a..49a4d6f59108 100644 |
941 | --- a/include/linux/ring_buffer.h |
942 | +++ b/include/linux/ring_buffer.h |
943 | @@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k |
944 | __ring_buffer_alloc((size), (flags), &__key); \ |
945 | }) |
946 | |
947 | -void ring_buffer_wait(struct ring_buffer *buffer, int cpu); |
948 | +int ring_buffer_wait(struct ring_buffer *buffer, int cpu); |
949 | int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, |
950 | struct file *filp, poll_table *poll_table); |
951 | |
952 | diff --git a/kernel/cpuset.c b/kernel/cpuset.c |
953 | index d9dd521ddd6b..067750bbdad8 100644 |
954 | --- a/kernel/cpuset.c |
955 | +++ b/kernel/cpuset.c |
956 | @@ -1153,7 +1153,13 @@ done: |
957 | |
958 | int current_cpuset_is_being_rebound(void) |
959 | { |
960 | - return task_cs(current) == cpuset_being_rebound; |
961 | + int ret; |
962 | + |
963 | + rcu_read_lock(); |
964 | + ret = task_cs(current) == cpuset_being_rebound; |
965 | + rcu_read_unlock(); |
966 | + |
967 | + return ret; |
968 | } |
969 | |
970 | static int update_relax_domain_level(struct cpuset *cs, s64 val) |
971 | diff --git a/kernel/rtmutex-debug.h b/kernel/rtmutex-debug.h |
972 | index 14193d596d78..ab29b6a22669 100644 |
973 | --- a/kernel/rtmutex-debug.h |
974 | +++ b/kernel/rtmutex-debug.h |
975 | @@ -31,3 +31,8 @@ static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter, |
976 | { |
977 | return (waiter != NULL); |
978 | } |
979 | + |
980 | +static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w) |
981 | +{ |
982 | + debug_rt_mutex_print_deadlock(w); |
983 | +} |
984 | diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c |
985 | index 1e09308bf2a1..d9ca207cec0c 100644 |
986 | --- a/kernel/rtmutex.c |
987 | +++ b/kernel/rtmutex.c |
988 | @@ -82,6 +82,47 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) |
989 | owner = *p; |
990 | } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner); |
991 | } |
992 | + |
993 | +/* |
994 | + * Safe fastpath aware unlock: |
995 | + * 1) Clear the waiters bit |
996 | + * 2) Drop lock->wait_lock |
997 | + * 3) Try to unlock the lock with cmpxchg |
998 | + */ |
999 | +static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock) |
1000 | + __releases(lock->wait_lock) |
1001 | +{ |
1002 | + struct task_struct *owner = rt_mutex_owner(lock); |
1003 | + |
1004 | + clear_rt_mutex_waiters(lock); |
1005 | + raw_spin_unlock(&lock->wait_lock); |
1006 | + /* |
1007 | + * If a new waiter comes in between the unlock and the cmpxchg |
1008 | + * we have two situations: |
1009 | + * |
1010 | + * unlock(wait_lock); |
1011 | + * lock(wait_lock); |
1012 | + * cmpxchg(p, owner, 0) == owner |
1013 | + * mark_rt_mutex_waiters(lock); |
1014 | + * acquire(lock); |
1015 | + * or: |
1016 | + * |
1017 | + * unlock(wait_lock); |
1018 | + * lock(wait_lock); |
1019 | + * mark_rt_mutex_waiters(lock); |
1020 | + * |
1021 | + * cmpxchg(p, owner, 0) != owner |
1022 | + * enqueue_waiter(); |
1023 | + * unlock(wait_lock); |
1024 | + * lock(wait_lock); |
1025 | + * wake waiter(); |
1026 | + * unlock(wait_lock); |
1027 | + * lock(wait_lock); |
1028 | + * acquire(lock); |
1029 | + */ |
1030 | + return rt_mutex_cmpxchg(lock, owner, NULL); |
1031 | +} |
1032 | + |
1033 | #else |
1034 | # define rt_mutex_cmpxchg(l,c,n) (0) |
1035 | static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) |
1036 | @@ -89,6 +130,17 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) |
1037 | lock->owner = (struct task_struct *) |
1038 | ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); |
1039 | } |
1040 | + |
1041 | +/* |
1042 | + * Simple slow path only version: lock->owner is protected by lock->wait_lock. |
1043 | + */ |
1044 | +static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock) |
1045 | + __releases(lock->wait_lock) |
1046 | +{ |
1047 | + lock->owner = NULL; |
1048 | + raw_spin_unlock(&lock->wait_lock); |
1049 | + return true; |
1050 | +} |
1051 | #endif |
1052 | |
1053 | /* |
1054 | @@ -142,6 +194,11 @@ static void rt_mutex_adjust_prio(struct task_struct *task) |
1055 | */ |
1056 | int max_lock_depth = 1024; |
1057 | |
1058 | +static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) |
1059 | +{ |
1060 | + return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; |
1061 | +} |
1062 | + |
1063 | /* |
1064 | * Adjust the priority chain. Also used for deadlock detection. |
1065 | * Decreases task's usage by one - may thus free the task. |
1066 | @@ -150,6 +207,7 @@ int max_lock_depth = 1024; |
1067 | static int rt_mutex_adjust_prio_chain(struct task_struct *task, |
1068 | int deadlock_detect, |
1069 | struct rt_mutex *orig_lock, |
1070 | + struct rt_mutex *next_lock, |
1071 | struct rt_mutex_waiter *orig_waiter, |
1072 | struct task_struct *top_task) |
1073 | { |
1074 | @@ -183,7 +241,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, |
1075 | } |
1076 | put_task_struct(task); |
1077 | |
1078 | - return deadlock_detect ? -EDEADLK : 0; |
1079 | + return -EDEADLK; |
1080 | } |
1081 | retry: |
1082 | /* |
1083 | @@ -208,13 +266,32 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, |
1084 | goto out_unlock_pi; |
1085 | |
1086 | /* |
1087 | + * We dropped all locks after taking a refcount on @task, so |
1088 | + * the task might have moved on in the lock chain or even left |
1089 | + * the chain completely and blocks now on an unrelated lock or |
1090 | + * on @orig_lock. |
1091 | + * |
1092 | + * We stored the lock on which @task was blocked in @next_lock, |
1093 | + * so we can detect the chain change. |
1094 | + */ |
1095 | + if (next_lock != waiter->lock) |
1096 | + goto out_unlock_pi; |
1097 | + |
1098 | + /* |
1099 | * Drop out, when the task has no waiters. Note, |
1100 | * top_waiter can be NULL, when we are in the deboosting |
1101 | * mode! |
1102 | */ |
1103 | - if (top_waiter && (!task_has_pi_waiters(task) || |
1104 | - top_waiter != task_top_pi_waiter(task))) |
1105 | - goto out_unlock_pi; |
1106 | + if (top_waiter) { |
1107 | + if (!task_has_pi_waiters(task)) |
1108 | + goto out_unlock_pi; |
1109 | + /* |
1110 | + * If deadlock detection is off, we stop here if we |
1111 | + * are not the top pi waiter of the task. |
1112 | + */ |
1113 | + if (!detect_deadlock && top_waiter != task_top_pi_waiter(task)) |
1114 | + goto out_unlock_pi; |
1115 | + } |
1116 | |
1117 | /* |
1118 | * When deadlock detection is off then we check, if further |
1119 | @@ -230,11 +307,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, |
1120 | goto retry; |
1121 | } |
1122 | |
1123 | - /* Deadlock detection */ |
1124 | + /* |
1125 | + * Deadlock detection. If the lock is the same as the original |
1126 | + * lock which caused us to walk the lock chain or if the |
1127 | + * current lock is owned by the task which initiated the chain |
1128 | + * walk, we detected a deadlock. |
1129 | + */ |
1130 | if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { |
1131 | debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock); |
1132 | raw_spin_unlock(&lock->wait_lock); |
1133 | - ret = deadlock_detect ? -EDEADLK : 0; |
1134 | + ret = -EDEADLK; |
1135 | goto out_unlock_pi; |
1136 | } |
1137 | |
1138 | @@ -281,11 +363,26 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, |
1139 | __rt_mutex_adjust_prio(task); |
1140 | } |
1141 | |
1142 | + /* |
1143 | + * Check whether the task which owns the current lock is pi |
1144 | + * blocked itself. If yes we store a pointer to the lock for |
1145 | + * the lock chain change detection above. After we dropped |
1146 | + * task->pi_lock next_lock cannot be dereferenced anymore. |
1147 | + */ |
1148 | + next_lock = task_blocked_on_lock(task); |
1149 | + |
1150 | raw_spin_unlock_irqrestore(&task->pi_lock, flags); |
1151 | |
1152 | top_waiter = rt_mutex_top_waiter(lock); |
1153 | raw_spin_unlock(&lock->wait_lock); |
1154 | |
1155 | + /* |
1156 | + * We reached the end of the lock chain. Stop right here. No |
1157 | + * point to go back just to figure that out. |
1158 | + */ |
1159 | + if (!next_lock) |
1160 | + goto out_put_task; |
1161 | + |
1162 | if (!detect_deadlock && waiter != top_waiter) |
1163 | goto out_put_task; |
1164 | |
1165 | @@ -396,8 +493,21 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, |
1166 | { |
1167 | struct task_struct *owner = rt_mutex_owner(lock); |
1168 | struct rt_mutex_waiter *top_waiter = waiter; |
1169 | - unsigned long flags; |
1170 | + struct rt_mutex *next_lock; |
1171 | int chain_walk = 0, res; |
1172 | + unsigned long flags; |
1173 | + |
1174 | + /* |
1175 | + * Early deadlock detection. We really don't want the task to |
1176 | + * enqueue on itself just to untangle the mess later. It's not |
1177 | + * only an optimization. We drop the locks, so another waiter |
1178 | + * can come in before the chain walk detects the deadlock. So |
1179 | + * the other will detect the deadlock and return -EDEADLOCK, |
1180 | + * which is wrong, as the other waiter is not in a deadlock |
1181 | + * situation. |
1182 | + */ |
1183 | + if (owner == task) |
1184 | + return -EDEADLK; |
1185 | |
1186 | raw_spin_lock_irqsave(&task->pi_lock, flags); |
1187 | __rt_mutex_adjust_prio(task); |
1188 | @@ -418,20 +528,28 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, |
1189 | if (!owner) |
1190 | return 0; |
1191 | |
1192 | + raw_spin_lock_irqsave(&owner->pi_lock, flags); |
1193 | if (waiter == rt_mutex_top_waiter(lock)) { |
1194 | - raw_spin_lock_irqsave(&owner->pi_lock, flags); |
1195 | plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters); |
1196 | plist_add(&waiter->pi_list_entry, &owner->pi_waiters); |
1197 | |
1198 | __rt_mutex_adjust_prio(owner); |
1199 | if (owner->pi_blocked_on) |
1200 | chain_walk = 1; |
1201 | - raw_spin_unlock_irqrestore(&owner->pi_lock, flags); |
1202 | - } |
1203 | - else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) |
1204 | + } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) { |
1205 | chain_walk = 1; |
1206 | + } |
1207 | + |
1208 | + /* Store the lock on which owner is blocked or NULL */ |
1209 | + next_lock = task_blocked_on_lock(owner); |
1210 | |
1211 | - if (!chain_walk) |
1212 | + raw_spin_unlock_irqrestore(&owner->pi_lock, flags); |
1213 | + /* |
1214 | + * Even if full deadlock detection is on, if the owner is not |
1215 | + * blocked itself, we can avoid finding this out in the chain |
1216 | + * walk. |
1217 | + */ |
1218 | + if (!chain_walk || !next_lock) |
1219 | return 0; |
1220 | |
1221 | /* |
1222 | @@ -443,8 +561,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, |
1223 | |
1224 | raw_spin_unlock(&lock->wait_lock); |
1225 | |
1226 | - res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, |
1227 | - task); |
1228 | + res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, |
1229 | + next_lock, waiter, task); |
1230 | |
1231 | raw_spin_lock(&lock->wait_lock); |
1232 | |
1233 | @@ -454,7 +572,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, |
1234 | /* |
1235 | * Wake up the next waiter on the lock. |
1236 | * |
1237 | - * Remove the top waiter from the current tasks waiter list and wake it up. |
1238 | + * Remove the top waiter from the current tasks pi waiter list and |
1239 | + * wake it up. |
1240 | * |
1241 | * Called with lock->wait_lock held. |
1242 | */ |
1243 | @@ -475,10 +594,23 @@ static void wakeup_next_waiter(struct rt_mutex *lock) |
1244 | */ |
1245 | plist_del(&waiter->pi_list_entry, &current->pi_waiters); |
1246 | |
1247 | - rt_mutex_set_owner(lock, NULL); |
1248 | + /* |
1249 | + * As we are waking up the top waiter, and the waiter stays |
1250 | + * queued on the lock until it gets the lock, this lock |
1251 | + * obviously has waiters. Just set the bit here and this has |
1252 | + * the added benefit of forcing all new tasks into the |
1253 | + * slow path making sure no task of lower priority than |
1254 | + * the top waiter can steal this lock. |
1255 | + */ |
1256 | + lock->owner = (void *) RT_MUTEX_HAS_WAITERS; |
1257 | |
1258 | raw_spin_unlock_irqrestore(&current->pi_lock, flags); |
1259 | |
1260 | + /* |
1261 | + * It's safe to dereference waiter as it cannot go away as |
1262 | + * long as we hold lock->wait_lock. The waiter task needs to |
1263 | + * acquire it in order to dequeue the waiter. |
1264 | + */ |
1265 | wake_up_process(waiter->task); |
1266 | } |
1267 | |
1268 | @@ -493,8 +625,8 @@ static void remove_waiter(struct rt_mutex *lock, |
1269 | { |
1270 | int first = (waiter == rt_mutex_top_waiter(lock)); |
1271 | struct task_struct *owner = rt_mutex_owner(lock); |
1272 | + struct rt_mutex *next_lock = NULL; |
1273 | unsigned long flags; |
1274 | - int chain_walk = 0; |
1275 | |
1276 | raw_spin_lock_irqsave(&current->pi_lock, flags); |
1277 | plist_del(&waiter->list_entry, &lock->wait_list); |
1278 | @@ -518,15 +650,15 @@ static void remove_waiter(struct rt_mutex *lock, |
1279 | } |
1280 | __rt_mutex_adjust_prio(owner); |
1281 | |
1282 | - if (owner->pi_blocked_on) |
1283 | - chain_walk = 1; |
1284 | + /* Store the lock on which owner is blocked or NULL */ |
1285 | + next_lock = task_blocked_on_lock(owner); |
1286 | |
1287 | raw_spin_unlock_irqrestore(&owner->pi_lock, flags); |
1288 | } |
1289 | |
1290 | WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); |
1291 | |
1292 | - if (!chain_walk) |
1293 | + if (!next_lock) |
1294 | return; |
1295 | |
1296 | /* gets dropped in rt_mutex_adjust_prio_chain()! */ |
1297 | @@ -534,7 +666,7 @@ static void remove_waiter(struct rt_mutex *lock, |
1298 | |
1299 | raw_spin_unlock(&lock->wait_lock); |
1300 | |
1301 | - rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current); |
1302 | + rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current); |
1303 | |
1304 | raw_spin_lock(&lock->wait_lock); |
1305 | } |
1306 | @@ -547,6 +679,7 @@ static void remove_waiter(struct rt_mutex *lock, |
1307 | void rt_mutex_adjust_pi(struct task_struct *task) |
1308 | { |
1309 | struct rt_mutex_waiter *waiter; |
1310 | + struct rt_mutex *next_lock; |
1311 | unsigned long flags; |
1312 | |
1313 | raw_spin_lock_irqsave(&task->pi_lock, flags); |
1314 | @@ -556,12 +689,13 @@ void rt_mutex_adjust_pi(struct task_struct *task) |
1315 | raw_spin_unlock_irqrestore(&task->pi_lock, flags); |
1316 | return; |
1317 | } |
1318 | - |
1319 | + next_lock = waiter->lock; |
1320 | raw_spin_unlock_irqrestore(&task->pi_lock, flags); |
1321 | |
1322 | /* gets dropped in rt_mutex_adjust_prio_chain()! */ |
1323 | get_task_struct(task); |
1324 | - rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task); |
1325 | + |
1326 | + rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task); |
1327 | } |
1328 | |
1329 | /** |
1330 | @@ -613,6 +747,26 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, |
1331 | return ret; |
1332 | } |
1333 | |
1334 | +static void rt_mutex_handle_deadlock(int res, int detect_deadlock, |
1335 | + struct rt_mutex_waiter *w) |
1336 | +{ |
1337 | + /* |
1338 | + * If the result is not -EDEADLOCK or the caller requested |
1339 | + * deadlock detection, nothing to do here. |
1340 | + */ |
1341 | + if (res != -EDEADLOCK || detect_deadlock) |
1342 | + return; |
1343 | + |
1344 | + /* |
1345 | + * Yell lowdly and stop the task right here. |
1346 | + */ |
1347 | + rt_mutex_print_deadlock(w); |
1348 | + while (1) { |
1349 | + set_current_state(TASK_INTERRUPTIBLE); |
1350 | + schedule(); |
1351 | + } |
1352 | +} |
1353 | + |
1354 | /* |
1355 | * Slow path lock function: |
1356 | */ |
1357 | @@ -650,8 +804,10 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, |
1358 | |
1359 | set_current_state(TASK_RUNNING); |
1360 | |
1361 | - if (unlikely(ret)) |
1362 | + if (unlikely(ret)) { |
1363 | remove_waiter(lock, &waiter); |
1364 | + rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter); |
1365 | + } |
1366 | |
1367 | /* |
1368 | * try_to_take_rt_mutex() sets the waiter bit |
1369 | @@ -707,12 +863,49 @@ rt_mutex_slowunlock(struct rt_mutex *lock) |
1370 | |
1371 | rt_mutex_deadlock_account_unlock(current); |
1372 | |
1373 | - if (!rt_mutex_has_waiters(lock)) { |
1374 | - lock->owner = NULL; |
1375 | - raw_spin_unlock(&lock->wait_lock); |
1376 | - return; |
1377 | + /* |
1378 | + * We must be careful here if the fast path is enabled. If we |
1379 | + * have no waiters queued we cannot set owner to NULL here |
1380 | + * because of: |
1381 | + * |
1382 | + * foo->lock->owner = NULL; |
1383 | + * rtmutex_lock(foo->lock); <- fast path |
1384 | + * free = atomic_dec_and_test(foo->refcnt); |
1385 | + * rtmutex_unlock(foo->lock); <- fast path |
1386 | + * if (free) |
1387 | + * kfree(foo); |
1388 | + * raw_spin_unlock(foo->lock->wait_lock); |
1389 | + * |
1390 | + * So for the fastpath enabled kernel: |
1391 | + * |
1392 | + * Nothing can set the waiters bit as long as we hold |
1393 | + * lock->wait_lock. So we do the following sequence: |
1394 | + * |
1395 | + * owner = rt_mutex_owner(lock); |
1396 | + * clear_rt_mutex_waiters(lock); |
1397 | + * raw_spin_unlock(&lock->wait_lock); |
1398 | + * if (cmpxchg(&lock->owner, owner, 0) == owner) |
1399 | + * return; |
1400 | + * goto retry; |
1401 | + * |
1402 | + * The fastpath disabled variant is simple as all access to |
1403 | + * lock->owner is serialized by lock->wait_lock: |
1404 | + * |
1405 | + * lock->owner = NULL; |
1406 | + * raw_spin_unlock(&lock->wait_lock); |
1407 | + */ |
1408 | + while (!rt_mutex_has_waiters(lock)) { |
1409 | + /* Drops lock->wait_lock ! */ |
1410 | + if (unlock_rt_mutex_safe(lock) == true) |
1411 | + return; |
1412 | + /* Relock the rtmutex and try again */ |
1413 | + raw_spin_lock(&lock->wait_lock); |
1414 | } |
1415 | |
1416 | + /* |
1417 | + * The wakeup next waiter path does not suffer from the above |
1418 | + * race. See the comments there. |
1419 | + */ |
1420 | wakeup_next_waiter(lock); |
1421 | |
1422 | raw_spin_unlock(&lock->wait_lock); |
1423 | @@ -959,7 +1152,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, |
1424 | return 1; |
1425 | } |
1426 | |
1427 | - ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock); |
1428 | + /* We enforce deadlock detection for futexes */ |
1429 | + ret = task_blocks_on_rt_mutex(lock, waiter, task, 1); |
1430 | |
1431 | if (ret && !rt_mutex_owner(lock)) { |
1432 | /* |
1433 | diff --git a/kernel/rtmutex.h b/kernel/rtmutex.h |
1434 | index a1a1dd06421d..f6a1f3c133b1 100644 |
1435 | --- a/kernel/rtmutex.h |
1436 | +++ b/kernel/rtmutex.h |
1437 | @@ -24,3 +24,8 @@ |
1438 | #define debug_rt_mutex_print_deadlock(w) do { } while (0) |
1439 | #define debug_rt_mutex_detect_deadlock(w,d) (d) |
1440 | #define debug_rt_mutex_reset_waiter(w) do { } while (0) |
1441 | + |
1442 | +static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w) |
1443 | +{ |
1444 | + WARN(1, "rtmutex deadlock detected\n"); |
1445 | +} |
1446 | diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c |
1447 | index fd12cc56371f..8e94c1102636 100644 |
1448 | --- a/kernel/trace/ring_buffer.c |
1449 | +++ b/kernel/trace/ring_buffer.c |
1450 | @@ -543,7 +543,7 @@ static void rb_wake_up_waiters(struct irq_work *work) |
1451 | * as data is added to any of the @buffer's cpu buffers. Otherwise |
1452 | * it will wait for data to be added to a specific cpu buffer. |
1453 | */ |
1454 | -void ring_buffer_wait(struct ring_buffer *buffer, int cpu) |
1455 | +int ring_buffer_wait(struct ring_buffer *buffer, int cpu) |
1456 | { |
1457 | struct ring_buffer_per_cpu *cpu_buffer; |
1458 | DEFINE_WAIT(wait); |
1459 | @@ -557,6 +557,8 @@ void ring_buffer_wait(struct ring_buffer *buffer, int cpu) |
1460 | if (cpu == RING_BUFFER_ALL_CPUS) |
1461 | work = &buffer->irq_work; |
1462 | else { |
1463 | + if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
1464 | + return -ENODEV; |
1465 | cpu_buffer = buffer->buffers[cpu]; |
1466 | work = &cpu_buffer->irq_work; |
1467 | } |
1468 | @@ -591,6 +593,7 @@ void ring_buffer_wait(struct ring_buffer *buffer, int cpu) |
1469 | schedule(); |
1470 | |
1471 | finish_wait(&work->waiters, &wait); |
1472 | + return 0; |
1473 | } |
1474 | |
1475 | /** |
1476 | diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c |
1477 | index 21920add7972..8fe92ce43f39 100644 |
1478 | --- a/kernel/trace/trace.c |
1479 | +++ b/kernel/trace/trace.c |
1480 | @@ -1027,13 +1027,13 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) |
1481 | } |
1482 | #endif /* CONFIG_TRACER_MAX_TRACE */ |
1483 | |
1484 | -static void default_wait_pipe(struct trace_iterator *iter) |
1485 | +static int default_wait_pipe(struct trace_iterator *iter) |
1486 | { |
1487 | /* Iterators are static, they should be filled or empty */ |
1488 | if (trace_buffer_iter(iter, iter->cpu_file)) |
1489 | - return; |
1490 | + return 0; |
1491 | |
1492 | - ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file); |
1493 | + return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file); |
1494 | } |
1495 | |
1496 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
1497 | @@ -4054,17 +4054,19 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table) |
1498 | * |
1499 | * Anyway, this is really very primitive wakeup. |
1500 | */ |
1501 | -void poll_wait_pipe(struct trace_iterator *iter) |
1502 | +int poll_wait_pipe(struct trace_iterator *iter) |
1503 | { |
1504 | set_current_state(TASK_INTERRUPTIBLE); |
1505 | /* sleep for 100 msecs, and try again. */ |
1506 | schedule_timeout(HZ / 10); |
1507 | + return 0; |
1508 | } |
1509 | |
1510 | /* Must be called with trace_types_lock mutex held. */ |
1511 | static int tracing_wait_pipe(struct file *filp) |
1512 | { |
1513 | struct trace_iterator *iter = filp->private_data; |
1514 | + int ret; |
1515 | |
1516 | while (trace_empty(iter)) { |
1517 | |
1518 | @@ -4074,10 +4076,13 @@ static int tracing_wait_pipe(struct file *filp) |
1519 | |
1520 | mutex_unlock(&iter->mutex); |
1521 | |
1522 | - iter->trace->wait_pipe(iter); |
1523 | + ret = iter->trace->wait_pipe(iter); |
1524 | |
1525 | mutex_lock(&iter->mutex); |
1526 | |
1527 | + if (ret) |
1528 | + return ret; |
1529 | + |
1530 | if (signal_pending(current)) |
1531 | return -EINTR; |
1532 | |
1533 | @@ -5011,8 +5016,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, |
1534 | goto out_unlock; |
1535 | } |
1536 | mutex_unlock(&trace_types_lock); |
1537 | - iter->trace->wait_pipe(iter); |
1538 | + ret = iter->trace->wait_pipe(iter); |
1539 | mutex_lock(&trace_types_lock); |
1540 | + if (ret) { |
1541 | + size = ret; |
1542 | + goto out_unlock; |
1543 | + } |
1544 | if (signal_pending(current)) { |
1545 | size = -EINTR; |
1546 | goto out_unlock; |
1547 | @@ -5224,8 +5233,10 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, |
1548 | goto out; |
1549 | } |
1550 | mutex_unlock(&trace_types_lock); |
1551 | - iter->trace->wait_pipe(iter); |
1552 | + ret = iter->trace->wait_pipe(iter); |
1553 | mutex_lock(&trace_types_lock); |
1554 | + if (ret) |
1555 | + goto out; |
1556 | if (signal_pending(current)) { |
1557 | ret = -EINTR; |
1558 | goto out; |
1559 | diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h |
1560 | index 51b44483eb78..aa0e736b72ac 100644 |
1561 | --- a/kernel/trace/trace.h |
1562 | +++ b/kernel/trace/trace.h |
1563 | @@ -342,7 +342,7 @@ struct tracer { |
1564 | void (*stop)(struct trace_array *tr); |
1565 | void (*open)(struct trace_iterator *iter); |
1566 | void (*pipe_open)(struct trace_iterator *iter); |
1567 | - void (*wait_pipe)(struct trace_iterator *iter); |
1568 | + int (*wait_pipe)(struct trace_iterator *iter); |
1569 | void (*close)(struct trace_iterator *iter); |
1570 | void (*pipe_close)(struct trace_iterator *iter); |
1571 | ssize_t (*read)(struct trace_iterator *iter, |
1572 | @@ -557,7 +557,7 @@ void trace_init_global_iter(struct trace_iterator *iter); |
1573 | |
1574 | void tracing_iter_reset(struct trace_iterator *iter, int cpu); |
1575 | |
1576 | -void poll_wait_pipe(struct trace_iterator *iter); |
1577 | +int poll_wait_pipe(struct trace_iterator *iter); |
1578 | |
1579 | void ftrace(struct trace_array *tr, |
1580 | struct trace_array_cpu *data, |
1581 | diff --git a/kernel/workqueue.c b/kernel/workqueue.c |
1582 | index 652f36dd40de..c2f9d6ca7e5e 100644 |
1583 | --- a/kernel/workqueue.c |
1584 | +++ b/kernel/workqueue.c |
1585 | @@ -3373,6 +3373,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq) |
1586 | } |
1587 | } |
1588 | |
1589 | + dev_set_uevent_suppress(&wq_dev->dev, false); |
1590 | kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD); |
1591 | return 0; |
1592 | } |
1593 | @@ -4967,7 +4968,7 @@ static void __init wq_numa_init(void) |
1594 | BUG_ON(!tbl); |
1595 | |
1596 | for_each_node(node) |
1597 | - BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, |
1598 | + BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL, |
1599 | node_online(node) ? node : NUMA_NO_NODE)); |
1600 | |
1601 | for_each_possible_cpu(cpu) { |
1602 | diff --git a/mm/mempolicy.c b/mm/mempolicy.c |
1603 | index 1124d5fc06e9..b2061bb5af73 100644 |
1604 | --- a/mm/mempolicy.c |
1605 | +++ b/mm/mempolicy.c |
1606 | @@ -2086,7 +2086,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old) |
1607 | } else |
1608 | *new = *old; |
1609 | |
1610 | - rcu_read_lock(); |
1611 | if (current_cpuset_is_being_rebound()) { |
1612 | nodemask_t mems = cpuset_mems_allowed(current); |
1613 | if (new->flags & MPOL_F_REBINDING) |
1614 | @@ -2094,7 +2093,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old) |
1615 | else |
1616 | mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE); |
1617 | } |
1618 | - rcu_read_unlock(); |
1619 | atomic_set(&new->refcnt, 1); |
1620 | return new; |
1621 | } |