Contents of /trunk/kernel-magellan/patches-3.15/0105-3.15.6-all-fixes.patch
Parent Directory | Revision Log
Revision 2486 -
(show annotations)
(download)
Mon Jul 21 12:24:54 2014 UTC (10 years, 2 months ago) by niro
File size: 99860 byte(s)
-linux-3.15.6
1 | diff --git a/Documentation/cpu-freq/intel-pstate.txt b/Documentation/cpu-freq/intel-pstate.txt |
2 | index e742d21dbd96..a69ffe1d54d5 100644 |
3 | --- a/Documentation/cpu-freq/intel-pstate.txt |
4 | +++ b/Documentation/cpu-freq/intel-pstate.txt |
5 | @@ -15,10 +15,13 @@ New sysfs files for controlling P state selection have been added to |
6 | /sys/devices/system/cpu/intel_pstate/ |
7 | |
8 | max_perf_pct: limits the maximum P state that will be requested by |
9 | - the driver stated as a percentage of the available performance. |
10 | + the driver stated as a percentage of the available performance. The |
11 | + available (P states) performance may be reduced by the no_turbo |
12 | + setting described below. |
13 | |
14 | min_perf_pct: limits the minimum P state that will be requested by |
15 | - the driver stated as a percentage of the available performance. |
16 | + the driver stated as a percentage of the max (non-turbo) |
17 | + performance level. |
18 | |
19 | no_turbo: limits the driver to selecting P states below the turbo |
20 | frequency range. |
21 | diff --git a/Makefile b/Makefile |
22 | index e6b01ed8fd9f..fefa0237c2d1 100644 |
23 | --- a/Makefile |
24 | +++ b/Makefile |
25 | @@ -1,6 +1,6 @@ |
26 | VERSION = 3 |
27 | PATCHLEVEL = 15 |
28 | -SUBLEVEL = 5 |
29 | +SUBLEVEL = 6 |
30 | EXTRAVERSION = |
31 | NAME = Shuffling Zombie Juror |
32 | |
33 | diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h |
34 | index 993bce527b85..902eb708804a 100644 |
35 | --- a/arch/arm64/include/asm/memory.h |
36 | +++ b/arch/arm64/include/asm/memory.h |
37 | @@ -56,6 +56,8 @@ |
38 | #define TASK_SIZE_32 UL(0x100000000) |
39 | #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ |
40 | TASK_SIZE_32 : TASK_SIZE_64) |
41 | +#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \ |
42 | + TASK_SIZE_32 : TASK_SIZE_64) |
43 | #else |
44 | #define TASK_SIZE TASK_SIZE_64 |
45 | #endif /* CONFIG_COMPAT */ |
46 | diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c |
47 | index 608716f8496b..af3bc359dc70 100644 |
48 | --- a/arch/parisc/kernel/hardware.c |
49 | +++ b/arch/parisc/kernel/hardware.c |
50 | @@ -1210,7 +1210,8 @@ static struct hp_hardware hp_hardware_list[] = { |
51 | {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, |
52 | {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, |
53 | {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, |
54 | - {HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"}, |
55 | + {HPHW_FIO, 0x076, 0x000AD, 0x0, "Crestone Peak Core RS-232"}, |
56 | + {HPHW_FIO, 0x077, 0x000AD, 0x0, "Crestone Peak Fast? Core RS-232"}, |
57 | {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, |
58 | {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, |
59 | {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, |
60 | diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c |
61 | index bb9f3b64de55..ec741fe02ab6 100644 |
62 | --- a/arch/parisc/kernel/sys_parisc32.c |
63 | +++ b/arch/parisc/kernel/sys_parisc32.c |
64 | @@ -4,6 +4,7 @@ |
65 | * Copyright (C) 2000-2001 Hewlett Packard Company |
66 | * Copyright (C) 2000 John Marvin |
67 | * Copyright (C) 2001 Matthew Wilcox |
68 | + * Copyright (C) 2014 Helge Deller <deller@gmx.de> |
69 | * |
70 | * These routines maintain argument size conversion between 32bit and 64bit |
71 | * environment. Based heavily on sys_ia32.c and sys_sparc32.c. |
72 | @@ -57,3 +58,12 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23, |
73 | current->comm, current->pid, r20); |
74 | return -ENOSYS; |
75 | } |
76 | + |
77 | +asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags, |
78 | + compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd, |
79 | + const char __user * pathname) |
80 | +{ |
81 | + return sys_fanotify_mark(fanotify_fd, flags, |
82 | + ((__u64)mask1 << 32) | mask0, |
83 | + dfd, pathname); |
84 | +} |
85 | diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S |
86 | index c5fa7a697fba..84c5d3a58fa1 100644 |
87 | --- a/arch/parisc/kernel/syscall_table.S |
88 | +++ b/arch/parisc/kernel/syscall_table.S |
89 | @@ -418,7 +418,7 @@ |
90 | ENTRY_SAME(accept4) /* 320 */ |
91 | ENTRY_SAME(prlimit64) |
92 | ENTRY_SAME(fanotify_init) |
93 | - ENTRY_COMP(fanotify_mark) |
94 | + ENTRY_DIFF(fanotify_mark) |
95 | ENTRY_COMP(clock_adjtime) |
96 | ENTRY_SAME(name_to_handle_at) /* 325 */ |
97 | ENTRY_COMP(open_by_handle_at) |
98 | diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig |
99 | index e0998997943b..c95c4b8c3e74 100644 |
100 | --- a/arch/powerpc/Kconfig |
101 | +++ b/arch/powerpc/Kconfig |
102 | @@ -414,7 +414,7 @@ config KEXEC |
103 | config CRASH_DUMP |
104 | bool "Build a kdump crash kernel" |
105 | depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP) |
106 | - select RELOCATABLE if PPC64 || 44x || FSL_BOOKE |
107 | + select RELOCATABLE if (PPC64 && !COMPILE_TEST) || 44x || FSL_BOOKE |
108 | help |
109 | Build a kernel suitable for use as a kdump capture kernel. |
110 | The same kernel binary can be used as production kernel and dump |
111 | @@ -1009,6 +1009,7 @@ endmenu |
112 | if PPC64 |
113 | config RELOCATABLE |
114 | bool "Build a relocatable kernel" |
115 | + depends on !COMPILE_TEST |
116 | select NONSTATIC_KERNEL |
117 | help |
118 | This builds a kernel image that is capable of running anywhere |
119 | diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h |
120 | index 9ed737146dbb..b3e936027b26 100644 |
121 | --- a/arch/powerpc/include/asm/perf_event_server.h |
122 | +++ b/arch/powerpc/include/asm/perf_event_server.h |
123 | @@ -61,8 +61,7 @@ struct power_pmu { |
124 | #define PPMU_SIAR_VALID 0x00000010 /* Processor has SIAR Valid bit */ |
125 | #define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */ |
126 | #define PPMU_HAS_SIER 0x00000040 /* Has SIER */ |
127 | -#define PPMU_BHRB 0x00000080 /* has BHRB feature enabled */ |
128 | -#define PPMU_EBB 0x00000100 /* supports event based branch */ |
129 | +#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */ |
130 | |
131 | /* |
132 | * Values for flags to get_alternatives() |
133 | diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S |
134 | index e18e3cfc32de..92be3fad1075 100644 |
135 | --- a/arch/powerpc/kvm/book3s_hv_interrupts.S |
136 | +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S |
137 | @@ -127,11 +127,6 @@ BEGIN_FTR_SECTION |
138 | stw r10, HSTATE_PMC + 24(r13) |
139 | stw r11, HSTATE_PMC + 28(r13) |
140 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) |
141 | -BEGIN_FTR_SECTION |
142 | - mfspr r9, SPRN_SIER |
143 | - std r8, HSTATE_MMCR + 40(r13) |
144 | - std r9, HSTATE_MMCR + 48(r13) |
145 | -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
146 | 31: |
147 | |
148 | /* |
149 | diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c |
150 | index 4520c9356b54..6b0641c3f03f 100644 |
151 | --- a/arch/powerpc/perf/core-book3s.c |
152 | +++ b/arch/powerpc/perf/core-book3s.c |
153 | @@ -485,7 +485,7 @@ static bool is_ebb_event(struct perf_event *event) |
154 | * check that the PMU supports EBB, meaning those that don't can still |
155 | * use bit 63 of the event code for something else if they wish. |
156 | */ |
157 | - return (ppmu->flags & PPMU_EBB) && |
158 | + return (ppmu->flags & PPMU_ARCH_207S) && |
159 | ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1); |
160 | } |
161 | |
162 | @@ -777,7 +777,7 @@ void perf_event_print_debug(void) |
163 | if (ppmu->flags & PPMU_HAS_SIER) |
164 | sier = mfspr(SPRN_SIER); |
165 | |
166 | - if (ppmu->flags & PPMU_EBB) { |
167 | + if (ppmu->flags & PPMU_ARCH_207S) { |
168 | pr_info("MMCR2: %016lx EBBHR: %016lx\n", |
169 | mfspr(SPRN_MMCR2), mfspr(SPRN_EBBHR)); |
170 | pr_info("EBBRR: %016lx BESCR: %016lx\n", |
171 | @@ -996,7 +996,22 @@ static void power_pmu_read(struct perf_event *event) |
172 | } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); |
173 | |
174 | local64_add(delta, &event->count); |
175 | - local64_sub(delta, &event->hw.period_left); |
176 | + |
177 | + /* |
178 | + * A number of places program the PMC with (0x80000000 - period_left). |
179 | + * We never want period_left to be less than 1 because we will program |
180 | + * the PMC with a value >= 0x800000000 and an edge detected PMC will |
181 | + * roll around to 0 before taking an exception. We have seen this |
182 | + * on POWER8. |
183 | + * |
184 | + * To fix this, clamp the minimum value of period_left to 1. |
185 | + */ |
186 | + do { |
187 | + prev = local64_read(&event->hw.period_left); |
188 | + val = prev - delta; |
189 | + if (val < 1) |
190 | + val = 1; |
191 | + } while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev); |
192 | } |
193 | |
194 | /* |
195 | @@ -1300,6 +1315,9 @@ static void power_pmu_enable(struct pmu *pmu) |
196 | |
197 | write_mmcr0(cpuhw, mmcr0); |
198 | |
199 | + if (ppmu->flags & PPMU_ARCH_207S) |
200 | + mtspr(SPRN_MMCR2, 0); |
201 | + |
202 | /* |
203 | * Enable instruction sampling if necessary |
204 | */ |
205 | @@ -1696,7 +1714,7 @@ static int power_pmu_event_init(struct perf_event *event) |
206 | |
207 | if (has_branch_stack(event)) { |
208 | /* PMU has BHRB enabled */ |
209 | - if (!(ppmu->flags & PPMU_BHRB)) |
210 | + if (!(ppmu->flags & PPMU_ARCH_207S)) |
211 | return -EOPNOTSUPP; |
212 | } |
213 | |
214 | diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c |
215 | index fe2763b6e039..639cd9156585 100644 |
216 | --- a/arch/powerpc/perf/power8-pmu.c |
217 | +++ b/arch/powerpc/perf/power8-pmu.c |
218 | @@ -792,7 +792,7 @@ static struct power_pmu power8_pmu = { |
219 | .get_constraint = power8_get_constraint, |
220 | .get_alternatives = power8_get_alternatives, |
221 | .disable_pmc = power8_disable_pmc, |
222 | - .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB, |
223 | + .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S, |
224 | .n_generic = ARRAY_SIZE(power8_generic_events), |
225 | .generic_events = power8_generic_events, |
226 | .cache_events = &power8_cache_events, |
227 | diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c |
228 | index f30cd10293f0..8626b03e83b7 100644 |
229 | --- a/arch/x86/crypto/sha512_ssse3_glue.c |
230 | +++ b/arch/x86/crypto/sha512_ssse3_glue.c |
231 | @@ -141,7 +141,7 @@ static int sha512_ssse3_final(struct shash_desc *desc, u8 *out) |
232 | |
233 | /* save number of bits */ |
234 | bits[1] = cpu_to_be64(sctx->count[0] << 3); |
235 | - bits[0] = cpu_to_be64(sctx->count[1] << 3) | sctx->count[0] >> 61; |
236 | + bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); |
237 | |
238 | /* Pad out to 112 mod 128 and append length */ |
239 | index = sctx->count[0] & 0x7f; |
240 | diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c |
241 | index 597ac155c91c..bc7527e109c8 100644 |
242 | --- a/arch/x86/mm/ioremap.c |
243 | +++ b/arch/x86/mm/ioremap.c |
244 | @@ -50,6 +50,21 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size, |
245 | return err; |
246 | } |
247 | |
248 | +static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, |
249 | + void *arg) |
250 | +{ |
251 | + unsigned long i; |
252 | + |
253 | + for (i = 0; i < nr_pages; ++i) |
254 | + if (pfn_valid(start_pfn + i) && |
255 | + !PageReserved(pfn_to_page(start_pfn + i))) |
256 | + return 1; |
257 | + |
258 | + WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn); |
259 | + |
260 | + return 0; |
261 | +} |
262 | + |
263 | /* |
264 | * Remap an arbitrary physical address space into the kernel virtual |
265 | * address space. Needed when the kernel wants to access high addresses |
266 | @@ -93,14 +108,11 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, |
267 | /* |
268 | * Don't allow anybody to remap normal RAM that we're using.. |
269 | */ |
270 | + pfn = phys_addr >> PAGE_SHIFT; |
271 | last_pfn = last_addr >> PAGE_SHIFT; |
272 | - for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) { |
273 | - int is_ram = page_is_ram(pfn); |
274 | - |
275 | - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn))) |
276 | - return NULL; |
277 | - WARN_ON_ONCE(is_ram); |
278 | - } |
279 | + if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, |
280 | + __ioremap_check_ram) == 1) |
281 | + return NULL; |
282 | |
283 | /* |
284 | * Mappings have to be page-aligned |
285 | diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c |
286 | index c67f6f5ad611..36b0e61f9c09 100644 |
287 | --- a/drivers/acpi/ac.c |
288 | +++ b/drivers/acpi/ac.c |
289 | @@ -30,6 +30,10 @@ |
290 | #include <linux/types.h> |
291 | #include <linux/dmi.h> |
292 | #include <linux/delay.h> |
293 | +#ifdef CONFIG_ACPI_PROCFS_POWER |
294 | +#include <linux/proc_fs.h> |
295 | +#include <linux/seq_file.h> |
296 | +#endif |
297 | #include <linux/platform_device.h> |
298 | #include <linux/power_supply.h> |
299 | #include <linux/acpi.h> |
300 | @@ -52,6 +56,7 @@ MODULE_AUTHOR("Paul Diefenbaugh"); |
301 | MODULE_DESCRIPTION("ACPI AC Adapter Driver"); |
302 | MODULE_LICENSE("GPL"); |
303 | |
304 | + |
305 | static int acpi_ac_add(struct acpi_device *device); |
306 | static int acpi_ac_remove(struct acpi_device *device); |
307 | static void acpi_ac_notify(struct acpi_device *device, u32 event); |
308 | @@ -67,6 +72,13 @@ static int acpi_ac_resume(struct device *dev); |
309 | #endif |
310 | static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); |
311 | |
312 | +#ifdef CONFIG_ACPI_PROCFS_POWER |
313 | +extern struct proc_dir_entry *acpi_lock_ac_dir(void); |
314 | +extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir); |
315 | +static int acpi_ac_open_fs(struct inode *inode, struct file *file); |
316 | +#endif |
317 | + |
318 | + |
319 | static int ac_sleep_before_get_state_ms; |
320 | |
321 | static struct acpi_driver acpi_ac_driver = { |
322 | @@ -91,6 +103,16 @@ struct acpi_ac { |
323 | |
324 | #define to_acpi_ac(x) container_of(x, struct acpi_ac, charger) |
325 | |
326 | +#ifdef CONFIG_ACPI_PROCFS_POWER |
327 | +static const struct file_operations acpi_ac_fops = { |
328 | + .owner = THIS_MODULE, |
329 | + .open = acpi_ac_open_fs, |
330 | + .read = seq_read, |
331 | + .llseek = seq_lseek, |
332 | + .release = single_release, |
333 | +}; |
334 | +#endif |
335 | + |
336 | /* -------------------------------------------------------------------------- |
337 | AC Adapter Management |
338 | -------------------------------------------------------------------------- */ |
339 | @@ -143,6 +165,83 @@ static enum power_supply_property ac_props[] = { |
340 | POWER_SUPPLY_PROP_ONLINE, |
341 | }; |
342 | |
343 | +#ifdef CONFIG_ACPI_PROCFS_POWER |
344 | +/* -------------------------------------------------------------------------- |
345 | + FS Interface (/proc) |
346 | + -------------------------------------------------------------------------- */ |
347 | + |
348 | +static struct proc_dir_entry *acpi_ac_dir; |
349 | + |
350 | +static int acpi_ac_seq_show(struct seq_file *seq, void *offset) |
351 | +{ |
352 | + struct acpi_ac *ac = seq->private; |
353 | + |
354 | + |
355 | + if (!ac) |
356 | + return 0; |
357 | + |
358 | + if (acpi_ac_get_state(ac)) { |
359 | + seq_puts(seq, "ERROR: Unable to read AC Adapter state\n"); |
360 | + return 0; |
361 | + } |
362 | + |
363 | + seq_puts(seq, "state: "); |
364 | + switch (ac->state) { |
365 | + case ACPI_AC_STATUS_OFFLINE: |
366 | + seq_puts(seq, "off-line\n"); |
367 | + break; |
368 | + case ACPI_AC_STATUS_ONLINE: |
369 | + seq_puts(seq, "on-line\n"); |
370 | + break; |
371 | + default: |
372 | + seq_puts(seq, "unknown\n"); |
373 | + break; |
374 | + } |
375 | + |
376 | + return 0; |
377 | +} |
378 | + |
379 | +static int acpi_ac_open_fs(struct inode *inode, struct file *file) |
380 | +{ |
381 | + return single_open(file, acpi_ac_seq_show, PDE_DATA(inode)); |
382 | +} |
383 | + |
384 | +static int acpi_ac_add_fs(struct acpi_ac *ac) |
385 | +{ |
386 | + struct proc_dir_entry *entry = NULL; |
387 | + |
388 | + printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded," |
389 | + " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); |
390 | + if (!acpi_device_dir(ac->device)) { |
391 | + acpi_device_dir(ac->device) = |
392 | + proc_mkdir(acpi_device_bid(ac->device), acpi_ac_dir); |
393 | + if (!acpi_device_dir(ac->device)) |
394 | + return -ENODEV; |
395 | + } |
396 | + |
397 | + /* 'state' [R] */ |
398 | + entry = proc_create_data(ACPI_AC_FILE_STATE, |
399 | + S_IRUGO, acpi_device_dir(ac->device), |
400 | + &acpi_ac_fops, ac); |
401 | + if (!entry) |
402 | + return -ENODEV; |
403 | + return 0; |
404 | +} |
405 | + |
406 | +static int acpi_ac_remove_fs(struct acpi_ac *ac) |
407 | +{ |
408 | + |
409 | + if (acpi_device_dir(ac->device)) { |
410 | + remove_proc_entry(ACPI_AC_FILE_STATE, |
411 | + acpi_device_dir(ac->device)); |
412 | + remove_proc_entry(acpi_device_bid(ac->device), acpi_ac_dir); |
413 | + acpi_device_dir(ac->device) = NULL; |
414 | + } |
415 | + |
416 | + return 0; |
417 | +} |
418 | +#endif |
419 | + |
420 | /* -------------------------------------------------------------------------- |
421 | Driver Model |
422 | -------------------------------------------------------------------------- */ |
423 | @@ -243,6 +342,11 @@ static int acpi_ac_add(struct acpi_device *device) |
424 | goto end; |
425 | |
426 | ac->charger.name = acpi_device_bid(device); |
427 | +#ifdef CONFIG_ACPI_PROCFS_POWER |
428 | + result = acpi_ac_add_fs(ac); |
429 | + if (result) |
430 | + goto end; |
431 | +#endif |
432 | ac->charger.type = POWER_SUPPLY_TYPE_MAINS; |
433 | ac->charger.properties = ac_props; |
434 | ac->charger.num_properties = ARRAY_SIZE(ac_props); |
435 | @@ -258,8 +362,12 @@ static int acpi_ac_add(struct acpi_device *device) |
436 | ac->battery_nb.notifier_call = acpi_ac_battery_notify; |
437 | register_acpi_notifier(&ac->battery_nb); |
438 | end: |
439 | - if (result) |
440 | + if (result) { |
441 | +#ifdef CONFIG_ACPI_PROCFS_POWER |
442 | + acpi_ac_remove_fs(ac); |
443 | +#endif |
444 | kfree(ac); |
445 | + } |
446 | |
447 | dmi_check_system(ac_dmi_table); |
448 | return result; |
449 | @@ -303,6 +411,10 @@ static int acpi_ac_remove(struct acpi_device *device) |
450 | power_supply_unregister(&ac->charger); |
451 | unregister_acpi_notifier(&ac->battery_nb); |
452 | |
453 | +#ifdef CONFIG_ACPI_PROCFS_POWER |
454 | + acpi_ac_remove_fs(ac); |
455 | +#endif |
456 | + |
457 | kfree(ac); |
458 | |
459 | return 0; |
460 | @@ -315,9 +427,20 @@ static int __init acpi_ac_init(void) |
461 | if (acpi_disabled) |
462 | return -ENODEV; |
463 | |
464 | +#ifdef CONFIG_ACPI_PROCFS_POWER |
465 | + acpi_ac_dir = acpi_lock_ac_dir(); |
466 | + if (!acpi_ac_dir) |
467 | + return -ENODEV; |
468 | +#endif |
469 | + |
470 | + |
471 | result = acpi_bus_register_driver(&acpi_ac_driver); |
472 | - if (result < 0) |
473 | + if (result < 0) { |
474 | +#ifdef CONFIG_ACPI_PROCFS_POWER |
475 | + acpi_unlock_ac_dir(acpi_ac_dir); |
476 | +#endif |
477 | return -ENODEV; |
478 | + } |
479 | |
480 | return 0; |
481 | } |
482 | @@ -325,6 +448,9 @@ static int __init acpi_ac_init(void) |
483 | static void __exit acpi_ac_exit(void) |
484 | { |
485 | acpi_bus_unregister_driver(&acpi_ac_driver); |
486 | +#ifdef CONFIG_ACPI_PROCFS_POWER |
487 | + acpi_unlock_ac_dir(acpi_ac_dir); |
488 | +#endif |
489 | } |
490 | module_init(acpi_ac_init); |
491 | module_exit(acpi_ac_exit); |
492 | diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c |
493 | index 6e7b2a12860d..884c5674fda8 100644 |
494 | --- a/drivers/acpi/battery.c |
495 | +++ b/drivers/acpi/battery.c |
496 | @@ -34,6 +34,7 @@ |
497 | #include <linux/dmi.h> |
498 | #include <linux/slab.h> |
499 | #include <linux/suspend.h> |
500 | +#include <linux/delay.h> |
501 | #include <asm/unaligned.h> |
502 | |
503 | #ifdef CONFIG_ACPI_PROCFS_POWER |
504 | @@ -1069,6 +1070,28 @@ static struct dmi_system_id bat_dmi_table[] = { |
505 | {}, |
506 | }; |
507 | |
508 | +/* |
509 | + * Some machines'(E,G Lenovo Z480) ECs are not stable |
510 | + * during boot up and this causes battery driver fails to be |
511 | + * probed due to failure of getting battery information |
512 | + * from EC sometimes. After several retries, the operation |
513 | + * may work. So add retry code here and 20ms sleep between |
514 | + * every retries. |
515 | + */ |
516 | +static int acpi_battery_update_retry(struct acpi_battery *battery) |
517 | +{ |
518 | + int retry, ret; |
519 | + |
520 | + for (retry = 5; retry; retry--) { |
521 | + ret = acpi_battery_update(battery); |
522 | + if (!ret) |
523 | + break; |
524 | + |
525 | + msleep(20); |
526 | + } |
527 | + return ret; |
528 | +} |
529 | + |
530 | static int acpi_battery_add(struct acpi_device *device) |
531 | { |
532 | int result = 0; |
533 | @@ -1087,9 +1110,11 @@ static int acpi_battery_add(struct acpi_device *device) |
534 | mutex_init(&battery->sysfs_lock); |
535 | if (acpi_has_method(battery->device->handle, "_BIX")) |
536 | set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags); |
537 | - result = acpi_battery_update(battery); |
538 | + |
539 | + result = acpi_battery_update_retry(battery); |
540 | if (result) |
541 | goto fail; |
542 | + |
543 | #ifdef CONFIG_ACPI_PROCFS_POWER |
544 | result = acpi_battery_add_fs(device); |
545 | #endif |
546 | diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c |
547 | index ad11ba4a412d..49d89909b4ed 100644 |
548 | --- a/drivers/acpi/ec.c |
549 | +++ b/drivers/acpi/ec.c |
550 | @@ -78,6 +78,9 @@ enum { |
551 | EC_FLAGS_BLOCKED, /* Transactions are blocked */ |
552 | }; |
553 | |
554 | +#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */ |
555 | +#define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */ |
556 | + |
557 | /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */ |
558 | static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY; |
559 | module_param(ec_delay, uint, 0644); |
560 | @@ -109,7 +112,7 @@ struct transaction { |
561 | u8 ri; |
562 | u8 wlen; |
563 | u8 rlen; |
564 | - bool done; |
565 | + u8 flags; |
566 | }; |
567 | |
568 | struct acpi_ec *boot_ec, *first_ec; |
569 | @@ -150,60 +153,74 @@ static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data) |
570 | outb(data, ec->data_addr); |
571 | } |
572 | |
573 | -static int ec_transaction_done(struct acpi_ec *ec) |
574 | +static int ec_transaction_completed(struct acpi_ec *ec) |
575 | { |
576 | unsigned long flags; |
577 | int ret = 0; |
578 | spin_lock_irqsave(&ec->lock, flags); |
579 | - if (!ec->curr || ec->curr->done) |
580 | + if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE)) |
581 | ret = 1; |
582 | spin_unlock_irqrestore(&ec->lock, flags); |
583 | return ret; |
584 | } |
585 | |
586 | -static void start_transaction(struct acpi_ec *ec) |
587 | +static bool advance_transaction(struct acpi_ec *ec) |
588 | { |
589 | - ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; |
590 | - ec->curr->done = false; |
591 | - acpi_ec_write_cmd(ec, ec->curr->command); |
592 | -} |
593 | - |
594 | -static void advance_transaction(struct acpi_ec *ec, u8 status) |
595 | -{ |
596 | - unsigned long flags; |
597 | struct transaction *t; |
598 | + u8 status; |
599 | + bool wakeup = false; |
600 | |
601 | - spin_lock_irqsave(&ec->lock, flags); |
602 | + pr_debug("===== %s =====\n", in_interrupt() ? "IRQ" : "TASK"); |
603 | + status = acpi_ec_read_status(ec); |
604 | t = ec->curr; |
605 | if (!t) |
606 | - goto unlock; |
607 | - if (t->wlen > t->wi) { |
608 | - if ((status & ACPI_EC_FLAG_IBF) == 0) |
609 | - acpi_ec_write_data(ec, |
610 | - t->wdata[t->wi++]); |
611 | - else |
612 | - goto err; |
613 | - } else if (t->rlen > t->ri) { |
614 | - if ((status & ACPI_EC_FLAG_OBF) == 1) { |
615 | - t->rdata[t->ri++] = acpi_ec_read_data(ec); |
616 | - if (t->rlen == t->ri) |
617 | - t->done = true; |
618 | + goto err; |
619 | + if (t->flags & ACPI_EC_COMMAND_POLL) { |
620 | + if (t->wlen > t->wi) { |
621 | + if ((status & ACPI_EC_FLAG_IBF) == 0) |
622 | + acpi_ec_write_data(ec, t->wdata[t->wi++]); |
623 | + else |
624 | + goto err; |
625 | + } else if (t->rlen > t->ri) { |
626 | + if ((status & ACPI_EC_FLAG_OBF) == 1) { |
627 | + t->rdata[t->ri++] = acpi_ec_read_data(ec); |
628 | + if (t->rlen == t->ri) { |
629 | + t->flags |= ACPI_EC_COMMAND_COMPLETE; |
630 | + wakeup = true; |
631 | + } |
632 | + } else |
633 | + goto err; |
634 | + } else if (t->wlen == t->wi && |
635 | + (status & ACPI_EC_FLAG_IBF) == 0) { |
636 | + t->flags |= ACPI_EC_COMMAND_COMPLETE; |
637 | + wakeup = true; |
638 | + } |
639 | + return wakeup; |
640 | + } else { |
641 | + if ((status & ACPI_EC_FLAG_IBF) == 0) { |
642 | + acpi_ec_write_cmd(ec, t->command); |
643 | + t->flags |= ACPI_EC_COMMAND_POLL; |
644 | } else |
645 | goto err; |
646 | - } else if (t->wlen == t->wi && |
647 | - (status & ACPI_EC_FLAG_IBF) == 0) |
648 | - t->done = true; |
649 | - goto unlock; |
650 | + return wakeup; |
651 | + } |
652 | err: |
653 | /* |
654 | * If SCI bit is set, then don't think it's a false IRQ |
655 | * otherwise will take a not handled IRQ as a false one. |
656 | */ |
657 | - if (in_interrupt() && !(status & ACPI_EC_FLAG_SCI)) |
658 | - ++t->irq_count; |
659 | + if (!(status & ACPI_EC_FLAG_SCI)) { |
660 | + if (in_interrupt() && t) |
661 | + ++t->irq_count; |
662 | + } |
663 | + return wakeup; |
664 | +} |
665 | |
666 | -unlock: |
667 | - spin_unlock_irqrestore(&ec->lock, flags); |
668 | +static void start_transaction(struct acpi_ec *ec) |
669 | +{ |
670 | + ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; |
671 | + ec->curr->flags = 0; |
672 | + (void)advance_transaction(ec); |
673 | } |
674 | |
675 | static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data); |
676 | @@ -228,15 +245,17 @@ static int ec_poll(struct acpi_ec *ec) |
677 | /* don't sleep with disabled interrupts */ |
678 | if (EC_FLAGS_MSI || irqs_disabled()) { |
679 | udelay(ACPI_EC_MSI_UDELAY); |
680 | - if (ec_transaction_done(ec)) |
681 | + if (ec_transaction_completed(ec)) |
682 | return 0; |
683 | } else { |
684 | if (wait_event_timeout(ec->wait, |
685 | - ec_transaction_done(ec), |
686 | + ec_transaction_completed(ec), |
687 | msecs_to_jiffies(1))) |
688 | return 0; |
689 | } |
690 | - advance_transaction(ec, acpi_ec_read_status(ec)); |
691 | + spin_lock_irqsave(&ec->lock, flags); |
692 | + (void)advance_transaction(ec); |
693 | + spin_unlock_irqrestore(&ec->lock, flags); |
694 | } while (time_before(jiffies, delay)); |
695 | pr_debug("controller reset, restart transaction\n"); |
696 | spin_lock_irqsave(&ec->lock, flags); |
697 | @@ -268,23 +287,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, |
698 | return ret; |
699 | } |
700 | |
701 | -static int ec_check_ibf0(struct acpi_ec *ec) |
702 | -{ |
703 | - u8 status = acpi_ec_read_status(ec); |
704 | - return (status & ACPI_EC_FLAG_IBF) == 0; |
705 | -} |
706 | - |
707 | -static int ec_wait_ibf0(struct acpi_ec *ec) |
708 | -{ |
709 | - unsigned long delay = jiffies + msecs_to_jiffies(ec_delay); |
710 | - /* interrupt wait manually if GPE mode is not active */ |
711 | - while (time_before(jiffies, delay)) |
712 | - if (wait_event_timeout(ec->wait, ec_check_ibf0(ec), |
713 | - msecs_to_jiffies(1))) |
714 | - return 0; |
715 | - return -ETIME; |
716 | -} |
717 | - |
718 | static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) |
719 | { |
720 | int status; |
721 | @@ -305,12 +307,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) |
722 | goto unlock; |
723 | } |
724 | } |
725 | - if (ec_wait_ibf0(ec)) { |
726 | - pr_err("input buffer is not empty, " |
727 | - "aborting transaction\n"); |
728 | - status = -ETIME; |
729 | - goto end; |
730 | - } |
731 | pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n", |
732 | t->command, t->wdata ? t->wdata[0] : 0); |
733 | /* disable GPE during transaction if storm is detected */ |
734 | @@ -334,7 +330,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) |
735 | set_bit(EC_FLAGS_GPE_STORM, &ec->flags); |
736 | } |
737 | pr_debug("transaction end\n"); |
738 | -end: |
739 | if (ec->global_lock) |
740 | acpi_release_global_lock(glk); |
741 | unlock: |
742 | @@ -634,17 +629,14 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state) |
743 | static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, |
744 | u32 gpe_number, void *data) |
745 | { |
746 | + unsigned long flags; |
747 | struct acpi_ec *ec = data; |
748 | - u8 status = acpi_ec_read_status(ec); |
749 | |
750 | - pr_debug("~~~> interrupt, status:0x%02x\n", status); |
751 | - |
752 | - advance_transaction(ec, status); |
753 | - if (ec_transaction_done(ec) && |
754 | - (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) { |
755 | + spin_lock_irqsave(&ec->lock, flags); |
756 | + if (advance_transaction(ec)) |
757 | wake_up(&ec->wait); |
758 | - ec_check_sci(ec, acpi_ec_read_status(ec)); |
759 | - } |
760 | + spin_unlock_irqrestore(&ec->lock, flags); |
761 | + ec_check_sci(ec, acpi_ec_read_status(ec)); |
762 | return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE; |
763 | } |
764 | |
765 | diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c |
766 | index 0bdacc5e26a3..2ba8f02ced36 100644 |
767 | --- a/drivers/acpi/resource.c |
768 | +++ b/drivers/acpi/resource.c |
769 | @@ -77,7 +77,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) |
770 | switch (ares->type) { |
771 | case ACPI_RESOURCE_TYPE_MEMORY24: |
772 | memory24 = &ares->data.memory24; |
773 | - if (!memory24->address_length) |
774 | + if (!memory24->minimum && !memory24->address_length) |
775 | return false; |
776 | acpi_dev_get_memresource(res, memory24->minimum, |
777 | memory24->address_length, |
778 | @@ -85,7 +85,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) |
779 | break; |
780 | case ACPI_RESOURCE_TYPE_MEMORY32: |
781 | memory32 = &ares->data.memory32; |
782 | - if (!memory32->address_length) |
783 | + if (!memory32->minimum && !memory32->address_length) |
784 | return false; |
785 | acpi_dev_get_memresource(res, memory32->minimum, |
786 | memory32->address_length, |
787 | @@ -93,7 +93,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) |
788 | break; |
789 | case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: |
790 | fixed_memory32 = &ares->data.fixed_memory32; |
791 | - if (!fixed_memory32->address_length) |
792 | + if (!fixed_memory32->address && !fixed_memory32->address_length) |
793 | return false; |
794 | acpi_dev_get_memresource(res, fixed_memory32->address, |
795 | fixed_memory32->address_length, |
796 | @@ -150,7 +150,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res) |
797 | switch (ares->type) { |
798 | case ACPI_RESOURCE_TYPE_IO: |
799 | io = &ares->data.io; |
800 | - if (!io->address_length) |
801 | + if (!io->minimum && !io->address_length) |
802 | return false; |
803 | acpi_dev_get_ioresource(res, io->minimum, |
804 | io->address_length, |
805 | @@ -158,7 +158,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res) |
806 | break; |
807 | case ACPI_RESOURCE_TYPE_FIXED_IO: |
808 | fixed_io = &ares->data.fixed_io; |
809 | - if (!fixed_io->address_length) |
810 | + if (!fixed_io->address && !fixed_io->address_length) |
811 | return false; |
812 | acpi_dev_get_ioresource(res, fixed_io->address, |
813 | fixed_io->address_length, |
814 | diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c |
815 | index 8befeb69eeb1..926355e6eeea 100644 |
816 | --- a/drivers/ata/ahci_imx.c |
817 | +++ b/drivers/ata/ahci_imx.c |
818 | @@ -58,6 +58,8 @@ enum ahci_imx_type { |
819 | struct imx_ahci_priv { |
820 | struct platform_device *ahci_pdev; |
821 | enum ahci_imx_type type; |
822 | + struct clk *sata_clk; |
823 | + struct clk *sata_ref_clk; |
824 | struct clk *ahb_clk; |
825 | struct regmap *gpr; |
826 | bool no_device; |
827 | @@ -224,7 +226,7 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv) |
828 | return ret; |
829 | } |
830 | |
831 | - ret = ahci_platform_enable_clks(hpriv); |
832 | + ret = clk_prepare_enable(imxpriv->sata_ref_clk); |
833 | if (ret < 0) |
834 | goto disable_regulator; |
835 | |
836 | @@ -291,7 +293,7 @@ static void imx_sata_disable(struct ahci_host_priv *hpriv) |
837 | !IMX6Q_GPR13_SATA_MPLL_CLK_EN); |
838 | } |
839 | |
840 | - ahci_platform_disable_clks(hpriv); |
841 | + clk_disable_unprepare(imxpriv->sata_ref_clk); |
842 | |
843 | if (hpriv->target_pwr) |
844 | regulator_disable(hpriv->target_pwr); |
845 | @@ -385,6 +387,19 @@ static int imx_ahci_probe(struct platform_device *pdev) |
846 | imxpriv->no_device = false; |
847 | imxpriv->first_time = true; |
848 | imxpriv->type = (enum ahci_imx_type)of_id->data; |
849 | + |
850 | + imxpriv->sata_clk = devm_clk_get(dev, "sata"); |
851 | + if (IS_ERR(imxpriv->sata_clk)) { |
852 | + dev_err(dev, "can't get sata clock.\n"); |
853 | + return PTR_ERR(imxpriv->sata_clk); |
854 | + } |
855 | + |
856 | + imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref"); |
857 | + if (IS_ERR(imxpriv->sata_ref_clk)) { |
858 | + dev_err(dev, "can't get sata_ref clock.\n"); |
859 | + return PTR_ERR(imxpriv->sata_ref_clk); |
860 | + } |
861 | + |
862 | imxpriv->ahb_clk = devm_clk_get(dev, "ahb"); |
863 | if (IS_ERR(imxpriv->ahb_clk)) { |
864 | dev_err(dev, "can't get ahb clock.\n"); |
865 | @@ -407,10 +422,14 @@ static int imx_ahci_probe(struct platform_device *pdev) |
866 | |
867 | hpriv->plat_data = imxpriv; |
868 | |
869 | - ret = imx_sata_enable(hpriv); |
870 | + ret = clk_prepare_enable(imxpriv->sata_clk); |
871 | if (ret) |
872 | return ret; |
873 | |
874 | + ret = imx_sata_enable(hpriv); |
875 | + if (ret) |
876 | + goto disable_clk; |
877 | + |
878 | /* |
879 | * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL, |
880 | * and IP vendor specific register IMX_TIMER1MS. |
881 | @@ -434,16 +453,24 @@ static int imx_ahci_probe(struct platform_device *pdev) |
882 | |
883 | ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 0, 0); |
884 | if (ret) |
885 | - imx_sata_disable(hpriv); |
886 | + goto disable_sata; |
887 | + |
888 | + return 0; |
889 | |
890 | +disable_sata: |
891 | + imx_sata_disable(hpriv); |
892 | +disable_clk: |
893 | + clk_disable_unprepare(imxpriv->sata_clk); |
894 | return ret; |
895 | } |
896 | |
897 | static void ahci_imx_host_stop(struct ata_host *host) |
898 | { |
899 | struct ahci_host_priv *hpriv = host->private_data; |
900 | + struct imx_ahci_priv *imxpriv = hpriv->plat_data; |
901 | |
902 | imx_sata_disable(hpriv); |
903 | + clk_disable_unprepare(imxpriv->sata_clk); |
904 | } |
905 | |
906 | #ifdef CONFIG_PM_SLEEP |
907 | diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c |
908 | index 165c2c299e57..d3bffa478eca 100644 |
909 | --- a/drivers/base/dma-contiguous.c |
910 | +++ b/drivers/base/dma-contiguous.c |
911 | @@ -155,13 +155,23 @@ static int __init cma_activate_area(struct cma *cma) |
912 | base_pfn = pfn; |
913 | for (j = pageblock_nr_pages; j; --j, pfn++) { |
914 | WARN_ON_ONCE(!pfn_valid(pfn)); |
915 | + /* |
916 | + * alloc_contig_range requires the pfn range |
917 | + * specified to be in the same zone. Make this |
918 | + * simple by forcing the entire CMA resv range |
919 | + * to be in the same zone. |
920 | + */ |
921 | if (page_zone(pfn_to_page(pfn)) != zone) |
922 | - return -EINVAL; |
923 | + goto err; |
924 | } |
925 | init_cma_reserved_pageblock(pfn_to_page(base_pfn)); |
926 | } while (--i); |
927 | |
928 | return 0; |
929 | + |
930 | +err: |
931 | + kfree(cma->bitmap); |
932 | + return -EINVAL; |
933 | } |
934 | |
935 | static struct cma cma_areas[MAX_CMA_AREAS]; |
936 | diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c |
937 | index d915707d2ba1..93dcad0c1cbe 100644 |
938 | --- a/drivers/char/i8k.c |
939 | +++ b/drivers/char/i8k.c |
940 | @@ -138,7 +138,9 @@ static int i8k_smm(struct smm_regs *regs) |
941 | if (!alloc_cpumask_var(&old_mask, GFP_KERNEL)) |
942 | return -ENOMEM; |
943 | cpumask_copy(old_mask, ¤t->cpus_allowed); |
944 | - set_cpus_allowed_ptr(current, cpumask_of(0)); |
945 | + rc = set_cpus_allowed_ptr(current, cpumask_of(0)); |
946 | + if (rc) |
947 | + goto out; |
948 | if (smp_processor_id() != 0) { |
949 | rc = -EBUSY; |
950 | goto out; |
951 | diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c |
952 | index f2f62a1bf61a..cbc23a610d9f 100644 |
953 | --- a/drivers/clk/clk-s2mps11.c |
954 | +++ b/drivers/clk/clk-s2mps11.c |
955 | @@ -206,16 +206,13 @@ static int s2mps11_clk_probe(struct platform_device *pdev) |
956 | goto err_reg; |
957 | } |
958 | |
959 | - s2mps11_clk->lookup = devm_kzalloc(&pdev->dev, |
960 | - sizeof(struct clk_lookup), GFP_KERNEL); |
961 | + s2mps11_clk->lookup = clkdev_alloc(s2mps11_clk->clk, |
962 | + s2mps11_name(s2mps11_clk), NULL); |
963 | if (!s2mps11_clk->lookup) { |
964 | ret = -ENOMEM; |
965 | goto err_lup; |
966 | } |
967 | |
968 | - s2mps11_clk->lookup->con_id = s2mps11_name(s2mps11_clk); |
969 | - s2mps11_clk->lookup->clk = s2mps11_clk->clk; |
970 | - |
971 | clkdev_add(s2mps11_clk->lookup); |
972 | } |
973 | |
974 | diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c |
975 | index f9b59c7e48e9..9be47a829144 100644 |
976 | --- a/drivers/clk/qcom/mmcc-msm8960.c |
977 | +++ b/drivers/clk/qcom/mmcc-msm8960.c |
978 | @@ -1208,7 +1208,7 @@ static struct clk_branch rot_clk = { |
979 | |
980 | static u8 mmcc_pxo_hdmi_map[] = { |
981 | [P_PXO] = 0, |
982 | - [P_HDMI_PLL] = 2, |
983 | + [P_HDMI_PLL] = 3, |
984 | }; |
985 | |
986 | static const char *mmcc_pxo_hdmi[] = { |
987 | diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c |
988 | index c2d204315546..125eba86c844 100644 |
989 | --- a/drivers/clk/spear/spear3xx_clock.c |
990 | +++ b/drivers/clk/spear/spear3xx_clock.c |
991 | @@ -211,7 +211,7 @@ static inline void spear310_clk_init(void) { } |
992 | /* array of all spear 320 clock lookups */ |
993 | #ifdef CONFIG_MACH_SPEAR320 |
994 | |
995 | -#define SPEAR320_CONTROL_REG (soc_config_base + 0x0000) |
996 | +#define SPEAR320_CONTROL_REG (soc_config_base + 0x0010) |
997 | #define SPEAR320_EXT_CTRL_REG (soc_config_base + 0x0018) |
998 | |
999 | #define SPEAR320_UARTX_PCLK_MASK 0x1 |
1000 | diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile |
1001 | index 0dbb963c1aef..3254f6cf56bf 100644 |
1002 | --- a/drivers/cpufreq/Makefile |
1003 | +++ b/drivers/cpufreq/Makefile |
1004 | @@ -47,7 +47,7 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o |
1005 | # LITTLE drivers, so that it is probed last. |
1006 | obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o |
1007 | |
1008 | -obj-$(CONFIG_ARCH_DAVINCI_DA850) += davinci-cpufreq.o |
1009 | +obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o |
1010 | obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o |
1011 | obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o |
1012 | obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o |
1013 | diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c |
1014 | index fcd0c9208e98..870eecc816ce 100644 |
1015 | --- a/drivers/cpufreq/intel_pstate.c |
1016 | +++ b/drivers/cpufreq/intel_pstate.c |
1017 | @@ -132,6 +132,7 @@ static struct pstate_funcs pstate_funcs; |
1018 | |
1019 | struct perf_limits { |
1020 | int no_turbo; |
1021 | + int turbo_disabled; |
1022 | int max_perf_pct; |
1023 | int min_perf_pct; |
1024 | int32_t max_perf; |
1025 | @@ -291,7 +292,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, |
1026 | if (ret != 1) |
1027 | return -EINVAL; |
1028 | limits.no_turbo = clamp_t(int, input, 0 , 1); |
1029 | - |
1030 | + if (limits.turbo_disabled) { |
1031 | + pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); |
1032 | + limits.no_turbo = limits.turbo_disabled; |
1033 | + } |
1034 | return count; |
1035 | } |
1036 | |
1037 | @@ -361,21 +365,21 @@ static int byt_get_min_pstate(void) |
1038 | { |
1039 | u64 value; |
1040 | rdmsrl(BYT_RATIOS, value); |
1041 | - return (value >> 8) & 0x3F; |
1042 | + return (value >> 8) & 0x7F; |
1043 | } |
1044 | |
1045 | static int byt_get_max_pstate(void) |
1046 | { |
1047 | u64 value; |
1048 | rdmsrl(BYT_RATIOS, value); |
1049 | - return (value >> 16) & 0x3F; |
1050 | + return (value >> 16) & 0x7F; |
1051 | } |
1052 | |
1053 | static int byt_get_turbo_pstate(void) |
1054 | { |
1055 | u64 value; |
1056 | rdmsrl(BYT_TURBO_RATIOS, value); |
1057 | - return value & 0x3F; |
1058 | + return value & 0x7F; |
1059 | } |
1060 | |
1061 | static void byt_set_pstate(struct cpudata *cpudata, int pstate) |
1062 | @@ -385,7 +389,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate) |
1063 | u32 vid; |
1064 | |
1065 | val = pstate << 8; |
1066 | - if (limits.no_turbo) |
1067 | + if (limits.no_turbo && !limits.turbo_disabled) |
1068 | val |= (u64)1 << 32; |
1069 | |
1070 | vid_fp = cpudata->vid.min + mul_fp( |
1071 | @@ -409,8 +413,8 @@ static void byt_get_vid(struct cpudata *cpudata) |
1072 | |
1073 | |
1074 | rdmsrl(BYT_VIDS, value); |
1075 | - cpudata->vid.min = int_tofp((value >> 8) & 0x3f); |
1076 | - cpudata->vid.max = int_tofp((value >> 16) & 0x3f); |
1077 | + cpudata->vid.min = int_tofp((value >> 8) & 0x7f); |
1078 | + cpudata->vid.max = int_tofp((value >> 16) & 0x7f); |
1079 | cpudata->vid.ratio = div_fp( |
1080 | cpudata->vid.max - cpudata->vid.min, |
1081 | int_tofp(cpudata->pstate.max_pstate - |
1082 | @@ -452,7 +456,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate) |
1083 | u64 val; |
1084 | |
1085 | val = pstate << 8; |
1086 | - if (limits.no_turbo) |
1087 | + if (limits.no_turbo && !limits.turbo_disabled) |
1088 | val |= (u64)1 << 32; |
1089 | |
1090 | wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); |
1091 | @@ -705,9 +709,8 @@ static int intel_pstate_init_cpu(unsigned int cpunum) |
1092 | |
1093 | cpu = all_cpu_data[cpunum]; |
1094 | |
1095 | - intel_pstate_get_cpu_pstates(cpu); |
1096 | - |
1097 | cpu->cpu = cpunum; |
1098 | + intel_pstate_get_cpu_pstates(cpu); |
1099 | |
1100 | init_timer_deferrable(&cpu->timer); |
1101 | cpu->timer.function = intel_pstate_timer_func; |
1102 | @@ -750,7 +753,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) |
1103 | limits.min_perf = int_tofp(1); |
1104 | limits.max_perf_pct = 100; |
1105 | limits.max_perf = int_tofp(1); |
1106 | - limits.no_turbo = 0; |
1107 | + limits.no_turbo = limits.turbo_disabled; |
1108 | return 0; |
1109 | } |
1110 | limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; |
1111 | @@ -793,6 +796,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) |
1112 | { |
1113 | struct cpudata *cpu; |
1114 | int rc; |
1115 | + u64 misc_en; |
1116 | |
1117 | rc = intel_pstate_init_cpu(policy->cpu); |
1118 | if (rc) |
1119 | @@ -800,8 +804,13 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) |
1120 | |
1121 | cpu = all_cpu_data[policy->cpu]; |
1122 | |
1123 | - if (!limits.no_turbo && |
1124 | - limits.min_perf_pct == 100 && limits.max_perf_pct == 100) |
1125 | + rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); |
1126 | + if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || |
1127 | + cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) { |
1128 | + limits.turbo_disabled = 1; |
1129 | + limits.no_turbo = 1; |
1130 | + } |
1131 | + if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100) |
1132 | policy->policy = CPUFREQ_POLICY_PERFORMANCE; |
1133 | else |
1134 | policy->policy = CPUFREQ_POLICY_POWERSAVE; |
1135 | diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c |
1136 | index 1d80bd3636c5..b512a4ba7569 100644 |
1137 | --- a/drivers/crypto/caam/jr.c |
1138 | +++ b/drivers/crypto/caam/jr.c |
1139 | @@ -453,8 +453,8 @@ static int caam_jr_probe(struct platform_device *pdev) |
1140 | int error; |
1141 | |
1142 | jrdev = &pdev->dev; |
1143 | - jrpriv = kmalloc(sizeof(struct caam_drv_private_jr), |
1144 | - GFP_KERNEL); |
1145 | + jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr), |
1146 | + GFP_KERNEL); |
1147 | if (!jrpriv) |
1148 | return -ENOMEM; |
1149 | |
1150 | @@ -487,10 +487,8 @@ static int caam_jr_probe(struct platform_device *pdev) |
1151 | |
1152 | /* Now do the platform independent part */ |
1153 | error = caam_jr_init(jrdev); /* now turn on hardware */ |
1154 | - if (error) { |
1155 | - kfree(jrpriv); |
1156 | + if (error) |
1157 | return error; |
1158 | - } |
1159 | |
1160 | jrpriv->dev = jrdev; |
1161 | spin_lock(&driver_data.jr_alloc_lock); |
1162 | diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h |
1163 | index 388c028e223c..44c79fd04523 100644 |
1164 | --- a/drivers/gpu/drm/i915/i915_drv.h |
1165 | +++ b/drivers/gpu/drm/i915/i915_drv.h |
1166 | @@ -803,6 +803,7 @@ enum intel_sbi_destination { |
1167 | #define QUIRK_PIPEA_FORCE (1<<0) |
1168 | #define QUIRK_LVDS_SSC_DISABLE (1<<1) |
1169 | #define QUIRK_INVERT_BRIGHTNESS (1<<2) |
1170 | +#define QUIRK_BACKLIGHT_PRESENT (1<<3) |
1171 | |
1172 | struct intel_fbdev; |
1173 | struct intel_fbc_work; |
1174 | diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c |
1175 | index 62ef55ba061c..7465ab0fd396 100644 |
1176 | --- a/drivers/gpu/drm/i915/i915_gem_stolen.c |
1177 | +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c |
1178 | @@ -74,6 +74,50 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) |
1179 | if (base == 0) |
1180 | return 0; |
1181 | |
1182 | + /* make sure we don't clobber the GTT if it's within stolen memory */ |
1183 | + if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) { |
1184 | + struct { |
1185 | + u32 start, end; |
1186 | + } stolen[2] = { |
1187 | + { .start = base, .end = base + dev_priv->gtt.stolen_size, }, |
1188 | + { .start = base, .end = base + dev_priv->gtt.stolen_size, }, |
1189 | + }; |
1190 | + u64 gtt_start, gtt_end; |
1191 | + |
1192 | + gtt_start = I915_READ(PGTBL_CTL); |
1193 | + if (IS_GEN4(dev)) |
1194 | + gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) | |
1195 | + (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28; |
1196 | + else |
1197 | + gtt_start &= PGTBL_ADDRESS_LO_MASK; |
1198 | + gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4; |
1199 | + |
1200 | + if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end) |
1201 | + stolen[0].end = gtt_start; |
1202 | + if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end) |
1203 | + stolen[1].start = gtt_end; |
1204 | + |
1205 | + /* pick the larger of the two chunks */ |
1206 | + if (stolen[0].end - stolen[0].start > |
1207 | + stolen[1].end - stolen[1].start) { |
1208 | + base = stolen[0].start; |
1209 | + dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start; |
1210 | + } else { |
1211 | + base = stolen[1].start; |
1212 | + dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start; |
1213 | + } |
1214 | + |
1215 | + if (stolen[0].start != stolen[1].start || |
1216 | + stolen[0].end != stolen[1].end) { |
1217 | + DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n", |
1218 | + (unsigned long long) gtt_start, |
1219 | + (unsigned long long) gtt_end - 1); |
1220 | + DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n", |
1221 | + base, base + (u32) dev_priv->gtt.stolen_size - 1); |
1222 | + } |
1223 | + } |
1224 | + |
1225 | + |
1226 | /* Verify that nothing else uses this physical address. Stolen |
1227 | * memory should be reserved by the BIOS and hidden from the |
1228 | * kernel. So if the region is already marked as busy, something |
1229 | diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h |
1230 | index c77af69c2d8f..3f34dcf283a5 100644 |
1231 | --- a/drivers/gpu/drm/i915/i915_reg.h |
1232 | +++ b/drivers/gpu/drm/i915/i915_reg.h |
1233 | @@ -659,6 +659,9 @@ enum punit_power_well { |
1234 | /* |
1235 | * Instruction and interrupt control regs |
1236 | */ |
1237 | +#define PGTBL_CTL 0x02020 |
1238 | +#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */ |
1239 | +#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */ |
1240 | #define PGTBL_ER 0x02024 |
1241 | #define RENDER_RING_BASE 0x02000 |
1242 | #define BSD_RING_BASE 0x04000 |
1243 | diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c |
1244 | index 5b60e25baa32..b91dfbe9fe8e 100644 |
1245 | --- a/drivers/gpu/drm/i915/intel_display.c |
1246 | +++ b/drivers/gpu/drm/i915/intel_display.c |
1247 | @@ -11166,6 +11166,14 @@ static void quirk_invert_brightness(struct drm_device *dev) |
1248 | DRM_INFO("applying inverted panel brightness quirk\n"); |
1249 | } |
1250 | |
1251 | +/* Some VBT's incorrectly indicate no backlight is present */ |
1252 | +static void quirk_backlight_present(struct drm_device *dev) |
1253 | +{ |
1254 | + struct drm_i915_private *dev_priv = dev->dev_private; |
1255 | + dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT; |
1256 | + DRM_INFO("applying backlight present quirk\n"); |
1257 | +} |
1258 | + |
1259 | struct intel_quirk { |
1260 | int device; |
1261 | int subsystem_vendor; |
1262 | @@ -11237,6 +11245,12 @@ static struct intel_quirk intel_quirks[] = { |
1263 | |
1264 | /* Acer Aspire 5336 */ |
1265 | { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness }, |
1266 | + |
1267 | + /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */ |
1268 | + { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present }, |
1269 | + |
1270 | + /* Toshiba CB35 Chromebook (Celeron 2955U) */ |
1271 | + { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, |
1272 | }; |
1273 | |
1274 | static void intel_init_quirks(struct drm_device *dev) |
1275 | diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c |
1276 | index 8b2538356371..27ee96ba906a 100644 |
1277 | --- a/drivers/gpu/drm/i915/intel_panel.c |
1278 | +++ b/drivers/gpu/drm/i915/intel_panel.c |
1279 | @@ -1065,8 +1065,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector) |
1280 | int ret; |
1281 | |
1282 | if (!dev_priv->vbt.backlight.present) { |
1283 | - DRM_DEBUG_KMS("native backlight control not available per VBT\n"); |
1284 | - return 0; |
1285 | + if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) { |
1286 | + DRM_DEBUG_KMS("no backlight present per VBT, but present per quirk\n"); |
1287 | + } else { |
1288 | + DRM_DEBUG_KMS("no backlight present per VBT\n"); |
1289 | + return 0; |
1290 | + } |
1291 | } |
1292 | |
1293 | /* set level and max in panel struct */ |
1294 | diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c |
1295 | index 10dae4106c08..584090ac3eb9 100644 |
1296 | --- a/drivers/gpu/drm/radeon/ci_dpm.c |
1297 | +++ b/drivers/gpu/drm/radeon/ci_dpm.c |
1298 | @@ -1179,7 +1179,7 @@ static int ci_stop_dpm(struct radeon_device *rdev) |
1299 | tmp &= ~GLOBAL_PWRMGT_EN; |
1300 | WREG32_SMC(GENERAL_PWRMGT, tmp); |
1301 | |
1302 | - tmp = RREG32(SCLK_PWRMGT_CNTL); |
1303 | + tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); |
1304 | tmp &= ~DYNAMIC_PM_EN; |
1305 | WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); |
1306 | |
1307 | diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c |
1308 | index 0f7a51a3694f..a61a9039f6f6 100644 |
1309 | --- a/drivers/gpu/drm/radeon/evergreen.c |
1310 | +++ b/drivers/gpu/drm/radeon/evergreen.c |
1311 | @@ -189,7 +189,7 @@ static const u32 evergreen_golden_registers[] = |
1312 | 0x8c1c, 0xffffffff, 0x00001010, |
1313 | 0x28350, 0xffffffff, 0x00000000, |
1314 | 0xa008, 0xffffffff, 0x00010000, |
1315 | - 0x5cc, 0xffffffff, 0x00000001, |
1316 | + 0x5c4, 0xffffffff, 0x00000001, |
1317 | 0x9508, 0xffffffff, 0x00000002, |
1318 | 0x913c, 0x0000000f, 0x0000000a |
1319 | }; |
1320 | @@ -476,7 +476,7 @@ static const u32 cedar_golden_registers[] = |
1321 | 0x8c1c, 0xffffffff, 0x00001010, |
1322 | 0x28350, 0xffffffff, 0x00000000, |
1323 | 0xa008, 0xffffffff, 0x00010000, |
1324 | - 0x5cc, 0xffffffff, 0x00000001, |
1325 | + 0x5c4, 0xffffffff, 0x00000001, |
1326 | 0x9508, 0xffffffff, 0x00000002 |
1327 | }; |
1328 | |
1329 | @@ -635,7 +635,7 @@ static const u32 juniper_mgcg_init[] = |
1330 | static const u32 supersumo_golden_registers[] = |
1331 | { |
1332 | 0x5eb4, 0xffffffff, 0x00000002, |
1333 | - 0x5cc, 0xffffffff, 0x00000001, |
1334 | + 0x5c4, 0xffffffff, 0x00000001, |
1335 | 0x7030, 0xffffffff, 0x00000011, |
1336 | 0x7c30, 0xffffffff, 0x00000011, |
1337 | 0x6104, 0x01000300, 0x00000000, |
1338 | @@ -719,7 +719,7 @@ static const u32 sumo_golden_registers[] = |
1339 | static const u32 wrestler_golden_registers[] = |
1340 | { |
1341 | 0x5eb4, 0xffffffff, 0x00000002, |
1342 | - 0x5cc, 0xffffffff, 0x00000001, |
1343 | + 0x5c4, 0xffffffff, 0x00000001, |
1344 | 0x7030, 0xffffffff, 0x00000011, |
1345 | 0x7c30, 0xffffffff, 0x00000011, |
1346 | 0x6104, 0x01000300, 0x00000000, |
1347 | diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c |
1348 | index c11b71d249e3..c8c48aa4181e 100644 |
1349 | --- a/drivers/gpu/drm/radeon/radeon_vm.c |
1350 | +++ b/drivers/gpu/drm/radeon/radeon_vm.c |
1351 | @@ -493,7 +493,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, |
1352 | mutex_unlock(&vm->mutex); |
1353 | |
1354 | r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8, |
1355 | - RADEON_GPU_PAGE_SIZE, false, |
1356 | + RADEON_GPU_PAGE_SIZE, true, |
1357 | RADEON_GEM_DOMAIN_VRAM, NULL, &pt); |
1358 | if (r) |
1359 | return r; |
1360 | @@ -913,7 +913,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) |
1361 | return -ENOMEM; |
1362 | } |
1363 | |
1364 | - r = radeon_bo_create(rdev, pd_size, RADEON_VM_PTB_ALIGN_SIZE, false, |
1365 | + r = radeon_bo_create(rdev, pd_size, RADEON_VM_PTB_ALIGN_SIZE, true, |
1366 | RADEON_GEM_DOMAIN_VRAM, NULL, |
1367 | &vm->page_directory); |
1368 | if (r) |
1369 | diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c |
1370 | index 72d3616de08e..95b693c11640 100644 |
1371 | --- a/drivers/gpu/drm/radeon/rs600.c |
1372 | +++ b/drivers/gpu/drm/radeon/rs600.c |
1373 | @@ -646,8 +646,10 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
1374 | return -EINVAL; |
1375 | } |
1376 | addr = addr & 0xFFFFFFFFFFFFF000ULL; |
1377 | - addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED; |
1378 | - addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE; |
1379 | + if (addr != rdev->dummy_page.addr) |
1380 | + addr |= R600_PTE_VALID | R600_PTE_READABLE | |
1381 | + R600_PTE_WRITEABLE; |
1382 | + addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED; |
1383 | writeq(addr, ptr + (i * 8)); |
1384 | return 0; |
1385 | } |
1386 | diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c |
1387 | index da041a43d82e..3c76e1dcdf04 100644 |
1388 | --- a/drivers/gpu/drm/radeon/rv770_dpm.c |
1389 | +++ b/drivers/gpu/drm/radeon/rv770_dpm.c |
1390 | @@ -2329,12 +2329,6 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev) |
1391 | pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, |
1392 | ASIC_INTERNAL_MEMORY_SS, 0); |
1393 | |
1394 | - /* disable ss, causes hangs on some cayman boards */ |
1395 | - if (rdev->family == CHIP_CAYMAN) { |
1396 | - pi->sclk_ss = false; |
1397 | - pi->mclk_ss = false; |
1398 | - } |
1399 | - |
1400 | if (pi->sclk_ss || pi->mclk_ss) |
1401 | pi->dynamic_ss = true; |
1402 | else |
1403 | diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c |
1404 | index 05827eccc53a..ce5a9f2584f3 100644 |
1405 | --- a/drivers/hv/connection.c |
1406 | +++ b/drivers/hv/connection.c |
1407 | @@ -319,9 +319,13 @@ static void process_chn_event(u32 relid) |
1408 | */ |
1409 | |
1410 | do { |
1411 | - hv_begin_read(&channel->inbound); |
1412 | + if (read_state) |
1413 | + hv_begin_read(&channel->inbound); |
1414 | channel->onchannel_callback(arg); |
1415 | - bytes_to_read = hv_end_read(&channel->inbound); |
1416 | + if (read_state) |
1417 | + bytes_to_read = hv_end_read(&channel->inbound); |
1418 | + else |
1419 | + bytes_to_read = 0; |
1420 | } while (read_state && (bytes_to_read != 0)); |
1421 | } else { |
1422 | pr_err("no channel callback for relid - %u\n", relid); |
1423 | diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c |
1424 | index 5ffd81f19d01..0625e50d7a6e 100644 |
1425 | --- a/drivers/hwmon/adc128d818.c |
1426 | +++ b/drivers/hwmon/adc128d818.c |
1427 | @@ -239,50 +239,50 @@ static ssize_t adc128_show_alarm(struct device *dev, |
1428 | return sprintf(buf, "%u\n", !!(alarms & mask)); |
1429 | } |
1430 | |
1431 | -static SENSOR_DEVICE_ATTR_2(in0_input, S_IWUSR | S_IRUGO, |
1432 | - adc128_show_in, adc128_set_in, 0, 0); |
1433 | +static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO, |
1434 | + adc128_show_in, NULL, 0, 0); |
1435 | static SENSOR_DEVICE_ATTR_2(in0_min, S_IWUSR | S_IRUGO, |
1436 | adc128_show_in, adc128_set_in, 0, 1); |
1437 | static SENSOR_DEVICE_ATTR_2(in0_max, S_IWUSR | S_IRUGO, |
1438 | adc128_show_in, adc128_set_in, 0, 2); |
1439 | |
1440 | -static SENSOR_DEVICE_ATTR_2(in1_input, S_IWUSR | S_IRUGO, |
1441 | - adc128_show_in, adc128_set_in, 1, 0); |
1442 | +static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO, |
1443 | + adc128_show_in, NULL, 1, 0); |
1444 | static SENSOR_DEVICE_ATTR_2(in1_min, S_IWUSR | S_IRUGO, |
1445 | adc128_show_in, adc128_set_in, 1, 1); |
1446 | static SENSOR_DEVICE_ATTR_2(in1_max, S_IWUSR | S_IRUGO, |
1447 | adc128_show_in, adc128_set_in, 1, 2); |
1448 | |
1449 | -static SENSOR_DEVICE_ATTR_2(in2_input, S_IWUSR | S_IRUGO, |
1450 | - adc128_show_in, adc128_set_in, 2, 0); |
1451 | +static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO, |
1452 | + adc128_show_in, NULL, 2, 0); |
1453 | static SENSOR_DEVICE_ATTR_2(in2_min, S_IWUSR | S_IRUGO, |
1454 | adc128_show_in, adc128_set_in, 2, 1); |
1455 | static SENSOR_DEVICE_ATTR_2(in2_max, S_IWUSR | S_IRUGO, |
1456 | adc128_show_in, adc128_set_in, 2, 2); |
1457 | |
1458 | -static SENSOR_DEVICE_ATTR_2(in3_input, S_IWUSR | S_IRUGO, |
1459 | - adc128_show_in, adc128_set_in, 3, 0); |
1460 | +static SENSOR_DEVICE_ATTR_2(in3_input, S_IRUGO, |
1461 | + adc128_show_in, NULL, 3, 0); |
1462 | static SENSOR_DEVICE_ATTR_2(in3_min, S_IWUSR | S_IRUGO, |
1463 | adc128_show_in, adc128_set_in, 3, 1); |
1464 | static SENSOR_DEVICE_ATTR_2(in3_max, S_IWUSR | S_IRUGO, |
1465 | adc128_show_in, adc128_set_in, 3, 2); |
1466 | |
1467 | -static SENSOR_DEVICE_ATTR_2(in4_input, S_IWUSR | S_IRUGO, |
1468 | - adc128_show_in, adc128_set_in, 4, 0); |
1469 | +static SENSOR_DEVICE_ATTR_2(in4_input, S_IRUGO, |
1470 | + adc128_show_in, NULL, 4, 0); |
1471 | static SENSOR_DEVICE_ATTR_2(in4_min, S_IWUSR | S_IRUGO, |
1472 | adc128_show_in, adc128_set_in, 4, 1); |
1473 | static SENSOR_DEVICE_ATTR_2(in4_max, S_IWUSR | S_IRUGO, |
1474 | adc128_show_in, adc128_set_in, 4, 2); |
1475 | |
1476 | -static SENSOR_DEVICE_ATTR_2(in5_input, S_IWUSR | S_IRUGO, |
1477 | - adc128_show_in, adc128_set_in, 5, 0); |
1478 | +static SENSOR_DEVICE_ATTR_2(in5_input, S_IRUGO, |
1479 | + adc128_show_in, NULL, 5, 0); |
1480 | static SENSOR_DEVICE_ATTR_2(in5_min, S_IWUSR | S_IRUGO, |
1481 | adc128_show_in, adc128_set_in, 5, 1); |
1482 | static SENSOR_DEVICE_ATTR_2(in5_max, S_IWUSR | S_IRUGO, |
1483 | adc128_show_in, adc128_set_in, 5, 2); |
1484 | |
1485 | -static SENSOR_DEVICE_ATTR_2(in6_input, S_IWUSR | S_IRUGO, |
1486 | - adc128_show_in, adc128_set_in, 6, 0); |
1487 | +static SENSOR_DEVICE_ATTR_2(in6_input, S_IRUGO, |
1488 | + adc128_show_in, NULL, 6, 0); |
1489 | static SENSOR_DEVICE_ATTR_2(in6_min, S_IWUSR | S_IRUGO, |
1490 | adc128_show_in, adc128_set_in, 6, 1); |
1491 | static SENSOR_DEVICE_ATTR_2(in6_max, S_IWUSR | S_IRUGO, |
1492 | diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c |
1493 | index 3eb4281689b5..d74241bb278c 100644 |
1494 | --- a/drivers/hwmon/adm1021.c |
1495 | +++ b/drivers/hwmon/adm1021.c |
1496 | @@ -185,7 +185,7 @@ static ssize_t set_temp_max(struct device *dev, |
1497 | struct adm1021_data *data = dev_get_drvdata(dev); |
1498 | struct i2c_client *client = data->client; |
1499 | long temp; |
1500 | - int err; |
1501 | + int reg_val, err; |
1502 | |
1503 | err = kstrtol(buf, 10, &temp); |
1504 | if (err) |
1505 | @@ -193,10 +193,11 @@ static ssize_t set_temp_max(struct device *dev, |
1506 | temp /= 1000; |
1507 | |
1508 | mutex_lock(&data->update_lock); |
1509 | - data->temp_max[index] = clamp_val(temp, -128, 127); |
1510 | + reg_val = clamp_val(temp, -128, 127); |
1511 | + data->temp_max[index] = reg_val * 1000; |
1512 | if (!read_only) |
1513 | i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index), |
1514 | - data->temp_max[index]); |
1515 | + reg_val); |
1516 | mutex_unlock(&data->update_lock); |
1517 | |
1518 | return count; |
1519 | @@ -210,7 +211,7 @@ static ssize_t set_temp_min(struct device *dev, |
1520 | struct adm1021_data *data = dev_get_drvdata(dev); |
1521 | struct i2c_client *client = data->client; |
1522 | long temp; |
1523 | - int err; |
1524 | + int reg_val, err; |
1525 | |
1526 | err = kstrtol(buf, 10, &temp); |
1527 | if (err) |
1528 | @@ -218,10 +219,11 @@ static ssize_t set_temp_min(struct device *dev, |
1529 | temp /= 1000; |
1530 | |
1531 | mutex_lock(&data->update_lock); |
1532 | - data->temp_min[index] = clamp_val(temp, -128, 127); |
1533 | + reg_val = clamp_val(temp, -128, 127); |
1534 | + data->temp_min[index] = reg_val * 1000; |
1535 | if (!read_only) |
1536 | i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index), |
1537 | - data->temp_min[index]); |
1538 | + reg_val); |
1539 | mutex_unlock(&data->update_lock); |
1540 | |
1541 | return count; |
1542 | diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c |
1543 | index d19c790e410a..e38115ce0350 100644 |
1544 | --- a/drivers/hwmon/adm1029.c |
1545 | +++ b/drivers/hwmon/adm1029.c |
1546 | @@ -232,6 +232,9 @@ static ssize_t set_fan_div(struct device *dev, |
1547 | /* Update the value */ |
1548 | reg = (reg & 0x3F) | (val << 6); |
1549 | |
1550 | + /* Update the cache */ |
1551 | + data->fan_div[attr->index] = reg; |
1552 | + |
1553 | /* Write value */ |
1554 | i2c_smbus_write_byte_data(client, |
1555 | ADM1029_REG_FAN_DIV[attr->index], reg); |
1556 | diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c |
1557 | index a8a540ca8c34..51c1a5a165ab 100644 |
1558 | --- a/drivers/hwmon/adm1031.c |
1559 | +++ b/drivers/hwmon/adm1031.c |
1560 | @@ -365,6 +365,7 @@ set_auto_temp_min(struct device *dev, struct device_attribute *attr, |
1561 | if (ret) |
1562 | return ret; |
1563 | |
1564 | + val = clamp_val(val, 0, 127000); |
1565 | mutex_lock(&data->update_lock); |
1566 | data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]); |
1567 | adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr), |
1568 | @@ -394,6 +395,7 @@ set_auto_temp_max(struct device *dev, struct device_attribute *attr, |
1569 | if (ret) |
1570 | return ret; |
1571 | |
1572 | + val = clamp_val(val, 0, 127000); |
1573 | mutex_lock(&data->update_lock); |
1574 | data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr], |
1575 | data->pwm[nr]); |
1576 | @@ -696,7 +698,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr, |
1577 | if (ret) |
1578 | return ret; |
1579 | |
1580 | - val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); |
1581 | + val = clamp_val(val, -55000, 127000); |
1582 | mutex_lock(&data->update_lock); |
1583 | data->temp_min[nr] = TEMP_TO_REG(val); |
1584 | adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr), |
1585 | @@ -717,7 +719,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr, |
1586 | if (ret) |
1587 | return ret; |
1588 | |
1589 | - val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); |
1590 | + val = clamp_val(val, -55000, 127000); |
1591 | mutex_lock(&data->update_lock); |
1592 | data->temp_max[nr] = TEMP_TO_REG(val); |
1593 | adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr), |
1594 | @@ -738,7 +740,7 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr, |
1595 | if (ret) |
1596 | return ret; |
1597 | |
1598 | - val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); |
1599 | + val = clamp_val(val, -55000, 127000); |
1600 | mutex_lock(&data->update_lock); |
1601 | data->temp_crit[nr] = TEMP_TO_REG(val); |
1602 | adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr), |
1603 | diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c |
1604 | index eea817296513..9f2be3dd28f3 100644 |
1605 | --- a/drivers/hwmon/amc6821.c |
1606 | +++ b/drivers/hwmon/amc6821.c |
1607 | @@ -704,7 +704,7 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, |
1608 | get_temp_alarm, NULL, IDX_TEMP1_MAX); |
1609 | static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, |
1610 | get_temp_alarm, NULL, IDX_TEMP1_CRIT); |
1611 | -static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO | S_IWUSR, |
1612 | +static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, |
1613 | get_temp, NULL, IDX_TEMP2_INPUT); |
1614 | static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp, |
1615 | set_temp, IDX_TEMP2_MIN); |
1616 | diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c |
1617 | index fd892dd48e4c..78002de46cb6 100644 |
1618 | --- a/drivers/hwmon/emc2103.c |
1619 | +++ b/drivers/hwmon/emc2103.c |
1620 | @@ -250,9 +250,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da, |
1621 | if (result < 0) |
1622 | return result; |
1623 | |
1624 | - val = DIV_ROUND_CLOSEST(val, 1000); |
1625 | - if ((val < -63) || (val > 127)) |
1626 | - return -EINVAL; |
1627 | + val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127); |
1628 | |
1629 | mutex_lock(&data->update_lock); |
1630 | data->temp_min[nr] = val; |
1631 | @@ -274,9 +272,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da, |
1632 | if (result < 0) |
1633 | return result; |
1634 | |
1635 | - val = DIV_ROUND_CLOSEST(val, 1000); |
1636 | - if ((val < -63) || (val > 127)) |
1637 | - return -EINVAL; |
1638 | + val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127); |
1639 | |
1640 | mutex_lock(&data->update_lock); |
1641 | data->temp_max[nr] = val; |
1642 | @@ -390,15 +386,14 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da, |
1643 | { |
1644 | struct emc2103_data *data = emc2103_update_device(dev); |
1645 | struct i2c_client *client = to_i2c_client(dev); |
1646 | - long rpm_target; |
1647 | + unsigned long rpm_target; |
1648 | |
1649 | - int result = kstrtol(buf, 10, &rpm_target); |
1650 | + int result = kstrtoul(buf, 10, &rpm_target); |
1651 | if (result < 0) |
1652 | return result; |
1653 | |
1654 | /* Datasheet states 16384 as maximum RPM target (table 3.2) */ |
1655 | - if ((rpm_target < 0) || (rpm_target > 16384)) |
1656 | - return -EINVAL; |
1657 | + rpm_target = clamp_val(rpm_target, 0, 16384); |
1658 | |
1659 | mutex_lock(&data->update_lock); |
1660 | |
1661 | diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c |
1662 | index a4db3026bec6..d5dc4c6ce86c 100644 |
1663 | --- a/drivers/iio/adc/ti_am335x_adc.c |
1664 | +++ b/drivers/iio/adc/ti_am335x_adc.c |
1665 | @@ -374,7 +374,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, |
1666 | return -EAGAIN; |
1667 | } |
1668 | } |
1669 | - map_val = chan->channel + TOTAL_CHANNELS; |
1670 | + map_val = adc_dev->channel_step[chan->scan_index]; |
1671 | |
1672 | /* |
1673 | * We check the complete FIFO. We programmed just one entry but in case |
1674 | diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c |
1675 | index 3842ac738f98..db404a0f7e2c 100644 |
1676 | --- a/drivers/md/dm-io.c |
1677 | +++ b/drivers/md/dm-io.c |
1678 | @@ -10,6 +10,7 @@ |
1679 | #include <linux/device-mapper.h> |
1680 | |
1681 | #include <linux/bio.h> |
1682 | +#include <linux/completion.h> |
1683 | #include <linux/mempool.h> |
1684 | #include <linux/module.h> |
1685 | #include <linux/sched.h> |
1686 | @@ -32,7 +33,7 @@ struct dm_io_client { |
1687 | struct io { |
1688 | unsigned long error_bits; |
1689 | atomic_t count; |
1690 | - struct task_struct *sleeper; |
1691 | + struct completion *wait; |
1692 | struct dm_io_client *client; |
1693 | io_notify_fn callback; |
1694 | void *context; |
1695 | @@ -121,8 +122,8 @@ static void dec_count(struct io *io, unsigned int region, int error) |
1696 | invalidate_kernel_vmap_range(io->vma_invalidate_address, |
1697 | io->vma_invalidate_size); |
1698 | |
1699 | - if (io->sleeper) |
1700 | - wake_up_process(io->sleeper); |
1701 | + if (io->wait) |
1702 | + complete(io->wait); |
1703 | |
1704 | else { |
1705 | unsigned long r = io->error_bits; |
1706 | @@ -387,6 +388,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, |
1707 | */ |
1708 | volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1]; |
1709 | struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io)); |
1710 | + DECLARE_COMPLETION_ONSTACK(wait); |
1711 | |
1712 | if (num_regions > 1 && (rw & RW_MASK) != WRITE) { |
1713 | WARN_ON(1); |
1714 | @@ -395,7 +397,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, |
1715 | |
1716 | io->error_bits = 0; |
1717 | atomic_set(&io->count, 1); /* see dispatch_io() */ |
1718 | - io->sleeper = current; |
1719 | + io->wait = &wait; |
1720 | io->client = client; |
1721 | |
1722 | io->vma_invalidate_address = dp->vma_invalidate_address; |
1723 | @@ -403,15 +405,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, |
1724 | |
1725 | dispatch_io(rw, num_regions, where, dp, io, 1); |
1726 | |
1727 | - while (1) { |
1728 | - set_current_state(TASK_UNINTERRUPTIBLE); |
1729 | - |
1730 | - if (!atomic_read(&io->count)) |
1731 | - break; |
1732 | - |
1733 | - io_schedule(); |
1734 | - } |
1735 | - set_current_state(TASK_RUNNING); |
1736 | + wait_for_completion_io(&wait); |
1737 | |
1738 | if (error_bits) |
1739 | *error_bits = io->error_bits; |
1740 | @@ -434,7 +428,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions, |
1741 | io = mempool_alloc(client->pool, GFP_NOIO); |
1742 | io->error_bits = 0; |
1743 | atomic_set(&io->count, 1); /* see dispatch_io() */ |
1744 | - io->sleeper = NULL; |
1745 | + io->wait = NULL; |
1746 | io->client = client; |
1747 | io->callback = fn; |
1748 | io->context = context; |
1749 | diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c |
1750 | index ebfa411d1a7d..9efb863ad052 100644 |
1751 | --- a/drivers/md/dm-mpath.c |
1752 | +++ b/drivers/md/dm-mpath.c |
1753 | @@ -1620,8 +1620,9 @@ static int multipath_busy(struct dm_target *ti) |
1754 | |
1755 | spin_lock_irqsave(&m->lock, flags); |
1756 | |
1757 | - /* pg_init in progress, requeue until done */ |
1758 | - if (!pg_ready(m)) { |
1759 | + /* pg_init in progress or no paths available */ |
1760 | + if (m->pg_init_in_progress || |
1761 | + (!m->nr_valid_paths && m->queue_if_no_path)) { |
1762 | busy = 1; |
1763 | goto out; |
1764 | } |
1765 | diff --git a/drivers/md/dm.c b/drivers/md/dm.c |
1766 | index 455e64916498..490ac238772d 100644 |
1767 | --- a/drivers/md/dm.c |
1768 | +++ b/drivers/md/dm.c |
1769 | @@ -54,6 +54,8 @@ static void do_deferred_remove(struct work_struct *w); |
1770 | |
1771 | static DECLARE_WORK(deferred_remove_work, do_deferred_remove); |
1772 | |
1773 | +static struct workqueue_struct *deferred_remove_workqueue; |
1774 | + |
1775 | /* |
1776 | * For bio-based dm. |
1777 | * One of these is allocated per bio. |
1778 | @@ -276,16 +278,24 @@ static int __init local_init(void) |
1779 | if (r) |
1780 | goto out_free_rq_tio_cache; |
1781 | |
1782 | + deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); |
1783 | + if (!deferred_remove_workqueue) { |
1784 | + r = -ENOMEM; |
1785 | + goto out_uevent_exit; |
1786 | + } |
1787 | + |
1788 | _major = major; |
1789 | r = register_blkdev(_major, _name); |
1790 | if (r < 0) |
1791 | - goto out_uevent_exit; |
1792 | + goto out_free_workqueue; |
1793 | |
1794 | if (!_major) |
1795 | _major = r; |
1796 | |
1797 | return 0; |
1798 | |
1799 | +out_free_workqueue: |
1800 | + destroy_workqueue(deferred_remove_workqueue); |
1801 | out_uevent_exit: |
1802 | dm_uevent_exit(); |
1803 | out_free_rq_tio_cache: |
1804 | @@ -299,6 +309,7 @@ out_free_io_cache: |
1805 | static void local_exit(void) |
1806 | { |
1807 | flush_scheduled_work(); |
1808 | + destroy_workqueue(deferred_remove_workqueue); |
1809 | |
1810 | kmem_cache_destroy(_rq_tio_cache); |
1811 | kmem_cache_destroy(_io_cache); |
1812 | @@ -407,7 +418,7 @@ static void dm_blk_close(struct gendisk *disk, fmode_t mode) |
1813 | |
1814 | if (atomic_dec_and_test(&md->open_count) && |
1815 | (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) |
1816 | - schedule_work(&deferred_remove_work); |
1817 | + queue_work(deferred_remove_workqueue, &deferred_remove_work); |
1818 | |
1819 | dm_put(md); |
1820 | |
1821 | diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c |
1822 | index 0a685089c3fd..69faeec7fa4c 100644 |
1823 | --- a/drivers/mtd/nand/omap2.c |
1824 | +++ b/drivers/mtd/nand/omap2.c |
1825 | @@ -1162,7 +1162,7 @@ static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd, |
1826 | struct gpmc_nand_regs *gpmc_regs = &info->reg; |
1827 | u8 *ecc_code; |
1828 | unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4; |
1829 | - int i; |
1830 | + int i, j; |
1831 | |
1832 | nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1; |
1833 | for (i = 0; i < nsectors; i++) { |
1834 | @@ -1210,8 +1210,8 @@ static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd, |
1835 | case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: |
1836 | /* Add constant polynomial to remainder, so that |
1837 | * ECC of blank pages results in 0x0 on reading back */ |
1838 | - for (i = 0; i < eccbytes; i++) |
1839 | - ecc_calc[i] ^= bch4_polynomial[i]; |
1840 | + for (j = 0; j < eccbytes; j++) |
1841 | + ecc_calc[j] ^= bch4_polynomial[j]; |
1842 | break; |
1843 | case OMAP_ECC_BCH4_CODE_HW: |
1844 | /* Set 8th ECC byte as 0x0 for ROM compatibility */ |
1845 | @@ -1220,8 +1220,8 @@ static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd, |
1846 | case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: |
1847 | /* Add constant polynomial to remainder, so that |
1848 | * ECC of blank pages results in 0x0 on reading back */ |
1849 | - for (i = 0; i < eccbytes; i++) |
1850 | - ecc_calc[i] ^= bch8_polynomial[i]; |
1851 | + for (j = 0; j < eccbytes; j++) |
1852 | + ecc_calc[j] ^= bch8_polynomial[j]; |
1853 | break; |
1854 | case OMAP_ECC_BCH8_CODE_HW: |
1855 | /* Set 14th ECC byte as 0x0 for ROM compatibility */ |
1856 | diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c |
1857 | index cf0761f08911..71dfd79f391a 100644 |
1858 | --- a/drivers/net/ethernet/intel/i40e/i40e_main.c |
1859 | +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c |
1860 | @@ -4310,7 +4310,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi) |
1861 | goto err_setup_rx; |
1862 | |
1863 | if (!vsi->netdev) { |
1864 | - err = EINVAL; |
1865 | + err = -EINVAL; |
1866 | goto err_setup_rx; |
1867 | } |
1868 | snprintf(int_name, sizeof(int_name) - 1, "%s-%s", |
1869 | diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c |
1870 | index 83b01fa02400..bba3726ab510 100644 |
1871 | --- a/drivers/pci/pci.c |
1872 | +++ b/drivers/pci/pci.c |
1873 | @@ -3109,8 +3109,13 @@ static int pci_af_flr(struct pci_dev *dev, int probe) |
1874 | if (probe) |
1875 | return 0; |
1876 | |
1877 | - /* Wait for Transaction Pending bit clean */ |
1878 | - if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP)) |
1879 | + /* |
1880 | + * Wait for Transaction Pending bit to clear. A word-aligned test |
1881 | + * is used, so we use the control offset rather than status and shift |
1882 | + * the test bit to match. |
1883 | + */ |
1884 | + if (pci_wait_for_pending(dev, pos + PCI_AF_CTRL, |
1885 | + PCI_AF_STATUS_TP << 8)) |
1886 | goto clear; |
1887 | |
1888 | dev_err(&dev->dev, "transaction is not cleared; " |
1889 | diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c |
1890 | index c64a2f3b2d62..49c446530101 100644 |
1891 | --- a/drivers/phy/phy-core.c |
1892 | +++ b/drivers/phy/phy-core.c |
1893 | @@ -614,8 +614,9 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops, |
1894 | return phy; |
1895 | |
1896 | put_dev: |
1897 | - put_device(&phy->dev); |
1898 | - ida_remove(&phy_ida, phy->id); |
1899 | + put_device(&phy->dev); /* calls phy_release() which frees resources */ |
1900 | + return ERR_PTR(ret); |
1901 | + |
1902 | free_phy: |
1903 | kfree(phy); |
1904 | return ERR_PTR(ret); |
1905 | @@ -799,7 +800,7 @@ static void phy_release(struct device *dev) |
1906 | |
1907 | phy = to_phy(dev); |
1908 | dev_vdbg(dev, "releasing '%s'\n", dev_name(dev)); |
1909 | - ida_remove(&phy_ida, phy->id); |
1910 | + ida_simple_remove(&phy_ida, phy->id); |
1911 | kfree(phy); |
1912 | } |
1913 | |
1914 | diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c |
1915 | index 1ecfe3bd92ac..1cff2a21db67 100644 |
1916 | --- a/drivers/rtc/rtc-puv3.c |
1917 | +++ b/drivers/rtc/rtc-puv3.c |
1918 | @@ -71,7 +71,7 @@ static int puv3_rtc_setpie(struct device *dev, int enabled) |
1919 | { |
1920 | unsigned int tmp; |
1921 | |
1922 | - dev_debug(dev, "%s: pie=%d\n", __func__, enabled); |
1923 | + dev_dbg(dev, "%s: pie=%d\n", __func__, enabled); |
1924 | |
1925 | spin_lock_irq(&puv3_rtc_pie_lock); |
1926 | tmp = readl(RTC_RTSR) & ~RTC_RTSR_HZE; |
1927 | @@ -140,7 +140,7 @@ static int puv3_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) |
1928 | rtc_tm_to_time(tm, &rtcalarm_count); |
1929 | writel(rtcalarm_count, RTC_RTAR); |
1930 | |
1931 | - puv3_rtc_setaie(&dev->dev, alrm->enabled); |
1932 | + puv3_rtc_setaie(dev, alrm->enabled); |
1933 | |
1934 | if (alrm->enabled) |
1935 | enable_irq_wake(puv3_rtc_alarmno); |
1936 | diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c |
1937 | index fdb07199d9c2..1967bee4f076 100644 |
1938 | --- a/drivers/thermal/thermal_hwmon.c |
1939 | +++ b/drivers/thermal/thermal_hwmon.c |
1940 | @@ -140,6 +140,12 @@ thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon, |
1941 | return NULL; |
1942 | } |
1943 | |
1944 | +static bool thermal_zone_crit_temp_valid(struct thermal_zone_device *tz) |
1945 | +{ |
1946 | + unsigned long temp; |
1947 | + return tz->ops->get_crit_temp && !tz->ops->get_crit_temp(tz, &temp); |
1948 | +} |
1949 | + |
1950 | int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) |
1951 | { |
1952 | struct thermal_hwmon_device *hwmon; |
1953 | @@ -189,21 +195,18 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) |
1954 | if (result) |
1955 | goto free_temp_mem; |
1956 | |
1957 | - if (tz->ops->get_crit_temp) { |
1958 | - unsigned long temperature; |
1959 | - if (!tz->ops->get_crit_temp(tz, &temperature)) { |
1960 | - snprintf(temp->temp_crit.name, |
1961 | - sizeof(temp->temp_crit.name), |
1962 | + if (thermal_zone_crit_temp_valid(tz)) { |
1963 | + snprintf(temp->temp_crit.name, |
1964 | + sizeof(temp->temp_crit.name), |
1965 | "temp%d_crit", hwmon->count); |
1966 | - temp->temp_crit.attr.attr.name = temp->temp_crit.name; |
1967 | - temp->temp_crit.attr.attr.mode = 0444; |
1968 | - temp->temp_crit.attr.show = temp_crit_show; |
1969 | - sysfs_attr_init(&temp->temp_crit.attr.attr); |
1970 | - result = device_create_file(hwmon->device, |
1971 | - &temp->temp_crit.attr); |
1972 | - if (result) |
1973 | - goto unregister_input; |
1974 | - } |
1975 | + temp->temp_crit.attr.attr.name = temp->temp_crit.name; |
1976 | + temp->temp_crit.attr.attr.mode = 0444; |
1977 | + temp->temp_crit.attr.show = temp_crit_show; |
1978 | + sysfs_attr_init(&temp->temp_crit.attr.attr); |
1979 | + result = device_create_file(hwmon->device, |
1980 | + &temp->temp_crit.attr); |
1981 | + if (result) |
1982 | + goto unregister_input; |
1983 | } |
1984 | |
1985 | mutex_lock(&thermal_hwmon_list_lock); |
1986 | @@ -250,7 +253,7 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) |
1987 | } |
1988 | |
1989 | device_remove_file(hwmon->device, &temp->temp_input.attr); |
1990 | - if (tz->ops->get_crit_temp) |
1991 | + if (thermal_zone_crit_temp_valid(tz)) |
1992 | device_remove_file(hwmon->device, &temp->temp_crit.attr); |
1993 | |
1994 | mutex_lock(&thermal_hwmon_list_lock); |
1995 | diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c |
1996 | index 3b6c1a2e25de..be1c842ec747 100644 |
1997 | --- a/drivers/tty/serial/imx.c |
1998 | +++ b/drivers/tty/serial/imx.c |
1999 | @@ -563,6 +563,9 @@ static void imx_start_tx(struct uart_port *port) |
2000 | struct imx_port *sport = (struct imx_port *)port; |
2001 | unsigned long temp; |
2002 | |
2003 | + if (uart_circ_empty(&port->state->xmit)) |
2004 | + return; |
2005 | + |
2006 | if (USE_IRDA(sport)) { |
2007 | /* half duplex in IrDA mode; have to disable receive mode */ |
2008 | temp = readl(sport->port.membase + UCR4); |
2009 | diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c |
2010 | index 1efd4c36ba0c..99b7b8697861 100644 |
2011 | --- a/drivers/tty/serial/ip22zilog.c |
2012 | +++ b/drivers/tty/serial/ip22zilog.c |
2013 | @@ -603,6 +603,8 @@ static void ip22zilog_start_tx(struct uart_port *port) |
2014 | } else { |
2015 | struct circ_buf *xmit = &port->state->xmit; |
2016 | |
2017 | + if (uart_circ_empty(xmit)) |
2018 | + return; |
2019 | writeb(xmit->buf[xmit->tail], &channel->data); |
2020 | ZSDELAY(); |
2021 | ZS_WSYNC(channel); |
2022 | diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c |
2023 | index 68f2c53e0b54..5702828fb62e 100644 |
2024 | --- a/drivers/tty/serial/m32r_sio.c |
2025 | +++ b/drivers/tty/serial/m32r_sio.c |
2026 | @@ -266,9 +266,11 @@ static void m32r_sio_start_tx(struct uart_port *port) |
2027 | if (!(up->ier & UART_IER_THRI)) { |
2028 | up->ier |= UART_IER_THRI; |
2029 | serial_out(up, UART_IER, up->ier); |
2030 | - serial_out(up, UART_TX, xmit->buf[xmit->tail]); |
2031 | - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); |
2032 | - up->port.icount.tx++; |
2033 | + if (!uart_circ_empty(xmit)) { |
2034 | + serial_out(up, UART_TX, xmit->buf[xmit->tail]); |
2035 | + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); |
2036 | + up->port.icount.tx++; |
2037 | + } |
2038 | } |
2039 | while((serial_in(up, UART_LSR) & UART_EMPTY) != UART_EMPTY); |
2040 | #else |
2041 | diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c |
2042 | index 8193635103ee..f7ad5b903055 100644 |
2043 | --- a/drivers/tty/serial/pmac_zilog.c |
2044 | +++ b/drivers/tty/serial/pmac_zilog.c |
2045 | @@ -653,6 +653,8 @@ static void pmz_start_tx(struct uart_port *port) |
2046 | } else { |
2047 | struct circ_buf *xmit = &port->state->xmit; |
2048 | |
2049 | + if (uart_circ_empty(xmit)) |
2050 | + goto out; |
2051 | write_zsdata(uap, xmit->buf[xmit->tail]); |
2052 | zssync(uap); |
2053 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); |
2054 | @@ -661,6 +663,7 @@ static void pmz_start_tx(struct uart_port *port) |
2055 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) |
2056 | uart_write_wakeup(&uap->port); |
2057 | } |
2058 | + out: |
2059 | pmz_debug("pmz: start_tx() done.\n"); |
2060 | } |
2061 | |
2062 | diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c |
2063 | index 80a58eca785b..2f57df9a71d9 100644 |
2064 | --- a/drivers/tty/serial/sunsab.c |
2065 | +++ b/drivers/tty/serial/sunsab.c |
2066 | @@ -427,6 +427,9 @@ static void sunsab_start_tx(struct uart_port *port) |
2067 | struct circ_buf *xmit = &up->port.state->xmit; |
2068 | int i; |
2069 | |
2070 | + if (uart_circ_empty(xmit)) |
2071 | + return; |
2072 | + |
2073 | up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR); |
2074 | writeb(up->interrupt_mask1, &up->regs->w.imr1); |
2075 | |
2076 | diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c |
2077 | index a85db8b87156..02df3940b95e 100644 |
2078 | --- a/drivers/tty/serial/sunzilog.c |
2079 | +++ b/drivers/tty/serial/sunzilog.c |
2080 | @@ -703,6 +703,8 @@ static void sunzilog_start_tx(struct uart_port *port) |
2081 | } else { |
2082 | struct circ_buf *xmit = &port->state->xmit; |
2083 | |
2084 | + if (uart_circ_empty(xmit)) |
2085 | + return; |
2086 | writeb(xmit->buf[xmit->tail], &channel->data); |
2087 | ZSDELAY(); |
2088 | ZS_WSYNC(channel); |
2089 | diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c |
2090 | index 762e4a5f5ae9..330df5ce435b 100644 |
2091 | --- a/drivers/usb/serial/cp210x.c |
2092 | +++ b/drivers/usb/serial/cp210x.c |
2093 | @@ -153,6 +153,7 @@ static const struct usb_device_id id_table[] = { |
2094 | { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ |
2095 | { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ |
2096 | { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ |
2097 | + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ |
2098 | { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ |
2099 | { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ |
2100 | { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ |
2101 | diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c |
2102 | index 115662c16dcc..8a3813be1b28 100644 |
2103 | --- a/drivers/usb/serial/ftdi_sio.c |
2104 | +++ b/drivers/usb/serial/ftdi_sio.c |
2105 | @@ -720,7 +720,8 @@ static const struct usb_device_id id_table_combined[] = { |
2106 | { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) }, |
2107 | { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) }, |
2108 | { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) }, |
2109 | - { USB_DEVICE(TESTO_VID, TESTO_USB_INTERFACE_PID) }, |
2110 | + { USB_DEVICE(TESTO_VID, TESTO_1_PID) }, |
2111 | + { USB_DEVICE(TESTO_VID, TESTO_3_PID) }, |
2112 | { USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) }, |
2113 | { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) }, |
2114 | { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) }, |
2115 | @@ -944,6 +945,8 @@ static const struct usb_device_id id_table_combined[] = { |
2116 | { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) }, |
2117 | { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) }, |
2118 | { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) }, |
2119 | + /* Infineon Devices */ |
2120 | + { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, |
2121 | { } /* Terminating entry */ |
2122 | }; |
2123 | |
2124 | diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h |
2125 | index 500474c48f4b..c4777bc6aee0 100644 |
2126 | --- a/drivers/usb/serial/ftdi_sio_ids.h |
2127 | +++ b/drivers/usb/serial/ftdi_sio_ids.h |
2128 | @@ -584,6 +584,12 @@ |
2129 | #define RATOC_PRODUCT_ID_USB60F 0xb020 |
2130 | |
2131 | /* |
2132 | + * Infineon Technologies |
2133 | + */ |
2134 | +#define INFINEON_VID 0x058b |
2135 | +#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */ |
2136 | + |
2137 | +/* |
2138 | * Acton Research Corp. |
2139 | */ |
2140 | #define ACTON_VID 0x0647 /* Vendor ID */ |
2141 | @@ -798,7 +804,8 @@ |
2142 | * Submitted by Colin Leroy |
2143 | */ |
2144 | #define TESTO_VID 0x128D |
2145 | -#define TESTO_USB_INTERFACE_PID 0x0001 |
2146 | +#define TESTO_1_PID 0x0001 |
2147 | +#define TESTO_3_PID 0x0003 |
2148 | |
2149 | /* |
2150 | * Mobility Electronics products. |
2151 | diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
2152 | index e25e8ca09fe2..9da566a3f5c8 100644 |
2153 | --- a/drivers/usb/serial/option.c |
2154 | +++ b/drivers/usb/serial/option.c |
2155 | @@ -1487,6 +1487,8 @@ static const struct usb_device_id option_ids[] = { |
2156 | .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, |
2157 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */ |
2158 | .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, |
2159 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */ |
2160 | + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, |
2161 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, |
2162 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, |
2163 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, |
2164 | diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c |
2165 | index 0ebc21204b51..c7d7d5fc3ac3 100644 |
2166 | --- a/fs/ext4/extents_status.c |
2167 | +++ b/fs/ext4/extents_status.c |
2168 | @@ -960,10 +960,10 @@ retry: |
2169 | continue; |
2170 | } |
2171 | |
2172 | - if (ei->i_es_lru_nr == 0 || ei == locked_ei) |
2173 | + if (ei->i_es_lru_nr == 0 || ei == locked_ei || |
2174 | + !write_trylock(&ei->i_es_lock)) |
2175 | continue; |
2176 | |
2177 | - write_lock(&ei->i_es_lock); |
2178 | shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan); |
2179 | if (ei->i_es_lru_nr == 0) |
2180 | list_del_init(&ei->i_es_lru); |
2181 | diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c |
2182 | index 0ee59a6644e2..64bb32f17903 100644 |
2183 | --- a/fs/ext4/ialloc.c |
2184 | +++ b/fs/ext4/ialloc.c |
2185 | @@ -851,6 +851,13 @@ got: |
2186 | goto out; |
2187 | } |
2188 | |
2189 | + BUFFER_TRACE(group_desc_bh, "get_write_access"); |
2190 | + err = ext4_journal_get_write_access(handle, group_desc_bh); |
2191 | + if (err) { |
2192 | + ext4_std_error(sb, err); |
2193 | + goto out; |
2194 | + } |
2195 | + |
2196 | /* We may have to initialize the block bitmap if it isn't already */ |
2197 | if (ext4_has_group_desc_csum(sb) && |
2198 | gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { |
2199 | @@ -887,13 +894,6 @@ got: |
2200 | } |
2201 | } |
2202 | |
2203 | - BUFFER_TRACE(group_desc_bh, "get_write_access"); |
2204 | - err = ext4_journal_get_write_access(handle, group_desc_bh); |
2205 | - if (err) { |
2206 | - ext4_std_error(sb, err); |
2207 | - goto out; |
2208 | - } |
2209 | - |
2210 | /* Update the relevant bg descriptor fields */ |
2211 | if (ext4_has_group_desc_csum(sb)) { |
2212 | int free; |
2213 | diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c |
2214 | index fe4e668d3023..2735a72d1ec4 100644 |
2215 | --- a/fs/ext4/mballoc.c |
2216 | +++ b/fs/ext4/mballoc.c |
2217 | @@ -751,8 +751,8 @@ void ext4_mb_generate_buddy(struct super_block *sb, |
2218 | |
2219 | if (free != grp->bb_free) { |
2220 | ext4_grp_locked_error(sb, group, 0, 0, |
2221 | - "%u clusters in bitmap, %u in gd; " |
2222 | - "block bitmap corrupt.", |
2223 | + "block bitmap and bg descriptor " |
2224 | + "inconsistent: %u vs %u free clusters", |
2225 | free, grp->bb_free); |
2226 | /* |
2227 | * If we intend to continue, we consider group descriptor |
2228 | diff --git a/fs/ext4/super.c b/fs/ext4/super.c |
2229 | index 6f9e6fadac04..29a403c0c003 100644 |
2230 | --- a/fs/ext4/super.c |
2231 | +++ b/fs/ext4/super.c |
2232 | @@ -1524,8 +1524,6 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, |
2233 | arg = JBD2_DEFAULT_MAX_COMMIT_AGE; |
2234 | sbi->s_commit_interval = HZ * arg; |
2235 | } else if (token == Opt_max_batch_time) { |
2236 | - if (arg == 0) |
2237 | - arg = EXT4_DEF_MAX_BATCH_TIME; |
2238 | sbi->s_max_batch_time = arg; |
2239 | } else if (token == Opt_min_batch_time) { |
2240 | sbi->s_min_batch_time = arg; |
2241 | @@ -2798,10 +2796,11 @@ static void print_daily_error_info(unsigned long arg) |
2242 | es = sbi->s_es; |
2243 | |
2244 | if (es->s_error_count) |
2245 | - ext4_msg(sb, KERN_NOTICE, "error count: %u", |
2246 | + /* fsck newer than v1.41.13 is needed to clean this condition. */ |
2247 | + ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u", |
2248 | le32_to_cpu(es->s_error_count)); |
2249 | if (es->s_first_error_time) { |
2250 | - printk(KERN_NOTICE "EXT4-fs (%s): initial error at %u: %.*s:%d", |
2251 | + printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d", |
2252 | sb->s_id, le32_to_cpu(es->s_first_error_time), |
2253 | (int) sizeof(es->s_first_error_func), |
2254 | es->s_first_error_func, |
2255 | @@ -2815,7 +2814,7 @@ static void print_daily_error_info(unsigned long arg) |
2256 | printk("\n"); |
2257 | } |
2258 | if (es->s_last_error_time) { |
2259 | - printk(KERN_NOTICE "EXT4-fs (%s): last error at %u: %.*s:%d", |
2260 | + printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d", |
2261 | sb->s_id, le32_to_cpu(es->s_last_error_time), |
2262 | (int) sizeof(es->s_last_error_func), |
2263 | es->s_last_error_func, |
2264 | @@ -3869,38 +3868,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
2265 | goto failed_mount2; |
2266 | } |
2267 | } |
2268 | - |
2269 | - /* |
2270 | - * set up enough so that it can read an inode, |
2271 | - * and create new inode for buddy allocator |
2272 | - */ |
2273 | - sbi->s_gdb_count = db_count; |
2274 | - if (!test_opt(sb, NOLOAD) && |
2275 | - EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) |
2276 | - sb->s_op = &ext4_sops; |
2277 | - else |
2278 | - sb->s_op = &ext4_nojournal_sops; |
2279 | - |
2280 | - ext4_ext_init(sb); |
2281 | - err = ext4_mb_init(sb); |
2282 | - if (err) { |
2283 | - ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)", |
2284 | - err); |
2285 | - goto failed_mount2; |
2286 | - } |
2287 | - |
2288 | if (!ext4_check_descriptors(sb, &first_not_zeroed)) { |
2289 | ext4_msg(sb, KERN_ERR, "group descriptors corrupted!"); |
2290 | - goto failed_mount2a; |
2291 | + goto failed_mount2; |
2292 | } |
2293 | if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) |
2294 | if (!ext4_fill_flex_info(sb)) { |
2295 | ext4_msg(sb, KERN_ERR, |
2296 | "unable to initialize " |
2297 | "flex_bg meta info!"); |
2298 | - goto failed_mount2a; |
2299 | + goto failed_mount2; |
2300 | } |
2301 | |
2302 | + sbi->s_gdb_count = db_count; |
2303 | get_random_bytes(&sbi->s_next_generation, sizeof(u32)); |
2304 | spin_lock_init(&sbi->s_next_gen_lock); |
2305 | |
2306 | @@ -3935,6 +3915,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
2307 | sbi->s_stripe = ext4_get_stripe_size(sbi); |
2308 | sbi->s_extent_max_zeroout_kb = 32; |
2309 | |
2310 | + /* |
2311 | + * set up enough so that it can read an inode |
2312 | + */ |
2313 | + if (!test_opt(sb, NOLOAD) && |
2314 | + EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) |
2315 | + sb->s_op = &ext4_sops; |
2316 | + else |
2317 | + sb->s_op = &ext4_nojournal_sops; |
2318 | sb->s_export_op = &ext4_export_ops; |
2319 | sb->s_xattr = ext4_xattr_handlers; |
2320 | #ifdef CONFIG_QUOTA |
2321 | @@ -4124,13 +4112,21 @@ no_journal: |
2322 | if (err) { |
2323 | ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for " |
2324 | "reserved pool", ext4_calculate_resv_clusters(sb)); |
2325 | - goto failed_mount5; |
2326 | + goto failed_mount4a; |
2327 | } |
2328 | |
2329 | err = ext4_setup_system_zone(sb); |
2330 | if (err) { |
2331 | ext4_msg(sb, KERN_ERR, "failed to initialize system " |
2332 | "zone (%d)", err); |
2333 | + goto failed_mount4a; |
2334 | + } |
2335 | + |
2336 | + ext4_ext_init(sb); |
2337 | + err = ext4_mb_init(sb); |
2338 | + if (err) { |
2339 | + ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)", |
2340 | + err); |
2341 | goto failed_mount5; |
2342 | } |
2343 | |
2344 | @@ -4207,8 +4203,11 @@ failed_mount8: |
2345 | failed_mount7: |
2346 | ext4_unregister_li_request(sb); |
2347 | failed_mount6: |
2348 | - ext4_release_system_zone(sb); |
2349 | + ext4_mb_release(sb); |
2350 | failed_mount5: |
2351 | + ext4_ext_release(sb); |
2352 | + ext4_release_system_zone(sb); |
2353 | +failed_mount4a: |
2354 | dput(sb->s_root); |
2355 | sb->s_root = NULL; |
2356 | failed_mount4: |
2357 | @@ -4232,14 +4231,11 @@ failed_mount3: |
2358 | percpu_counter_destroy(&sbi->s_extent_cache_cnt); |
2359 | if (sbi->s_mmp_tsk) |
2360 | kthread_stop(sbi->s_mmp_tsk); |
2361 | -failed_mount2a: |
2362 | - ext4_mb_release(sb); |
2363 | failed_mount2: |
2364 | for (i = 0; i < db_count; i++) |
2365 | brelse(sbi->s_group_desc[i]); |
2366 | ext4_kvfree(sbi->s_group_desc); |
2367 | failed_mount: |
2368 | - ext4_ext_release(sb); |
2369 | if (sbi->s_chksum_driver) |
2370 | crypto_free_shash(sbi->s_chksum_driver); |
2371 | if (sbi->s_proc) { |
2372 | diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c |
2373 | index bc077f3c8868..a63e20fee78a 100644 |
2374 | --- a/fs/f2fs/data.c |
2375 | +++ b/fs/f2fs/data.c |
2376 | @@ -869,7 +869,8 @@ static int f2fs_write_data_pages(struct address_space *mapping, |
2377 | return 0; |
2378 | |
2379 | if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE && |
2380 | - get_dirty_dents(inode) < nr_pages_to_skip(sbi, DATA)) |
2381 | + get_dirty_dents(inode) < nr_pages_to_skip(sbi, DATA) && |
2382 | + available_free_memory(sbi, DIRTY_DENTS)) |
2383 | goto skip_write; |
2384 | |
2385 | diff = nr_pages_to_write(sbi, DATA, wbc); |
2386 | diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h |
2387 | index 2ecac8312359..656523cb4513 100644 |
2388 | --- a/fs/f2fs/f2fs.h |
2389 | +++ b/fs/f2fs/f2fs.h |
2390 | @@ -1140,6 +1140,7 @@ f2fs_hash_t f2fs_dentry_hash(const char *, size_t); |
2391 | struct dnode_of_data; |
2392 | struct node_info; |
2393 | |
2394 | +bool available_free_memory(struct f2fs_sb_info *, int); |
2395 | int is_checkpointed_node(struct f2fs_sb_info *, nid_t); |
2396 | bool fsync_mark_done(struct f2fs_sb_info *, nid_t); |
2397 | void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *); |
2398 | diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c |
2399 | index a161e955c4c8..5696bde95702 100644 |
2400 | --- a/fs/f2fs/node.c |
2401 | +++ b/fs/f2fs/node.c |
2402 | @@ -26,20 +26,28 @@ |
2403 | static struct kmem_cache *nat_entry_slab; |
2404 | static struct kmem_cache *free_nid_slab; |
2405 | |
2406 | -static inline bool available_free_memory(struct f2fs_nm_info *nm_i, int type) |
2407 | +bool available_free_memory(struct f2fs_sb_info *sbi, int type) |
2408 | { |
2409 | + struct f2fs_nm_info *nm_i = NM_I(sbi); |
2410 | struct sysinfo val; |
2411 | unsigned long mem_size = 0; |
2412 | + bool res = false; |
2413 | |
2414 | si_meminfo(&val); |
2415 | - if (type == FREE_NIDS) |
2416 | - mem_size = nm_i->fcnt * sizeof(struct free_nid); |
2417 | - else if (type == NAT_ENTRIES) |
2418 | - mem_size += nm_i->nat_cnt * sizeof(struct nat_entry); |
2419 | - mem_size >>= 12; |
2420 | - |
2421 | - /* give 50:50 memory for free nids and nat caches respectively */ |
2422 | - return (mem_size < ((val.totalram * nm_i->ram_thresh) >> 11)); |
2423 | + /* give 25%, 25%, 50% memory for each components respectively */ |
2424 | + if (type == FREE_NIDS) { |
2425 | + mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >> 12; |
2426 | + res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2); |
2427 | + } else if (type == NAT_ENTRIES) { |
2428 | + mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 12; |
2429 | + res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2); |
2430 | + } else if (type == DIRTY_DENTS) { |
2431 | + if (sbi->sb->s_bdi->dirty_exceeded) |
2432 | + return false; |
2433 | + mem_size = get_pages(sbi, F2FS_DIRTY_DENTS); |
2434 | + res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 1); |
2435 | + } |
2436 | + return res; |
2437 | } |
2438 | |
2439 | static void clear_node_page_dirty(struct page *page) |
2440 | @@ -243,7 +251,7 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) |
2441 | { |
2442 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
2443 | |
2444 | - if (available_free_memory(nm_i, NAT_ENTRIES)) |
2445 | + if (available_free_memory(sbi, NAT_ENTRIES)) |
2446 | return 0; |
2447 | |
2448 | write_lock(&nm_i->nat_tree_lock); |
2449 | @@ -1315,13 +1323,14 @@ static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i, |
2450 | radix_tree_delete(&nm_i->free_nid_root, i->nid); |
2451 | } |
2452 | |
2453 | -static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build) |
2454 | +static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) |
2455 | { |
2456 | + struct f2fs_nm_info *nm_i = NM_I(sbi); |
2457 | struct free_nid *i; |
2458 | struct nat_entry *ne; |
2459 | bool allocated = false; |
2460 | |
2461 | - if (!available_free_memory(nm_i, FREE_NIDS)) |
2462 | + if (!available_free_memory(sbi, FREE_NIDS)) |
2463 | return -1; |
2464 | |
2465 | /* 0 nid should not be used */ |
2466 | @@ -1374,9 +1383,10 @@ static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid) |
2467 | kmem_cache_free(free_nid_slab, i); |
2468 | } |
2469 | |
2470 | -static void scan_nat_page(struct f2fs_nm_info *nm_i, |
2471 | +static void scan_nat_page(struct f2fs_sb_info *sbi, |
2472 | struct page *nat_page, nid_t start_nid) |
2473 | { |
2474 | + struct f2fs_nm_info *nm_i = NM_I(sbi); |
2475 | struct f2fs_nat_block *nat_blk = page_address(nat_page); |
2476 | block_t blk_addr; |
2477 | int i; |
2478 | @@ -1391,7 +1401,7 @@ static void scan_nat_page(struct f2fs_nm_info *nm_i, |
2479 | blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr); |
2480 | f2fs_bug_on(blk_addr == NEW_ADDR); |
2481 | if (blk_addr == NULL_ADDR) { |
2482 | - if (add_free_nid(nm_i, start_nid, true) < 0) |
2483 | + if (add_free_nid(sbi, start_nid, true) < 0) |
2484 | break; |
2485 | } |
2486 | } |
2487 | @@ -1415,7 +1425,7 @@ static void build_free_nids(struct f2fs_sb_info *sbi) |
2488 | while (1) { |
2489 | struct page *page = get_current_nat_page(sbi, nid); |
2490 | |
2491 | - scan_nat_page(nm_i, page, nid); |
2492 | + scan_nat_page(sbi, page, nid); |
2493 | f2fs_put_page(page, 1); |
2494 | |
2495 | nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK)); |
2496 | @@ -1435,7 +1445,7 @@ static void build_free_nids(struct f2fs_sb_info *sbi) |
2497 | block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr); |
2498 | nid = le32_to_cpu(nid_in_journal(sum, i)); |
2499 | if (addr == NULL_ADDR) |
2500 | - add_free_nid(nm_i, nid, true); |
2501 | + add_free_nid(sbi, nid, true); |
2502 | else |
2503 | remove_free_nid(nm_i, nid); |
2504 | } |
2505 | @@ -1512,7 +1522,7 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) |
2506 | spin_lock(&nm_i->free_nid_list_lock); |
2507 | i = __lookup_free_nid_list(nm_i, nid); |
2508 | f2fs_bug_on(!i || i->state != NID_ALLOC); |
2509 | - if (!available_free_memory(nm_i, FREE_NIDS)) { |
2510 | + if (!available_free_memory(sbi, FREE_NIDS)) { |
2511 | __del_from_free_nid_list(nm_i, i); |
2512 | need_free = true; |
2513 | } else { |
2514 | @@ -1843,7 +1853,7 @@ flush_now: |
2515 | } |
2516 | |
2517 | if (nat_get_blkaddr(ne) == NULL_ADDR && |
2518 | - add_free_nid(NM_I(sbi), nid, false) <= 0) { |
2519 | + add_free_nid(sbi, nid, false) <= 0) { |
2520 | write_lock(&nm_i->nat_tree_lock); |
2521 | __del_from_nat_cache(nm_i, ne); |
2522 | write_unlock(&nm_i->nat_tree_lock); |
2523 | diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h |
2524 | index 5decc1a375f0..b5170776b8dc 100644 |
2525 | --- a/fs/f2fs/node.h |
2526 | +++ b/fs/f2fs/node.h |
2527 | @@ -75,9 +75,10 @@ static inline void node_info_from_raw_nat(struct node_info *ni, |
2528 | ni->version = raw_ne->version; |
2529 | } |
2530 | |
2531 | -enum nid_type { |
2532 | +enum mem_type { |
2533 | FREE_NIDS, /* indicates the free nid list */ |
2534 | - NAT_ENTRIES /* indicates the cached nat entry */ |
2535 | + NAT_ENTRIES, /* indicates the cached nat entry */ |
2536 | + DIRTY_DENTS /* indicates dirty dentry pages */ |
2537 | }; |
2538 | |
2539 | /* |
2540 | diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c |
2541 | index 38cfcf5f6fce..6f0f590cc5a3 100644 |
2542 | --- a/fs/jbd2/transaction.c |
2543 | +++ b/fs/jbd2/transaction.c |
2544 | @@ -1588,9 +1588,12 @@ int jbd2_journal_stop(handle_t *handle) |
2545 | * to perform a synchronous write. We do this to detect the |
2546 | * case where a single process is doing a stream of sync |
2547 | * writes. No point in waiting for joiners in that case. |
2548 | + * |
2549 | + * Setting max_batch_time to 0 disables this completely. |
2550 | */ |
2551 | pid = current->pid; |
2552 | - if (handle->h_sync && journal->j_last_sync_writer != pid) { |
2553 | + if (handle->h_sync && journal->j_last_sync_writer != pid && |
2554 | + journal->j_max_batch_time) { |
2555 | u64 commit_time, trans_time; |
2556 | |
2557 | journal->j_last_sync_writer = pid; |
2558 | diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c |
2559 | index ac127cd008bf..a693f5b01ae6 100644 |
2560 | --- a/fs/kernfs/dir.c |
2561 | +++ b/fs/kernfs/dir.c |
2562 | @@ -714,6 +714,7 @@ struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops, |
2563 | return ERR_PTR(-ENOMEM); |
2564 | |
2565 | ida_init(&root->ino_ida); |
2566 | + INIT_LIST_HEAD(&root->supers); |
2567 | |
2568 | kn = __kernfs_new_node(root, "", S_IFDIR | S_IRUGO | S_IXUGO, |
2569 | KERNFS_DIR); |
2570 | diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h |
2571 | index 8be13b2a079b..dc84a3ef9ca2 100644 |
2572 | --- a/fs/kernfs/kernfs-internal.h |
2573 | +++ b/fs/kernfs/kernfs-internal.h |
2574 | @@ -49,6 +49,8 @@ static inline struct kernfs_root *kernfs_root(struct kernfs_node *kn) |
2575 | * mount.c |
2576 | */ |
2577 | struct kernfs_super_info { |
2578 | + struct super_block *sb; |
2579 | + |
2580 | /* |
2581 | * The root associated with this super_block. Each super_block is |
2582 | * identified by the root and ns it's associated with. |
2583 | @@ -62,6 +64,9 @@ struct kernfs_super_info { |
2584 | * an array and compare kernfs_node tag against every entry. |
2585 | */ |
2586 | const void *ns; |
2587 | + |
2588 | + /* anchored at kernfs_root->supers, protected by kernfs_mutex */ |
2589 | + struct list_head node; |
2590 | }; |
2591 | #define kernfs_info(SB) ((struct kernfs_super_info *)(SB->s_fs_info)) |
2592 | |
2593 | diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c |
2594 | index 95dcd1d558bb..f973ae9b05f1 100644 |
2595 | --- a/fs/kernfs/mount.c |
2596 | +++ b/fs/kernfs/mount.c |
2597 | @@ -68,6 +68,7 @@ static int kernfs_fill_super(struct super_block *sb, unsigned long magic) |
2598 | struct inode *inode; |
2599 | struct dentry *root; |
2600 | |
2601 | + info->sb = sb; |
2602 | sb->s_blocksize = PAGE_CACHE_SIZE; |
2603 | sb->s_blocksize_bits = PAGE_CACHE_SHIFT; |
2604 | sb->s_magic = magic; |
2605 | @@ -167,12 +168,18 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags, |
2606 | *new_sb_created = !sb->s_root; |
2607 | |
2608 | if (!sb->s_root) { |
2609 | + struct kernfs_super_info *info = kernfs_info(sb); |
2610 | + |
2611 | error = kernfs_fill_super(sb, magic); |
2612 | if (error) { |
2613 | deactivate_locked_super(sb); |
2614 | return ERR_PTR(error); |
2615 | } |
2616 | sb->s_flags |= MS_ACTIVE; |
2617 | + |
2618 | + mutex_lock(&kernfs_mutex); |
2619 | + list_add(&info->node, &root->supers); |
2620 | + mutex_unlock(&kernfs_mutex); |
2621 | } |
2622 | |
2623 | return dget(sb->s_root); |
2624 | @@ -191,6 +198,10 @@ void kernfs_kill_sb(struct super_block *sb) |
2625 | struct kernfs_super_info *info = kernfs_info(sb); |
2626 | struct kernfs_node *root_kn = sb->s_root->d_fsdata; |
2627 | |
2628 | + mutex_lock(&kernfs_mutex); |
2629 | + list_del(&info->node); |
2630 | + mutex_unlock(&kernfs_mutex); |
2631 | + |
2632 | /* |
2633 | * Remove the superblock from fs_supers/s_instances |
2634 | * so we can't find it, before freeing kernfs_super_info. |
2635 | @@ -200,6 +211,36 @@ void kernfs_kill_sb(struct super_block *sb) |
2636 | kernfs_put(root_kn); |
2637 | } |
2638 | |
2639 | +/** |
2640 | + * kernfs_pin_sb: try to pin the superblock associated with a kernfs_root |
2641 | + * @kernfs_root: the kernfs_root in question |
2642 | + * @ns: the namespace tag |
2643 | + * |
2644 | + * Pin the superblock so the superblock won't be destroyed in subsequent |
2645 | + * operations. This can be used to block ->kill_sb() which may be useful |
2646 | + * for kernfs users which dynamically manage superblocks. |
2647 | + * |
2648 | + * Returns NULL if there's no superblock associated to this kernfs_root, or |
2649 | + * -EINVAL if the superblock is being freed. |
2650 | + */ |
2651 | +struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns) |
2652 | +{ |
2653 | + struct kernfs_super_info *info; |
2654 | + struct super_block *sb = NULL; |
2655 | + |
2656 | + mutex_lock(&kernfs_mutex); |
2657 | + list_for_each_entry(info, &root->supers, node) { |
2658 | + if (info->ns == ns) { |
2659 | + sb = info->sb; |
2660 | + if (!atomic_inc_not_zero(&info->sb->s_active)) |
2661 | + sb = ERR_PTR(-EINVAL); |
2662 | + break; |
2663 | + } |
2664 | + } |
2665 | + mutex_unlock(&kernfs_mutex); |
2666 | + return sb; |
2667 | +} |
2668 | + |
2669 | void __init kernfs_init(void) |
2670 | { |
2671 | kernfs_node_cache = kmem_cache_create("kernfs_node_cache", |
2672 | diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h |
2673 | index 52bf5677db0b..20f493564917 100644 |
2674 | --- a/include/linux/kernfs.h |
2675 | +++ b/include/linux/kernfs.h |
2676 | @@ -161,6 +161,10 @@ struct kernfs_root { |
2677 | /* private fields, do not use outside kernfs proper */ |
2678 | struct ida ino_ida; |
2679 | struct kernfs_syscall_ops *syscall_ops; |
2680 | + |
2681 | + /* list of kernfs_super_info of this root, protected by kernfs_mutex */ |
2682 | + struct list_head supers; |
2683 | + |
2684 | wait_queue_head_t deactivate_waitq; |
2685 | }; |
2686 | |
2687 | @@ -300,6 +304,7 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags, |
2688 | struct kernfs_root *root, unsigned long magic, |
2689 | bool *new_sb_created, const void *ns); |
2690 | void kernfs_kill_sb(struct super_block *sb); |
2691 | +struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns); |
2692 | |
2693 | void kernfs_init(void); |
2694 | |
2695 | diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h |
2696 | index d69cf637a15a..49a4d6f59108 100644 |
2697 | --- a/include/linux/ring_buffer.h |
2698 | +++ b/include/linux/ring_buffer.h |
2699 | @@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k |
2700 | __ring_buffer_alloc((size), (flags), &__key); \ |
2701 | }) |
2702 | |
2703 | -void ring_buffer_wait(struct ring_buffer *buffer, int cpu); |
2704 | +int ring_buffer_wait(struct ring_buffer *buffer, int cpu); |
2705 | int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, |
2706 | struct file *filp, poll_table *poll_table); |
2707 | |
2708 | diff --git a/kernel/cgroup.c b/kernel/cgroup.c |
2709 | index ceee0c54c6a4..073226b3dfb0 100644 |
2710 | --- a/kernel/cgroup.c |
2711 | +++ b/kernel/cgroup.c |
2712 | @@ -1484,10 +1484,13 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, |
2713 | int flags, const char *unused_dev_name, |
2714 | void *data) |
2715 | { |
2716 | + struct super_block *pinned_sb = NULL; |
2717 | + struct cgroup_subsys *ss; |
2718 | struct cgroup_root *root; |
2719 | struct cgroup_sb_opts opts; |
2720 | struct dentry *dentry; |
2721 | int ret; |
2722 | + int i; |
2723 | bool new_sb; |
2724 | |
2725 | /* |
2726 | @@ -1514,6 +1517,29 @@ retry: |
2727 | goto out_unlock; |
2728 | } |
2729 | |
2730 | + /* |
2731 | + * Destruction of cgroup root is asynchronous, so subsystems may |
2732 | + * still be dying after the previous unmount. Let's drain the |
2733 | + * dying subsystems. We just need to ensure that the ones |
2734 | + * unmounted previously finish dying and don't care about new ones |
2735 | + * starting. Testing ref liveliness is good enough. |
2736 | + */ |
2737 | + for_each_subsys(ss, i) { |
2738 | + if (!(opts.subsys_mask & (1 << i)) || |
2739 | + ss->root == &cgrp_dfl_root) |
2740 | + continue; |
2741 | + |
2742 | + if (!atomic_inc_not_zero(&ss->root->cgrp.refcnt)) { |
2743 | + mutex_unlock(&cgroup_mutex); |
2744 | + mutex_unlock(&cgroup_tree_mutex); |
2745 | + msleep(10); |
2746 | + mutex_lock(&cgroup_tree_mutex); |
2747 | + mutex_lock(&cgroup_mutex); |
2748 | + goto retry; |
2749 | + } |
2750 | + cgroup_put(&ss->root->cgrp); |
2751 | + } |
2752 | + |
2753 | for_each_root(root) { |
2754 | bool name_match = false; |
2755 | |
2756 | @@ -1559,10 +1585,25 @@ retry: |
2757 | * destruction to complete so that the subsystems are free. |
2758 | * We can use wait_queue for the wait but this path is |
2759 | * super cold. Let's just sleep for a bit and retry. |
2760 | + |
2761 | + * We want to reuse @root whose lifetime is governed by its |
2762 | + * ->cgrp. Let's check whether @root is alive and keep it |
2763 | + * that way. As cgroup_kill_sb() can happen anytime, we |
2764 | + * want to block it by pinning the sb so that @root doesn't |
2765 | + * get killed before mount is complete. |
2766 | + * |
2767 | + * With the sb pinned, inc_not_zero can reliably indicate |
2768 | + * whether @root can be reused. If it's being killed, |
2769 | + * drain it. We can use wait_queue for the wait but this |
2770 | + * path is super cold. Let's just sleep a bit and retry. |
2771 | */ |
2772 | - if (!atomic_inc_not_zero(&root->cgrp.refcnt)) { |
2773 | + pinned_sb = kernfs_pin_sb(root->kf_root, NULL); |
2774 | + if (IS_ERR(pinned_sb) || |
2775 | + !atomic_inc_not_zero(&root->cgrp.refcnt)) { |
2776 | mutex_unlock(&cgroup_mutex); |
2777 | mutex_unlock(&cgroup_tree_mutex); |
2778 | + if (!IS_ERR_OR_NULL(pinned_sb)) |
2779 | + deactivate_super(pinned_sb); |
2780 | msleep(10); |
2781 | mutex_lock(&cgroup_tree_mutex); |
2782 | mutex_lock(&cgroup_mutex); |
2783 | @@ -1609,6 +1650,16 @@ out_unlock: |
2784 | CGROUP_SUPER_MAGIC, &new_sb); |
2785 | if (IS_ERR(dentry) || !new_sb) |
2786 | cgroup_put(&root->cgrp); |
2787 | + |
2788 | + /* |
2789 | + * If @pinned_sb, we're reusing an existing root and holding an |
2790 | + * extra ref on its sb. Mount is complete. Put the extra ref. |
2791 | + */ |
2792 | + if (pinned_sb) { |
2793 | + WARN_ON(new_sb); |
2794 | + deactivate_super(pinned_sb); |
2795 | + } |
2796 | + |
2797 | return dentry; |
2798 | } |
2799 | |
2800 | diff --git a/kernel/cpuset.c b/kernel/cpuset.c |
2801 | index 3d54c418bd06..a735402837ac 100644 |
2802 | --- a/kernel/cpuset.c |
2803 | +++ b/kernel/cpuset.c |
2804 | @@ -1188,7 +1188,13 @@ done: |
2805 | |
2806 | int current_cpuset_is_being_rebound(void) |
2807 | { |
2808 | - return task_cs(current) == cpuset_being_rebound; |
2809 | + int ret; |
2810 | + |
2811 | + rcu_read_lock(); |
2812 | + ret = task_cs(current) == cpuset_being_rebound; |
2813 | + rcu_read_unlock(); |
2814 | + |
2815 | + return ret; |
2816 | } |
2817 | |
2818 | static int update_relax_domain_level(struct cpuset *cs, s64 val) |
2819 | diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c |
2820 | index c634868c2921..7c56c3d06943 100644 |
2821 | --- a/kernel/trace/ring_buffer.c |
2822 | +++ b/kernel/trace/ring_buffer.c |
2823 | @@ -543,7 +543,7 @@ static void rb_wake_up_waiters(struct irq_work *work) |
2824 | * as data is added to any of the @buffer's cpu buffers. Otherwise |
2825 | * it will wait for data to be added to a specific cpu buffer. |
2826 | */ |
2827 | -void ring_buffer_wait(struct ring_buffer *buffer, int cpu) |
2828 | +int ring_buffer_wait(struct ring_buffer *buffer, int cpu) |
2829 | { |
2830 | struct ring_buffer_per_cpu *cpu_buffer; |
2831 | DEFINE_WAIT(wait); |
2832 | @@ -557,6 +557,8 @@ void ring_buffer_wait(struct ring_buffer *buffer, int cpu) |
2833 | if (cpu == RING_BUFFER_ALL_CPUS) |
2834 | work = &buffer->irq_work; |
2835 | else { |
2836 | + if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
2837 | + return -ENODEV; |
2838 | cpu_buffer = buffer->buffers[cpu]; |
2839 | work = &cpu_buffer->irq_work; |
2840 | } |
2841 | @@ -591,6 +593,7 @@ void ring_buffer_wait(struct ring_buffer *buffer, int cpu) |
2842 | schedule(); |
2843 | |
2844 | finish_wait(&work->waiters, &wait); |
2845 | + return 0; |
2846 | } |
2847 | |
2848 | /** |
2849 | diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c |
2850 | index 1848dc6278b7..39a12265c253 100644 |
2851 | --- a/kernel/trace/trace.c |
2852 | +++ b/kernel/trace/trace.c |
2853 | @@ -1103,13 +1103,13 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) |
2854 | } |
2855 | #endif /* CONFIG_TRACER_MAX_TRACE */ |
2856 | |
2857 | -static void default_wait_pipe(struct trace_iterator *iter) |
2858 | +static int default_wait_pipe(struct trace_iterator *iter) |
2859 | { |
2860 | /* Iterators are static, they should be filled or empty */ |
2861 | if (trace_buffer_iter(iter, iter->cpu_file)) |
2862 | - return; |
2863 | + return 0; |
2864 | |
2865 | - ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file); |
2866 | + return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file); |
2867 | } |
2868 | |
2869 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
2870 | @@ -4236,17 +4236,19 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table) |
2871 | * |
2872 | * Anyway, this is really very primitive wakeup. |
2873 | */ |
2874 | -void poll_wait_pipe(struct trace_iterator *iter) |
2875 | +int poll_wait_pipe(struct trace_iterator *iter) |
2876 | { |
2877 | set_current_state(TASK_INTERRUPTIBLE); |
2878 | /* sleep for 100 msecs, and try again. */ |
2879 | schedule_timeout(HZ / 10); |
2880 | + return 0; |
2881 | } |
2882 | |
2883 | /* Must be called with trace_types_lock mutex held. */ |
2884 | static int tracing_wait_pipe(struct file *filp) |
2885 | { |
2886 | struct trace_iterator *iter = filp->private_data; |
2887 | + int ret; |
2888 | |
2889 | while (trace_empty(iter)) { |
2890 | |
2891 | @@ -4256,10 +4258,13 @@ static int tracing_wait_pipe(struct file *filp) |
2892 | |
2893 | mutex_unlock(&iter->mutex); |
2894 | |
2895 | - iter->trace->wait_pipe(iter); |
2896 | + ret = iter->trace->wait_pipe(iter); |
2897 | |
2898 | mutex_lock(&iter->mutex); |
2899 | |
2900 | + if (ret) |
2901 | + return ret; |
2902 | + |
2903 | if (signal_pending(current)) |
2904 | return -EINTR; |
2905 | |
2906 | @@ -5196,8 +5201,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, |
2907 | goto out_unlock; |
2908 | } |
2909 | mutex_unlock(&trace_types_lock); |
2910 | - iter->trace->wait_pipe(iter); |
2911 | + ret = iter->trace->wait_pipe(iter); |
2912 | mutex_lock(&trace_types_lock); |
2913 | + if (ret) { |
2914 | + size = ret; |
2915 | + goto out_unlock; |
2916 | + } |
2917 | if (signal_pending(current)) { |
2918 | size = -EINTR; |
2919 | goto out_unlock; |
2920 | @@ -5407,8 +5416,10 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, |
2921 | goto out; |
2922 | } |
2923 | mutex_unlock(&trace_types_lock); |
2924 | - iter->trace->wait_pipe(iter); |
2925 | + ret = iter->trace->wait_pipe(iter); |
2926 | mutex_lock(&trace_types_lock); |
2927 | + if (ret) |
2928 | + goto out; |
2929 | if (signal_pending(current)) { |
2930 | ret = -EINTR; |
2931 | goto out; |
2932 | diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h |
2933 | index 2e29d7ba5a52..99676cd9e28a 100644 |
2934 | --- a/kernel/trace/trace.h |
2935 | +++ b/kernel/trace/trace.h |
2936 | @@ -342,7 +342,7 @@ struct tracer { |
2937 | void (*stop)(struct trace_array *tr); |
2938 | void (*open)(struct trace_iterator *iter); |
2939 | void (*pipe_open)(struct trace_iterator *iter); |
2940 | - void (*wait_pipe)(struct trace_iterator *iter); |
2941 | + int (*wait_pipe)(struct trace_iterator *iter); |
2942 | void (*close)(struct trace_iterator *iter); |
2943 | void (*pipe_close)(struct trace_iterator *iter); |
2944 | ssize_t (*read)(struct trace_iterator *iter, |
2945 | @@ -560,7 +560,7 @@ void trace_init_global_iter(struct trace_iterator *iter); |
2946 | |
2947 | void tracing_iter_reset(struct trace_iterator *iter, int cpu); |
2948 | |
2949 | -void poll_wait_pipe(struct trace_iterator *iter); |
2950 | +int poll_wait_pipe(struct trace_iterator *iter); |
2951 | |
2952 | void tracing_sched_switch_trace(struct trace_array *tr, |
2953 | struct task_struct *prev, |
2954 | diff --git a/kernel/workqueue.c b/kernel/workqueue.c |
2955 | index 8edc87185427..7ba589779a6b 100644 |
2956 | --- a/kernel/workqueue.c |
2957 | +++ b/kernel/workqueue.c |
2958 | @@ -3422,6 +3422,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq) |
2959 | } |
2960 | } |
2961 | |
2962 | + dev_set_uevent_suppress(&wq_dev->dev, false); |
2963 | kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD); |
2964 | return 0; |
2965 | } |
2966 | @@ -5033,7 +5034,7 @@ static void __init wq_numa_init(void) |
2967 | BUG_ON(!tbl); |
2968 | |
2969 | for_each_node(node) |
2970 | - BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, |
2971 | + BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL, |
2972 | node_online(node) ? node : NUMA_NO_NODE)); |
2973 | |
2974 | for_each_possible_cpu(cpu) { |
2975 | diff --git a/mm/mempolicy.c b/mm/mempolicy.c |
2976 | index 35f9f91278fd..6b65d10f0df8 100644 |
2977 | --- a/mm/mempolicy.c |
2978 | +++ b/mm/mempolicy.c |
2979 | @@ -2136,7 +2136,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old) |
2980 | } else |
2981 | *new = *old; |
2982 | |
2983 | - rcu_read_lock(); |
2984 | if (current_cpuset_is_being_rebound()) { |
2985 | nodemask_t mems = cpuset_mems_allowed(current); |
2986 | if (new->flags & MPOL_F_REBINDING) |
2987 | @@ -2144,7 +2143,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old) |
2988 | else |
2989 | mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE); |
2990 | } |
2991 | - rcu_read_unlock(); |
2992 | atomic_set(&new->refcnt, 1); |
2993 | return new; |
2994 | } |