Contents of /trunk/kernel-alx/patches-3.14/0112-3.14.13-all-fixes.patch
Parent Directory | Revision Log
Revision 2506 -
(show annotations)
(download)
Fri Oct 17 07:55:45 2014 UTC (9 years, 11 months ago) by niro
File size: 70794 byte(s)
-patches for 3.14
1 | diff --git a/Documentation/cpu-freq/intel-pstate.txt b/Documentation/cpu-freq/intel-pstate.txt |
2 | index e742d21dbd96..a69ffe1d54d5 100644 |
3 | --- a/Documentation/cpu-freq/intel-pstate.txt |
4 | +++ b/Documentation/cpu-freq/intel-pstate.txt |
5 | @@ -15,10 +15,13 @@ New sysfs files for controlling P state selection have been added to |
6 | /sys/devices/system/cpu/intel_pstate/ |
7 | |
8 | max_perf_pct: limits the maximum P state that will be requested by |
9 | - the driver stated as a percentage of the available performance. |
10 | + the driver stated as a percentage of the available performance. The |
11 | + available (P states) performance may be reduced by the no_turbo |
12 | + setting described below. |
13 | |
14 | min_perf_pct: limits the minimum P state that will be requested by |
15 | - the driver stated as a percentage of the available performance. |
16 | + the driver stated as a percentage of the max (non-turbo) |
17 | + performance level. |
18 | |
19 | no_turbo: limits the driver to selecting P states below the turbo |
20 | frequency range. |
21 | diff --git a/Makefile b/Makefile |
22 | index 13d8f323ae43..7a2981c972ae 100644 |
23 | --- a/Makefile |
24 | +++ b/Makefile |
25 | @@ -1,6 +1,6 @@ |
26 | VERSION = 3 |
27 | PATCHLEVEL = 14 |
28 | -SUBLEVEL = 12 |
29 | +SUBLEVEL = 13 |
30 | EXTRAVERSION = |
31 | NAME = Remembering Coco |
32 | |
33 | diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h |
34 | index 9dc5dc39fded..11ad59b856c6 100644 |
35 | --- a/arch/arm64/include/asm/memory.h |
36 | +++ b/arch/arm64/include/asm/memory.h |
37 | @@ -56,6 +56,8 @@ |
38 | #define TASK_SIZE_32 UL(0x100000000) |
39 | #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ |
40 | TASK_SIZE_32 : TASK_SIZE_64) |
41 | +#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \ |
42 | + TASK_SIZE_32 : TASK_SIZE_64) |
43 | #else |
44 | #define TASK_SIZE TASK_SIZE_64 |
45 | #endif /* CONFIG_COMPAT */ |
46 | diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h |
47 | index 3be8581af495..a8a37477c66e 100644 |
48 | --- a/arch/metag/include/asm/processor.h |
49 | +++ b/arch/metag/include/asm/processor.h |
50 | @@ -23,7 +23,7 @@ |
51 | #define STACK_TOP (TASK_SIZE - PAGE_SIZE) |
52 | #define STACK_TOP_MAX STACK_TOP |
53 | /* Maximum virtual space for stack */ |
54 | -#define STACK_SIZE_MAX (1 << 28) /* 256 MB */ |
55 | +#define STACK_SIZE_MAX (CONFIG_MAX_STACK_SIZE_MB*1024*1024) |
56 | |
57 | /* This decides where the kernel will search for a free chunk of vm |
58 | * space during mmap's. |
59 | diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h |
60 | index 86522ef09d52..d951c9681ab3 100644 |
61 | --- a/arch/parisc/include/asm/processor.h |
62 | +++ b/arch/parisc/include/asm/processor.h |
63 | @@ -55,7 +55,10 @@ |
64 | #define STACK_TOP TASK_SIZE |
65 | #define STACK_TOP_MAX DEFAULT_TASK_SIZE |
66 | |
67 | -#define STACK_SIZE_MAX (1 << 30) /* 1 GB */ |
68 | +/* Allow bigger stacks for 64-bit processes */ |
69 | +#define STACK_SIZE_MAX (USER_WIDE_MODE \ |
70 | + ? (1 << 30) /* 1 GB */ \ |
71 | + : (CONFIG_MAX_STACK_SIZE_MB*1024*1024)) |
72 | |
73 | #endif |
74 | |
75 | diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c |
76 | index 608716f8496b..af3bc359dc70 100644 |
77 | --- a/arch/parisc/kernel/hardware.c |
78 | +++ b/arch/parisc/kernel/hardware.c |
79 | @@ -1210,7 +1210,8 @@ static struct hp_hardware hp_hardware_list[] = { |
80 | {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, |
81 | {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, |
82 | {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, |
83 | - {HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"}, |
84 | + {HPHW_FIO, 0x076, 0x000AD, 0x0, "Crestone Peak Core RS-232"}, |
85 | + {HPHW_FIO, 0x077, 0x000AD, 0x0, "Crestone Peak Fast? Core RS-232"}, |
86 | {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, |
87 | {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, |
88 | {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, |
89 | diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c |
90 | index 31ffa9b55322..e1ffea2f9a0b 100644 |
91 | --- a/arch/parisc/kernel/sys_parisc.c |
92 | +++ b/arch/parisc/kernel/sys_parisc.c |
93 | @@ -72,10 +72,10 @@ static unsigned long mmap_upper_limit(void) |
94 | { |
95 | unsigned long stack_base; |
96 | |
97 | - /* Limit stack size to 1GB - see setup_arg_pages() in fs/exec.c */ |
98 | + /* Limit stack size - see setup_arg_pages() in fs/exec.c */ |
99 | stack_base = rlimit_max(RLIMIT_STACK); |
100 | - if (stack_base > (1 << 30)) |
101 | - stack_base = 1 << 30; |
102 | + if (stack_base > STACK_SIZE_MAX) |
103 | + stack_base = STACK_SIZE_MAX; |
104 | |
105 | return PAGE_ALIGN(STACK_TOP - stack_base); |
106 | } |
107 | diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c |
108 | index bb9f3b64de55..ec741fe02ab6 100644 |
109 | --- a/arch/parisc/kernel/sys_parisc32.c |
110 | +++ b/arch/parisc/kernel/sys_parisc32.c |
111 | @@ -4,6 +4,7 @@ |
112 | * Copyright (C) 2000-2001 Hewlett Packard Company |
113 | * Copyright (C) 2000 John Marvin |
114 | * Copyright (C) 2001 Matthew Wilcox |
115 | + * Copyright (C) 2014 Helge Deller <deller@gmx.de> |
116 | * |
117 | * These routines maintain argument size conversion between 32bit and 64bit |
118 | * environment. Based heavily on sys_ia32.c and sys_sparc32.c. |
119 | @@ -57,3 +58,12 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23, |
120 | current->comm, current->pid, r20); |
121 | return -ENOSYS; |
122 | } |
123 | + |
124 | +asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags, |
125 | + compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd, |
126 | + const char __user * pathname) |
127 | +{ |
128 | + return sys_fanotify_mark(fanotify_fd, flags, |
129 | + ((__u64)mask1 << 32) | mask0, |
130 | + dfd, pathname); |
131 | +} |
132 | diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S |
133 | index 83ead0ea127d..7dd8a3b22147 100644 |
134 | --- a/arch/parisc/kernel/syscall_table.S |
135 | +++ b/arch/parisc/kernel/syscall_table.S |
136 | @@ -418,7 +418,7 @@ |
137 | ENTRY_SAME(accept4) /* 320 */ |
138 | ENTRY_SAME(prlimit64) |
139 | ENTRY_SAME(fanotify_init) |
140 | - ENTRY_COMP(fanotify_mark) |
141 | + ENTRY_DIFF(fanotify_mark) |
142 | ENTRY_COMP(clock_adjtime) |
143 | ENTRY_SAME(name_to_handle_at) /* 325 */ |
144 | ENTRY_COMP(open_by_handle_at) |
145 | diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig |
146 | index 957bf344c0f5..2156fa2d25fe 100644 |
147 | --- a/arch/powerpc/Kconfig |
148 | +++ b/arch/powerpc/Kconfig |
149 | @@ -410,7 +410,7 @@ config KEXEC |
150 | config CRASH_DUMP |
151 | bool "Build a kdump crash kernel" |
152 | depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP) |
153 | - select RELOCATABLE if PPC64 || 44x || FSL_BOOKE |
154 | + select RELOCATABLE if (PPC64 && !COMPILE_TEST) || 44x || FSL_BOOKE |
155 | help |
156 | Build a kernel suitable for use as a kdump capture kernel. |
157 | The same kernel binary can be used as production kernel and dump |
158 | @@ -1000,6 +1000,7 @@ endmenu |
159 | if PPC64 |
160 | config RELOCATABLE |
161 | bool "Build a relocatable kernel" |
162 | + depends on !COMPILE_TEST |
163 | select NONSTATIC_KERNEL |
164 | help |
165 | This builds a kernel image that is capable of running anywhere |
166 | diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h |
167 | index 3fd2f1b6f906..cefc7b4f4fb1 100644 |
168 | --- a/arch/powerpc/include/asm/perf_event_server.h |
169 | +++ b/arch/powerpc/include/asm/perf_event_server.h |
170 | @@ -60,8 +60,7 @@ struct power_pmu { |
171 | #define PPMU_SIAR_VALID 0x00000010 /* Processor has SIAR Valid bit */ |
172 | #define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */ |
173 | #define PPMU_HAS_SIER 0x00000040 /* Has SIER */ |
174 | -#define PPMU_BHRB 0x00000080 /* has BHRB feature enabled */ |
175 | -#define PPMU_EBB 0x00000100 /* supports event based branch */ |
176 | +#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */ |
177 | |
178 | /* |
179 | * Values for flags to get_alternatives() |
180 | diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c |
181 | index 67cf22083f4c..38265dc85318 100644 |
182 | --- a/arch/powerpc/perf/core-book3s.c |
183 | +++ b/arch/powerpc/perf/core-book3s.c |
184 | @@ -483,7 +483,7 @@ static bool is_ebb_event(struct perf_event *event) |
185 | * check that the PMU supports EBB, meaning those that don't can still |
186 | * use bit 63 of the event code for something else if they wish. |
187 | */ |
188 | - return (ppmu->flags & PPMU_EBB) && |
189 | + return (ppmu->flags & PPMU_ARCH_207S) && |
190 | ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1); |
191 | } |
192 | |
193 | @@ -851,7 +851,22 @@ static void power_pmu_read(struct perf_event *event) |
194 | } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); |
195 | |
196 | local64_add(delta, &event->count); |
197 | - local64_sub(delta, &event->hw.period_left); |
198 | + |
199 | + /* |
200 | + * A number of places program the PMC with (0x80000000 - period_left). |
201 | + * We never want period_left to be less than 1 because we will program |
202 | + * the PMC with a value >= 0x800000000 and an edge detected PMC will |
203 | + * roll around to 0 before taking an exception. We have seen this |
204 | + * on POWER8. |
205 | + * |
206 | + * To fix this, clamp the minimum value of period_left to 1. |
207 | + */ |
208 | + do { |
209 | + prev = local64_read(&event->hw.period_left); |
210 | + val = prev - delta; |
211 | + if (val < 1) |
212 | + val = 1; |
213 | + } while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev); |
214 | } |
215 | |
216 | /* |
217 | @@ -1152,6 +1167,9 @@ static void power_pmu_enable(struct pmu *pmu) |
218 | |
219 | write_mmcr0(cpuhw, mmcr0); |
220 | |
221 | + if (ppmu->flags & PPMU_ARCH_207S) |
222 | + mtspr(SPRN_MMCR2, 0); |
223 | + |
224 | /* |
225 | * Enable instruction sampling if necessary |
226 | */ |
227 | @@ -1548,7 +1566,7 @@ static int power_pmu_event_init(struct perf_event *event) |
228 | |
229 | if (has_branch_stack(event)) { |
230 | /* PMU has BHRB enabled */ |
231 | - if (!(ppmu->flags & PPMU_BHRB)) |
232 | + if (!(ppmu->flags & PPMU_ARCH_207S)) |
233 | return -EOPNOTSUPP; |
234 | } |
235 | |
236 | diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c |
237 | index 96cee20dcd34..4a06530cbb71 100644 |
238 | --- a/arch/powerpc/perf/power8-pmu.c |
239 | +++ b/arch/powerpc/perf/power8-pmu.c |
240 | @@ -751,7 +751,7 @@ static struct power_pmu power8_pmu = { |
241 | .get_constraint = power8_get_constraint, |
242 | .get_alternatives = power8_get_alternatives, |
243 | .disable_pmc = power8_disable_pmc, |
244 | - .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB, |
245 | + .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S, |
246 | .n_generic = ARRAY_SIZE(power8_generic_events), |
247 | .generic_events = power8_generic_events, |
248 | .cache_events = &power8_cache_events, |
249 | diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c |
250 | index f30cd10293f0..8626b03e83b7 100644 |
251 | --- a/arch/x86/crypto/sha512_ssse3_glue.c |
252 | +++ b/arch/x86/crypto/sha512_ssse3_glue.c |
253 | @@ -141,7 +141,7 @@ static int sha512_ssse3_final(struct shash_desc *desc, u8 *out) |
254 | |
255 | /* save number of bits */ |
256 | bits[1] = cpu_to_be64(sctx->count[0] << 3); |
257 | - bits[0] = cpu_to_be64(sctx->count[1] << 3) | sctx->count[0] >> 61; |
258 | + bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); |
259 | |
260 | /* Pad out to 112 mod 128 and append length */ |
261 | index = sctx->count[0] & 0x7f; |
262 | diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c |
263 | index 799580cabc78..94bd24771812 100644 |
264 | --- a/arch/x86/mm/ioremap.c |
265 | +++ b/arch/x86/mm/ioremap.c |
266 | @@ -50,6 +50,21 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size, |
267 | return err; |
268 | } |
269 | |
270 | +static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, |
271 | + void *arg) |
272 | +{ |
273 | + unsigned long i; |
274 | + |
275 | + for (i = 0; i < nr_pages; ++i) |
276 | + if (pfn_valid(start_pfn + i) && |
277 | + !PageReserved(pfn_to_page(start_pfn + i))) |
278 | + return 1; |
279 | + |
280 | + WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn); |
281 | + |
282 | + return 0; |
283 | +} |
284 | + |
285 | /* |
286 | * Remap an arbitrary physical address space into the kernel virtual |
287 | * address space. Needed when the kernel wants to access high addresses |
288 | @@ -93,14 +108,11 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, |
289 | /* |
290 | * Don't allow anybody to remap normal RAM that we're using.. |
291 | */ |
292 | + pfn = phys_addr >> PAGE_SHIFT; |
293 | last_pfn = last_addr >> PAGE_SHIFT; |
294 | - for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) { |
295 | - int is_ram = page_is_ram(pfn); |
296 | - |
297 | - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn))) |
298 | - return NULL; |
299 | - WARN_ON_ONCE(is_ram); |
300 | - } |
301 | + if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, |
302 | + __ioremap_check_ram) == 1) |
303 | + return NULL; |
304 | |
305 | /* |
306 | * Mappings have to be page-aligned |
307 | diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c |
308 | index 7c1f8452918a..0bebc6905383 100644 |
309 | --- a/drivers/acpi/ac.c |
310 | +++ b/drivers/acpi/ac.c |
311 | @@ -30,6 +30,10 @@ |
312 | #include <linux/types.h> |
313 | #include <linux/dmi.h> |
314 | #include <linux/delay.h> |
315 | +#ifdef CONFIG_ACPI_PROCFS_POWER |
316 | +#include <linux/proc_fs.h> |
317 | +#include <linux/seq_file.h> |
318 | +#endif |
319 | #include <linux/platform_device.h> |
320 | #include <linux/power_supply.h> |
321 | #include <linux/acpi.h> |
322 | @@ -51,6 +55,7 @@ MODULE_AUTHOR("Paul Diefenbaugh"); |
323 | MODULE_DESCRIPTION("ACPI AC Adapter Driver"); |
324 | MODULE_LICENSE("GPL"); |
325 | |
326 | + |
327 | static int acpi_ac_add(struct acpi_device *device); |
328 | static int acpi_ac_remove(struct acpi_device *device); |
329 | static void acpi_ac_notify(struct acpi_device *device, u32 event); |
330 | @@ -66,6 +71,13 @@ static int acpi_ac_resume(struct device *dev); |
331 | #endif |
332 | static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); |
333 | |
334 | +#ifdef CONFIG_ACPI_PROCFS_POWER |
335 | +extern struct proc_dir_entry *acpi_lock_ac_dir(void); |
336 | +extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir); |
337 | +static int acpi_ac_open_fs(struct inode *inode, struct file *file); |
338 | +#endif |
339 | + |
340 | + |
341 | static int ac_sleep_before_get_state_ms; |
342 | |
343 | static struct acpi_driver acpi_ac_driver = { |
344 | @@ -89,6 +101,16 @@ struct acpi_ac { |
345 | |
346 | #define to_acpi_ac(x) container_of(x, struct acpi_ac, charger) |
347 | |
348 | +#ifdef CONFIG_ACPI_PROCFS_POWER |
349 | +static const struct file_operations acpi_ac_fops = { |
350 | + .owner = THIS_MODULE, |
351 | + .open = acpi_ac_open_fs, |
352 | + .read = seq_read, |
353 | + .llseek = seq_lseek, |
354 | + .release = single_release, |
355 | +}; |
356 | +#endif |
357 | + |
358 | /* -------------------------------------------------------------------------- |
359 | AC Adapter Management |
360 | -------------------------------------------------------------------------- */ |
361 | @@ -141,6 +163,83 @@ static enum power_supply_property ac_props[] = { |
362 | POWER_SUPPLY_PROP_ONLINE, |
363 | }; |
364 | |
365 | +#ifdef CONFIG_ACPI_PROCFS_POWER |
366 | +/* -------------------------------------------------------------------------- |
367 | + FS Interface (/proc) |
368 | + -------------------------------------------------------------------------- */ |
369 | + |
370 | +static struct proc_dir_entry *acpi_ac_dir; |
371 | + |
372 | +static int acpi_ac_seq_show(struct seq_file *seq, void *offset) |
373 | +{ |
374 | + struct acpi_ac *ac = seq->private; |
375 | + |
376 | + |
377 | + if (!ac) |
378 | + return 0; |
379 | + |
380 | + if (acpi_ac_get_state(ac)) { |
381 | + seq_puts(seq, "ERROR: Unable to read AC Adapter state\n"); |
382 | + return 0; |
383 | + } |
384 | + |
385 | + seq_puts(seq, "state: "); |
386 | + switch (ac->state) { |
387 | + case ACPI_AC_STATUS_OFFLINE: |
388 | + seq_puts(seq, "off-line\n"); |
389 | + break; |
390 | + case ACPI_AC_STATUS_ONLINE: |
391 | + seq_puts(seq, "on-line\n"); |
392 | + break; |
393 | + default: |
394 | + seq_puts(seq, "unknown\n"); |
395 | + break; |
396 | + } |
397 | + |
398 | + return 0; |
399 | +} |
400 | + |
401 | +static int acpi_ac_open_fs(struct inode *inode, struct file *file) |
402 | +{ |
403 | + return single_open(file, acpi_ac_seq_show, PDE_DATA(inode)); |
404 | +} |
405 | + |
406 | +static int acpi_ac_add_fs(struct acpi_ac *ac) |
407 | +{ |
408 | + struct proc_dir_entry *entry = NULL; |
409 | + |
410 | + printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded," |
411 | + " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); |
412 | + if (!acpi_device_dir(ac->device)) { |
413 | + acpi_device_dir(ac->device) = |
414 | + proc_mkdir(acpi_device_bid(ac->device), acpi_ac_dir); |
415 | + if (!acpi_device_dir(ac->device)) |
416 | + return -ENODEV; |
417 | + } |
418 | + |
419 | + /* 'state' [R] */ |
420 | + entry = proc_create_data(ACPI_AC_FILE_STATE, |
421 | + S_IRUGO, acpi_device_dir(ac->device), |
422 | + &acpi_ac_fops, ac); |
423 | + if (!entry) |
424 | + return -ENODEV; |
425 | + return 0; |
426 | +} |
427 | + |
428 | +static int acpi_ac_remove_fs(struct acpi_ac *ac) |
429 | +{ |
430 | + |
431 | + if (acpi_device_dir(ac->device)) { |
432 | + remove_proc_entry(ACPI_AC_FILE_STATE, |
433 | + acpi_device_dir(ac->device)); |
434 | + remove_proc_entry(acpi_device_bid(ac->device), acpi_ac_dir); |
435 | + acpi_device_dir(ac->device) = NULL; |
436 | + } |
437 | + |
438 | + return 0; |
439 | +} |
440 | +#endif |
441 | + |
442 | /* -------------------------------------------------------------------------- |
443 | Driver Model |
444 | -------------------------------------------------------------------------- */ |
445 | @@ -221,6 +320,11 @@ static int acpi_ac_add(struct acpi_device *device) |
446 | goto end; |
447 | |
448 | ac->charger.name = acpi_device_bid(device); |
449 | +#ifdef CONFIG_ACPI_PROCFS_POWER |
450 | + result = acpi_ac_add_fs(ac); |
451 | + if (result) |
452 | + goto end; |
453 | +#endif |
454 | ac->charger.type = POWER_SUPPLY_TYPE_MAINS; |
455 | ac->charger.properties = ac_props; |
456 | ac->charger.num_properties = ARRAY_SIZE(ac_props); |
457 | @@ -234,8 +338,12 @@ static int acpi_ac_add(struct acpi_device *device) |
458 | ac->state ? "on-line" : "off-line"); |
459 | |
460 | end: |
461 | - if (result) |
462 | + if (result) { |
463 | +#ifdef CONFIG_ACPI_PROCFS_POWER |
464 | + acpi_ac_remove_fs(ac); |
465 | +#endif |
466 | kfree(ac); |
467 | + } |
468 | |
469 | dmi_check_system(ac_dmi_table); |
470 | return result; |
471 | @@ -278,6 +386,10 @@ static int acpi_ac_remove(struct acpi_device *device) |
472 | if (ac->charger.dev) |
473 | power_supply_unregister(&ac->charger); |
474 | |
475 | +#ifdef CONFIG_ACPI_PROCFS_POWER |
476 | + acpi_ac_remove_fs(ac); |
477 | +#endif |
478 | + |
479 | kfree(ac); |
480 | |
481 | return 0; |
482 | @@ -290,9 +402,20 @@ static int __init acpi_ac_init(void) |
483 | if (acpi_disabled) |
484 | return -ENODEV; |
485 | |
486 | +#ifdef CONFIG_ACPI_PROCFS_POWER |
487 | + acpi_ac_dir = acpi_lock_ac_dir(); |
488 | + if (!acpi_ac_dir) |
489 | + return -ENODEV; |
490 | +#endif |
491 | + |
492 | + |
493 | result = acpi_bus_register_driver(&acpi_ac_driver); |
494 | - if (result < 0) |
495 | + if (result < 0) { |
496 | +#ifdef CONFIG_ACPI_PROCFS_POWER |
497 | + acpi_unlock_ac_dir(acpi_ac_dir); |
498 | +#endif |
499 | return -ENODEV; |
500 | + } |
501 | |
502 | return 0; |
503 | } |
504 | @@ -300,6 +423,9 @@ static int __init acpi_ac_init(void) |
505 | static void __exit acpi_ac_exit(void) |
506 | { |
507 | acpi_bus_unregister_driver(&acpi_ac_driver); |
508 | +#ifdef CONFIG_ACPI_PROCFS_POWER |
509 | + acpi_unlock_ac_dir(acpi_ac_dir); |
510 | +#endif |
511 | } |
512 | module_init(acpi_ac_init); |
513 | module_exit(acpi_ac_exit); |
514 | diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c |
515 | index efa71d66e8b0..0f004159a317 100644 |
516 | --- a/drivers/acpi/battery.c |
517 | +++ b/drivers/acpi/battery.c |
518 | @@ -34,6 +34,7 @@ |
519 | #include <linux/dmi.h> |
520 | #include <linux/slab.h> |
521 | #include <linux/suspend.h> |
522 | +#include <linux/delay.h> |
523 | #include <asm/unaligned.h> |
524 | |
525 | #ifdef CONFIG_ACPI_PROCFS_POWER |
526 | @@ -1070,6 +1071,28 @@ static struct dmi_system_id bat_dmi_table[] = { |
527 | {}, |
528 | }; |
529 | |
530 | +/* |
531 | + * Some machines'(E,G Lenovo Z480) ECs are not stable |
532 | + * during boot up and this causes battery driver fails to be |
533 | + * probed due to failure of getting battery information |
534 | + * from EC sometimes. After several retries, the operation |
535 | + * may work. So add retry code here and 20ms sleep between |
536 | + * every retries. |
537 | + */ |
538 | +static int acpi_battery_update_retry(struct acpi_battery *battery) |
539 | +{ |
540 | + int retry, ret; |
541 | + |
542 | + for (retry = 5; retry; retry--) { |
543 | + ret = acpi_battery_update(battery); |
544 | + if (!ret) |
545 | + break; |
546 | + |
547 | + msleep(20); |
548 | + } |
549 | + return ret; |
550 | +} |
551 | + |
552 | static int acpi_battery_add(struct acpi_device *device) |
553 | { |
554 | int result = 0; |
555 | @@ -1088,9 +1111,11 @@ static int acpi_battery_add(struct acpi_device *device) |
556 | mutex_init(&battery->sysfs_lock); |
557 | if (acpi_has_method(battery->device->handle, "_BIX")) |
558 | set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags); |
559 | - result = acpi_battery_update(battery); |
560 | + |
561 | + result = acpi_battery_update_retry(battery); |
562 | if (result) |
563 | goto fail; |
564 | + |
565 | #ifdef CONFIG_ACPI_PROCFS_POWER |
566 | result = acpi_battery_add_fs(device); |
567 | #endif |
568 | diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c |
569 | index ad11ba4a412d..49d89909b4ed 100644 |
570 | --- a/drivers/acpi/ec.c |
571 | +++ b/drivers/acpi/ec.c |
572 | @@ -78,6 +78,9 @@ enum { |
573 | EC_FLAGS_BLOCKED, /* Transactions are blocked */ |
574 | }; |
575 | |
576 | +#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */ |
577 | +#define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */ |
578 | + |
579 | /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */ |
580 | static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY; |
581 | module_param(ec_delay, uint, 0644); |
582 | @@ -109,7 +112,7 @@ struct transaction { |
583 | u8 ri; |
584 | u8 wlen; |
585 | u8 rlen; |
586 | - bool done; |
587 | + u8 flags; |
588 | }; |
589 | |
590 | struct acpi_ec *boot_ec, *first_ec; |
591 | @@ -150,60 +153,74 @@ static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data) |
592 | outb(data, ec->data_addr); |
593 | } |
594 | |
595 | -static int ec_transaction_done(struct acpi_ec *ec) |
596 | +static int ec_transaction_completed(struct acpi_ec *ec) |
597 | { |
598 | unsigned long flags; |
599 | int ret = 0; |
600 | spin_lock_irqsave(&ec->lock, flags); |
601 | - if (!ec->curr || ec->curr->done) |
602 | + if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE)) |
603 | ret = 1; |
604 | spin_unlock_irqrestore(&ec->lock, flags); |
605 | return ret; |
606 | } |
607 | |
608 | -static void start_transaction(struct acpi_ec *ec) |
609 | +static bool advance_transaction(struct acpi_ec *ec) |
610 | { |
611 | - ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; |
612 | - ec->curr->done = false; |
613 | - acpi_ec_write_cmd(ec, ec->curr->command); |
614 | -} |
615 | - |
616 | -static void advance_transaction(struct acpi_ec *ec, u8 status) |
617 | -{ |
618 | - unsigned long flags; |
619 | struct transaction *t; |
620 | + u8 status; |
621 | + bool wakeup = false; |
622 | |
623 | - spin_lock_irqsave(&ec->lock, flags); |
624 | + pr_debug("===== %s =====\n", in_interrupt() ? "IRQ" : "TASK"); |
625 | + status = acpi_ec_read_status(ec); |
626 | t = ec->curr; |
627 | if (!t) |
628 | - goto unlock; |
629 | - if (t->wlen > t->wi) { |
630 | - if ((status & ACPI_EC_FLAG_IBF) == 0) |
631 | - acpi_ec_write_data(ec, |
632 | - t->wdata[t->wi++]); |
633 | - else |
634 | - goto err; |
635 | - } else if (t->rlen > t->ri) { |
636 | - if ((status & ACPI_EC_FLAG_OBF) == 1) { |
637 | - t->rdata[t->ri++] = acpi_ec_read_data(ec); |
638 | - if (t->rlen == t->ri) |
639 | - t->done = true; |
640 | + goto err; |
641 | + if (t->flags & ACPI_EC_COMMAND_POLL) { |
642 | + if (t->wlen > t->wi) { |
643 | + if ((status & ACPI_EC_FLAG_IBF) == 0) |
644 | + acpi_ec_write_data(ec, t->wdata[t->wi++]); |
645 | + else |
646 | + goto err; |
647 | + } else if (t->rlen > t->ri) { |
648 | + if ((status & ACPI_EC_FLAG_OBF) == 1) { |
649 | + t->rdata[t->ri++] = acpi_ec_read_data(ec); |
650 | + if (t->rlen == t->ri) { |
651 | + t->flags |= ACPI_EC_COMMAND_COMPLETE; |
652 | + wakeup = true; |
653 | + } |
654 | + } else |
655 | + goto err; |
656 | + } else if (t->wlen == t->wi && |
657 | + (status & ACPI_EC_FLAG_IBF) == 0) { |
658 | + t->flags |= ACPI_EC_COMMAND_COMPLETE; |
659 | + wakeup = true; |
660 | + } |
661 | + return wakeup; |
662 | + } else { |
663 | + if ((status & ACPI_EC_FLAG_IBF) == 0) { |
664 | + acpi_ec_write_cmd(ec, t->command); |
665 | + t->flags |= ACPI_EC_COMMAND_POLL; |
666 | } else |
667 | goto err; |
668 | - } else if (t->wlen == t->wi && |
669 | - (status & ACPI_EC_FLAG_IBF) == 0) |
670 | - t->done = true; |
671 | - goto unlock; |
672 | + return wakeup; |
673 | + } |
674 | err: |
675 | /* |
676 | * If SCI bit is set, then don't think it's a false IRQ |
677 | * otherwise will take a not handled IRQ as a false one. |
678 | */ |
679 | - if (in_interrupt() && !(status & ACPI_EC_FLAG_SCI)) |
680 | - ++t->irq_count; |
681 | + if (!(status & ACPI_EC_FLAG_SCI)) { |
682 | + if (in_interrupt() && t) |
683 | + ++t->irq_count; |
684 | + } |
685 | + return wakeup; |
686 | +} |
687 | |
688 | -unlock: |
689 | - spin_unlock_irqrestore(&ec->lock, flags); |
690 | +static void start_transaction(struct acpi_ec *ec) |
691 | +{ |
692 | + ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; |
693 | + ec->curr->flags = 0; |
694 | + (void)advance_transaction(ec); |
695 | } |
696 | |
697 | static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data); |
698 | @@ -228,15 +245,17 @@ static int ec_poll(struct acpi_ec *ec) |
699 | /* don't sleep with disabled interrupts */ |
700 | if (EC_FLAGS_MSI || irqs_disabled()) { |
701 | udelay(ACPI_EC_MSI_UDELAY); |
702 | - if (ec_transaction_done(ec)) |
703 | + if (ec_transaction_completed(ec)) |
704 | return 0; |
705 | } else { |
706 | if (wait_event_timeout(ec->wait, |
707 | - ec_transaction_done(ec), |
708 | + ec_transaction_completed(ec), |
709 | msecs_to_jiffies(1))) |
710 | return 0; |
711 | } |
712 | - advance_transaction(ec, acpi_ec_read_status(ec)); |
713 | + spin_lock_irqsave(&ec->lock, flags); |
714 | + (void)advance_transaction(ec); |
715 | + spin_unlock_irqrestore(&ec->lock, flags); |
716 | } while (time_before(jiffies, delay)); |
717 | pr_debug("controller reset, restart transaction\n"); |
718 | spin_lock_irqsave(&ec->lock, flags); |
719 | @@ -268,23 +287,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, |
720 | return ret; |
721 | } |
722 | |
723 | -static int ec_check_ibf0(struct acpi_ec *ec) |
724 | -{ |
725 | - u8 status = acpi_ec_read_status(ec); |
726 | - return (status & ACPI_EC_FLAG_IBF) == 0; |
727 | -} |
728 | - |
729 | -static int ec_wait_ibf0(struct acpi_ec *ec) |
730 | -{ |
731 | - unsigned long delay = jiffies + msecs_to_jiffies(ec_delay); |
732 | - /* interrupt wait manually if GPE mode is not active */ |
733 | - while (time_before(jiffies, delay)) |
734 | - if (wait_event_timeout(ec->wait, ec_check_ibf0(ec), |
735 | - msecs_to_jiffies(1))) |
736 | - return 0; |
737 | - return -ETIME; |
738 | -} |
739 | - |
740 | static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) |
741 | { |
742 | int status; |
743 | @@ -305,12 +307,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) |
744 | goto unlock; |
745 | } |
746 | } |
747 | - if (ec_wait_ibf0(ec)) { |
748 | - pr_err("input buffer is not empty, " |
749 | - "aborting transaction\n"); |
750 | - status = -ETIME; |
751 | - goto end; |
752 | - } |
753 | pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n", |
754 | t->command, t->wdata ? t->wdata[0] : 0); |
755 | /* disable GPE during transaction if storm is detected */ |
756 | @@ -334,7 +330,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) |
757 | set_bit(EC_FLAGS_GPE_STORM, &ec->flags); |
758 | } |
759 | pr_debug("transaction end\n"); |
760 | -end: |
761 | if (ec->global_lock) |
762 | acpi_release_global_lock(glk); |
763 | unlock: |
764 | @@ -634,17 +629,14 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state) |
765 | static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, |
766 | u32 gpe_number, void *data) |
767 | { |
768 | + unsigned long flags; |
769 | struct acpi_ec *ec = data; |
770 | - u8 status = acpi_ec_read_status(ec); |
771 | |
772 | - pr_debug("~~~> interrupt, status:0x%02x\n", status); |
773 | - |
774 | - advance_transaction(ec, status); |
775 | - if (ec_transaction_done(ec) && |
776 | - (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) { |
777 | + spin_lock_irqsave(&ec->lock, flags); |
778 | + if (advance_transaction(ec)) |
779 | wake_up(&ec->wait); |
780 | - ec_check_sci(ec, acpi_ec_read_status(ec)); |
781 | - } |
782 | + spin_unlock_irqrestore(&ec->lock, flags); |
783 | + ec_check_sci(ec, acpi_ec_read_status(ec)); |
784 | return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE; |
785 | } |
786 | |
787 | diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c |
788 | index 0bdacc5e26a3..2ba8f02ced36 100644 |
789 | --- a/drivers/acpi/resource.c |
790 | +++ b/drivers/acpi/resource.c |
791 | @@ -77,7 +77,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) |
792 | switch (ares->type) { |
793 | case ACPI_RESOURCE_TYPE_MEMORY24: |
794 | memory24 = &ares->data.memory24; |
795 | - if (!memory24->address_length) |
796 | + if (!memory24->minimum && !memory24->address_length) |
797 | return false; |
798 | acpi_dev_get_memresource(res, memory24->minimum, |
799 | memory24->address_length, |
800 | @@ -85,7 +85,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) |
801 | break; |
802 | case ACPI_RESOURCE_TYPE_MEMORY32: |
803 | memory32 = &ares->data.memory32; |
804 | - if (!memory32->address_length) |
805 | + if (!memory32->minimum && !memory32->address_length) |
806 | return false; |
807 | acpi_dev_get_memresource(res, memory32->minimum, |
808 | memory32->address_length, |
809 | @@ -93,7 +93,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) |
810 | break; |
811 | case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: |
812 | fixed_memory32 = &ares->data.fixed_memory32; |
813 | - if (!fixed_memory32->address_length) |
814 | + if (!fixed_memory32->address && !fixed_memory32->address_length) |
815 | return false; |
816 | acpi_dev_get_memresource(res, fixed_memory32->address, |
817 | fixed_memory32->address_length, |
818 | @@ -150,7 +150,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res) |
819 | switch (ares->type) { |
820 | case ACPI_RESOURCE_TYPE_IO: |
821 | io = &ares->data.io; |
822 | - if (!io->address_length) |
823 | + if (!io->minimum && !io->address_length) |
824 | return false; |
825 | acpi_dev_get_ioresource(res, io->minimum, |
826 | io->address_length, |
827 | @@ -158,7 +158,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res) |
828 | break; |
829 | case ACPI_RESOURCE_TYPE_FIXED_IO: |
830 | fixed_io = &ares->data.fixed_io; |
831 | - if (!fixed_io->address_length) |
832 | + if (!fixed_io->address && !fixed_io->address_length) |
833 | return false; |
834 | acpi_dev_get_ioresource(res, fixed_io->address, |
835 | fixed_io->address_length, |
836 | diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c |
837 | index 165c2c299e57..d3bffa478eca 100644 |
838 | --- a/drivers/base/dma-contiguous.c |
839 | +++ b/drivers/base/dma-contiguous.c |
840 | @@ -155,13 +155,23 @@ static int __init cma_activate_area(struct cma *cma) |
841 | base_pfn = pfn; |
842 | for (j = pageblock_nr_pages; j; --j, pfn++) { |
843 | WARN_ON_ONCE(!pfn_valid(pfn)); |
844 | + /* |
845 | + * alloc_contig_range requires the pfn range |
846 | + * specified to be in the same zone. Make this |
847 | + * simple by forcing the entire CMA resv range |
848 | + * to be in the same zone. |
849 | + */ |
850 | if (page_zone(pfn_to_page(pfn)) != zone) |
851 | - return -EINVAL; |
852 | + goto err; |
853 | } |
854 | init_cma_reserved_pageblock(pfn_to_page(base_pfn)); |
855 | } while (--i); |
856 | |
857 | return 0; |
858 | + |
859 | +err: |
860 | + kfree(cma->bitmap); |
861 | + return -EINVAL; |
862 | } |
863 | |
864 | static struct cma cma_areas[MAX_CMA_AREAS]; |
865 | diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c |
866 | index d915707d2ba1..93dcad0c1cbe 100644 |
867 | --- a/drivers/char/i8k.c |
868 | +++ b/drivers/char/i8k.c |
869 | @@ -138,7 +138,9 @@ static int i8k_smm(struct smm_regs *regs) |
870 | if (!alloc_cpumask_var(&old_mask, GFP_KERNEL)) |
871 | return -ENOMEM; |
872 | cpumask_copy(old_mask, ¤t->cpus_allowed); |
873 | - set_cpus_allowed_ptr(current, cpumask_of(0)); |
874 | + rc = set_cpus_allowed_ptr(current, cpumask_of(0)); |
875 | + if (rc) |
876 | + goto out; |
877 | if (smp_processor_id() != 0) { |
878 | rc = -EBUSY; |
879 | goto out; |
880 | diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c |
881 | index 27c83e45eaed..611b936ecffe 100644 |
882 | --- a/drivers/clk/clk-s2mps11.c |
883 | +++ b/drivers/clk/clk-s2mps11.c |
884 | @@ -190,16 +190,13 @@ static int s2mps11_clk_probe(struct platform_device *pdev) |
885 | goto err_reg; |
886 | } |
887 | |
888 | - s2mps11_clk->lookup = devm_kzalloc(&pdev->dev, |
889 | - sizeof(struct clk_lookup), GFP_KERNEL); |
890 | + s2mps11_clk->lookup = clkdev_alloc(s2mps11_clk->clk, |
891 | + s2mps11_name(s2mps11_clk), NULL); |
892 | if (!s2mps11_clk->lookup) { |
893 | ret = -ENOMEM; |
894 | goto err_lup; |
895 | } |
896 | |
897 | - s2mps11_clk->lookup->con_id = s2mps11_name(s2mps11_clk); |
898 | - s2mps11_clk->lookup->clk = s2mps11_clk->clk; |
899 | - |
900 | clkdev_add(s2mps11_clk->lookup); |
901 | } |
902 | |
903 | diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c |
904 | index f9b59c7e48e9..9be47a829144 100644 |
905 | --- a/drivers/clk/qcom/mmcc-msm8960.c |
906 | +++ b/drivers/clk/qcom/mmcc-msm8960.c |
907 | @@ -1208,7 +1208,7 @@ static struct clk_branch rot_clk = { |
908 | |
909 | static u8 mmcc_pxo_hdmi_map[] = { |
910 | [P_PXO] = 0, |
911 | - [P_HDMI_PLL] = 2, |
912 | + [P_HDMI_PLL] = 3, |
913 | }; |
914 | |
915 | static const char *mmcc_pxo_hdmi[] = { |
916 | diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c |
917 | index c2d204315546..125eba86c844 100644 |
918 | --- a/drivers/clk/spear/spear3xx_clock.c |
919 | +++ b/drivers/clk/spear/spear3xx_clock.c |
920 | @@ -211,7 +211,7 @@ static inline void spear310_clk_init(void) { } |
921 | /* array of all spear 320 clock lookups */ |
922 | #ifdef CONFIG_MACH_SPEAR320 |
923 | |
924 | -#define SPEAR320_CONTROL_REG (soc_config_base + 0x0000) |
925 | +#define SPEAR320_CONTROL_REG (soc_config_base + 0x0010) |
926 | #define SPEAR320_EXT_CTRL_REG (soc_config_base + 0x0018) |
927 | |
928 | #define SPEAR320_UARTX_PCLK_MASK 0x1 |
929 | diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile |
930 | index 74945652dd7a..dac58f67307a 100644 |
931 | --- a/drivers/cpufreq/Makefile |
932 | +++ b/drivers/cpufreq/Makefile |
933 | @@ -47,7 +47,7 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o |
934 | # LITTLE drivers, so that it is probed last. |
935 | obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o |
936 | |
937 | -obj-$(CONFIG_ARCH_DAVINCI_DA850) += davinci-cpufreq.o |
938 | +obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o |
939 | obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o |
940 | obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o |
941 | obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o |
942 | diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c |
943 | index 6d98c37c87ad..ae52c777339d 100644 |
944 | --- a/drivers/cpufreq/intel_pstate.c |
945 | +++ b/drivers/cpufreq/intel_pstate.c |
946 | @@ -132,6 +132,7 @@ static struct pstate_funcs pstate_funcs; |
947 | |
948 | struct perf_limits { |
949 | int no_turbo; |
950 | + int turbo_disabled; |
951 | int max_perf_pct; |
952 | int min_perf_pct; |
953 | int32_t max_perf; |
954 | @@ -291,7 +292,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, |
955 | if (ret != 1) |
956 | return -EINVAL; |
957 | limits.no_turbo = clamp_t(int, input, 0 , 1); |
958 | - |
959 | + if (limits.turbo_disabled) { |
960 | + pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); |
961 | + limits.no_turbo = limits.turbo_disabled; |
962 | + } |
963 | return count; |
964 | } |
965 | |
966 | @@ -361,21 +365,21 @@ static int byt_get_min_pstate(void) |
967 | { |
968 | u64 value; |
969 | rdmsrl(BYT_RATIOS, value); |
970 | - return (value >> 8) & 0x3F; |
971 | + return (value >> 8) & 0x7F; |
972 | } |
973 | |
974 | static int byt_get_max_pstate(void) |
975 | { |
976 | u64 value; |
977 | rdmsrl(BYT_RATIOS, value); |
978 | - return (value >> 16) & 0x3F; |
979 | + return (value >> 16) & 0x7F; |
980 | } |
981 | |
982 | static int byt_get_turbo_pstate(void) |
983 | { |
984 | u64 value; |
985 | rdmsrl(BYT_TURBO_RATIOS, value); |
986 | - return value & 0x3F; |
987 | + return value & 0x7F; |
988 | } |
989 | |
990 | static void byt_set_pstate(struct cpudata *cpudata, int pstate) |
991 | @@ -385,7 +389,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate) |
992 | u32 vid; |
993 | |
994 | val = pstate << 8; |
995 | - if (limits.no_turbo) |
996 | + if (limits.no_turbo && !limits.turbo_disabled) |
997 | val |= (u64)1 << 32; |
998 | |
999 | vid_fp = cpudata->vid.min + mul_fp( |
1000 | @@ -409,8 +413,8 @@ static void byt_get_vid(struct cpudata *cpudata) |
1001 | |
1002 | |
1003 | rdmsrl(BYT_VIDS, value); |
1004 | - cpudata->vid.min = int_tofp((value >> 8) & 0x3f); |
1005 | - cpudata->vid.max = int_tofp((value >> 16) & 0x3f); |
1006 | + cpudata->vid.min = int_tofp((value >> 8) & 0x7f); |
1007 | + cpudata->vid.max = int_tofp((value >> 16) & 0x7f); |
1008 | cpudata->vid.ratio = div_fp( |
1009 | cpudata->vid.max - cpudata->vid.min, |
1010 | int_tofp(cpudata->pstate.max_pstate - |
1011 | @@ -452,7 +456,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate) |
1012 | u64 val; |
1013 | |
1014 | val = pstate << 8; |
1015 | - if (limits.no_turbo) |
1016 | + if (limits.no_turbo && !limits.turbo_disabled) |
1017 | val |= (u64)1 << 32; |
1018 | |
1019 | wrmsrl(MSR_IA32_PERF_CTL, val); |
1020 | @@ -705,9 +709,8 @@ static int intel_pstate_init_cpu(unsigned int cpunum) |
1021 | |
1022 | cpu = all_cpu_data[cpunum]; |
1023 | |
1024 | - intel_pstate_get_cpu_pstates(cpu); |
1025 | - |
1026 | cpu->cpu = cpunum; |
1027 | + intel_pstate_get_cpu_pstates(cpu); |
1028 | |
1029 | init_timer_deferrable(&cpu->timer); |
1030 | cpu->timer.function = intel_pstate_timer_func; |
1031 | @@ -750,7 +753,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) |
1032 | limits.min_perf = int_tofp(1); |
1033 | limits.max_perf_pct = 100; |
1034 | limits.max_perf = int_tofp(1); |
1035 | - limits.no_turbo = 0; |
1036 | + limits.no_turbo = limits.turbo_disabled; |
1037 | return 0; |
1038 | } |
1039 | limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; |
1040 | @@ -790,6 +793,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) |
1041 | { |
1042 | struct cpudata *cpu; |
1043 | int rc; |
1044 | + u64 misc_en; |
1045 | |
1046 | rc = intel_pstate_init_cpu(policy->cpu); |
1047 | if (rc) |
1048 | @@ -797,8 +801,13 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) |
1049 | |
1050 | cpu = all_cpu_data[policy->cpu]; |
1051 | |
1052 | - if (!limits.no_turbo && |
1053 | - limits.min_perf_pct == 100 && limits.max_perf_pct == 100) |
1054 | + rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); |
1055 | + if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || |
1056 | + cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) { |
1057 | + limits.turbo_disabled = 1; |
1058 | + limits.no_turbo = 1; |
1059 | + } |
1060 | + if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100) |
1061 | policy->policy = CPUFREQ_POLICY_PERFORMANCE; |
1062 | else |
1063 | policy->policy = CPUFREQ_POLICY_POWERSAVE; |
1064 | diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c |
1065 | index 1d80bd3636c5..b512a4ba7569 100644 |
1066 | --- a/drivers/crypto/caam/jr.c |
1067 | +++ b/drivers/crypto/caam/jr.c |
1068 | @@ -453,8 +453,8 @@ static int caam_jr_probe(struct platform_device *pdev) |
1069 | int error; |
1070 | |
1071 | jrdev = &pdev->dev; |
1072 | - jrpriv = kmalloc(sizeof(struct caam_drv_private_jr), |
1073 | - GFP_KERNEL); |
1074 | + jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr), |
1075 | + GFP_KERNEL); |
1076 | if (!jrpriv) |
1077 | return -ENOMEM; |
1078 | |
1079 | @@ -487,10 +487,8 @@ static int caam_jr_probe(struct platform_device *pdev) |
1080 | |
1081 | /* Now do the platform independent part */ |
1082 | error = caam_jr_init(jrdev); /* now turn on hardware */ |
1083 | - if (error) { |
1084 | - kfree(jrpriv); |
1085 | + if (error) |
1086 | return error; |
1087 | - } |
1088 | |
1089 | jrpriv->dev = jrdev; |
1090 | spin_lock(&driver_data.jr_alloc_lock); |
1091 | diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c |
1092 | index 28d24caa49f3..3c78b2268209 100644 |
1093 | --- a/drivers/gpu/drm/i915/i915_gem_stolen.c |
1094 | +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c |
1095 | @@ -74,6 +74,50 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) |
1096 | if (base == 0) |
1097 | return 0; |
1098 | |
1099 | + /* make sure we don't clobber the GTT if it's within stolen memory */ |
1100 | + if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) { |
1101 | + struct { |
1102 | + u32 start, end; |
1103 | + } stolen[2] = { |
1104 | + { .start = base, .end = base + dev_priv->gtt.stolen_size, }, |
1105 | + { .start = base, .end = base + dev_priv->gtt.stolen_size, }, |
1106 | + }; |
1107 | + u64 gtt_start, gtt_end; |
1108 | + |
1109 | + gtt_start = I915_READ(PGTBL_CTL); |
1110 | + if (IS_GEN4(dev)) |
1111 | + gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) | |
1112 | + (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28; |
1113 | + else |
1114 | + gtt_start &= PGTBL_ADDRESS_LO_MASK; |
1115 | + gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4; |
1116 | + |
1117 | + if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end) |
1118 | + stolen[0].end = gtt_start; |
1119 | + if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end) |
1120 | + stolen[1].start = gtt_end; |
1121 | + |
1122 | + /* pick the larger of the two chunks */ |
1123 | + if (stolen[0].end - stolen[0].start > |
1124 | + stolen[1].end - stolen[1].start) { |
1125 | + base = stolen[0].start; |
1126 | + dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start; |
1127 | + } else { |
1128 | + base = stolen[1].start; |
1129 | + dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start; |
1130 | + } |
1131 | + |
1132 | + if (stolen[0].start != stolen[1].start || |
1133 | + stolen[0].end != stolen[1].end) { |
1134 | + DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n", |
1135 | + (unsigned long long) gtt_start, |
1136 | + (unsigned long long) gtt_end - 1); |
1137 | + DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n", |
1138 | + base, base + (u32) dev_priv->gtt.stolen_size - 1); |
1139 | + } |
1140 | + } |
1141 | + |
1142 | + |
1143 | /* Verify that nothing else uses this physical address. Stolen |
1144 | * memory should be reserved by the BIOS and hidden from the |
1145 | * kernel. So if the region is already marked as busy, something |
1146 | diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h |
1147 | index a48b7cad6f11..0a3b9386eb43 100644 |
1148 | --- a/drivers/gpu/drm/i915/i915_reg.h |
1149 | +++ b/drivers/gpu/drm/i915/i915_reg.h |
1150 | @@ -631,6 +631,9 @@ |
1151 | /* |
1152 | * Instruction and interrupt control regs |
1153 | */ |
1154 | +#define PGTBL_CTL 0x02020 |
1155 | +#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */ |
1156 | +#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */ |
1157 | #define PGTBL_ER 0x02024 |
1158 | #define RENDER_RING_BASE 0x02000 |
1159 | #define BSD_RING_BASE 0x04000 |
1160 | diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c |
1161 | index 9241e96f8502..5fa854c84d62 100644 |
1162 | --- a/drivers/gpu/drm/radeon/ci_dpm.c |
1163 | +++ b/drivers/gpu/drm/radeon/ci_dpm.c |
1164 | @@ -1161,7 +1161,7 @@ static int ci_stop_dpm(struct radeon_device *rdev) |
1165 | tmp &= ~GLOBAL_PWRMGT_EN; |
1166 | WREG32_SMC(GENERAL_PWRMGT, tmp); |
1167 | |
1168 | - tmp = RREG32(SCLK_PWRMGT_CNTL); |
1169 | + tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); |
1170 | tmp &= ~DYNAMIC_PM_EN; |
1171 | WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); |
1172 | |
1173 | diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c |
1174 | index 971d9339ce80..64108dbc7d45 100644 |
1175 | --- a/drivers/gpu/drm/radeon/evergreen.c |
1176 | +++ b/drivers/gpu/drm/radeon/evergreen.c |
1177 | @@ -189,7 +189,7 @@ static const u32 evergreen_golden_registers[] = |
1178 | 0x8c1c, 0xffffffff, 0x00001010, |
1179 | 0x28350, 0xffffffff, 0x00000000, |
1180 | 0xa008, 0xffffffff, 0x00010000, |
1181 | - 0x5cc, 0xffffffff, 0x00000001, |
1182 | + 0x5c4, 0xffffffff, 0x00000001, |
1183 | 0x9508, 0xffffffff, 0x00000002, |
1184 | 0x913c, 0x0000000f, 0x0000000a |
1185 | }; |
1186 | @@ -476,7 +476,7 @@ static const u32 cedar_golden_registers[] = |
1187 | 0x8c1c, 0xffffffff, 0x00001010, |
1188 | 0x28350, 0xffffffff, 0x00000000, |
1189 | 0xa008, 0xffffffff, 0x00010000, |
1190 | - 0x5cc, 0xffffffff, 0x00000001, |
1191 | + 0x5c4, 0xffffffff, 0x00000001, |
1192 | 0x9508, 0xffffffff, 0x00000002 |
1193 | }; |
1194 | |
1195 | @@ -635,7 +635,7 @@ static const u32 juniper_mgcg_init[] = |
1196 | static const u32 supersumo_golden_registers[] = |
1197 | { |
1198 | 0x5eb4, 0xffffffff, 0x00000002, |
1199 | - 0x5cc, 0xffffffff, 0x00000001, |
1200 | + 0x5c4, 0xffffffff, 0x00000001, |
1201 | 0x7030, 0xffffffff, 0x00000011, |
1202 | 0x7c30, 0xffffffff, 0x00000011, |
1203 | 0x6104, 0x01000300, 0x00000000, |
1204 | @@ -719,7 +719,7 @@ static const u32 sumo_golden_registers[] = |
1205 | static const u32 wrestler_golden_registers[] = |
1206 | { |
1207 | 0x5eb4, 0xffffffff, 0x00000002, |
1208 | - 0x5cc, 0xffffffff, 0x00000001, |
1209 | + 0x5c4, 0xffffffff, 0x00000001, |
1210 | 0x7030, 0xffffffff, 0x00000011, |
1211 | 0x7c30, 0xffffffff, 0x00000011, |
1212 | 0x6104, 0x01000300, 0x00000000, |
1213 | diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c |
1214 | index 72d3616de08e..95b693c11640 100644 |
1215 | --- a/drivers/gpu/drm/radeon/rs600.c |
1216 | +++ b/drivers/gpu/drm/radeon/rs600.c |
1217 | @@ -646,8 +646,10 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
1218 | return -EINVAL; |
1219 | } |
1220 | addr = addr & 0xFFFFFFFFFFFFF000ULL; |
1221 | - addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED; |
1222 | - addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE; |
1223 | + if (addr != rdev->dummy_page.addr) |
1224 | + addr |= R600_PTE_VALID | R600_PTE_READABLE | |
1225 | + R600_PTE_WRITEABLE; |
1226 | + addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED; |
1227 | writeq(addr, ptr + (i * 8)); |
1228 | return 0; |
1229 | } |
1230 | diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c |
1231 | index b5f63f5e22a3..8fcb932a3a55 100644 |
1232 | --- a/drivers/gpu/drm/radeon/rv770_dpm.c |
1233 | +++ b/drivers/gpu/drm/radeon/rv770_dpm.c |
1234 | @@ -2332,12 +2332,6 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev) |
1235 | pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, |
1236 | ASIC_INTERNAL_MEMORY_SS, 0); |
1237 | |
1238 | - /* disable ss, causes hangs on some cayman boards */ |
1239 | - if (rdev->family == CHIP_CAYMAN) { |
1240 | - pi->sclk_ss = false; |
1241 | - pi->mclk_ss = false; |
1242 | - } |
1243 | - |
1244 | if (pi->sclk_ss || pi->mclk_ss) |
1245 | pi->dynamic_ss = true; |
1246 | else |
1247 | diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c |
1248 | index 05827eccc53a..ce5a9f2584f3 100644 |
1249 | --- a/drivers/hv/connection.c |
1250 | +++ b/drivers/hv/connection.c |
1251 | @@ -319,9 +319,13 @@ static void process_chn_event(u32 relid) |
1252 | */ |
1253 | |
1254 | do { |
1255 | - hv_begin_read(&channel->inbound); |
1256 | + if (read_state) |
1257 | + hv_begin_read(&channel->inbound); |
1258 | channel->onchannel_callback(arg); |
1259 | - bytes_to_read = hv_end_read(&channel->inbound); |
1260 | + if (read_state) |
1261 | + bytes_to_read = hv_end_read(&channel->inbound); |
1262 | + else |
1263 | + bytes_to_read = 0; |
1264 | } while (read_state && (bytes_to_read != 0)); |
1265 | } else { |
1266 | pr_err("no channel callback for relid - %u\n", relid); |
1267 | diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c |
1268 | index 29dd9f746dfa..233b374334ed 100644 |
1269 | --- a/drivers/hwmon/adm1021.c |
1270 | +++ b/drivers/hwmon/adm1021.c |
1271 | @@ -185,7 +185,7 @@ static ssize_t set_temp_max(struct device *dev, |
1272 | struct i2c_client *client = to_i2c_client(dev); |
1273 | struct adm1021_data *data = i2c_get_clientdata(client); |
1274 | long temp; |
1275 | - int err; |
1276 | + int reg_val, err; |
1277 | |
1278 | err = kstrtol(buf, 10, &temp); |
1279 | if (err) |
1280 | @@ -193,10 +193,11 @@ static ssize_t set_temp_max(struct device *dev, |
1281 | temp /= 1000; |
1282 | |
1283 | mutex_lock(&data->update_lock); |
1284 | - data->temp_max[index] = clamp_val(temp, -128, 127); |
1285 | + reg_val = clamp_val(temp, -128, 127); |
1286 | + data->temp_max[index] = reg_val * 1000; |
1287 | if (!read_only) |
1288 | i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index), |
1289 | - data->temp_max[index]); |
1290 | + reg_val); |
1291 | mutex_unlock(&data->update_lock); |
1292 | |
1293 | return count; |
1294 | @@ -210,7 +211,7 @@ static ssize_t set_temp_min(struct device *dev, |
1295 | struct i2c_client *client = to_i2c_client(dev); |
1296 | struct adm1021_data *data = i2c_get_clientdata(client); |
1297 | long temp; |
1298 | - int err; |
1299 | + int reg_val, err; |
1300 | |
1301 | err = kstrtol(buf, 10, &temp); |
1302 | if (err) |
1303 | @@ -218,10 +219,11 @@ static ssize_t set_temp_min(struct device *dev, |
1304 | temp /= 1000; |
1305 | |
1306 | mutex_lock(&data->update_lock); |
1307 | - data->temp_min[index] = clamp_val(temp, -128, 127); |
1308 | + reg_val = clamp_val(temp, -128, 127); |
1309 | + data->temp_min[index] = reg_val * 1000; |
1310 | if (!read_only) |
1311 | i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index), |
1312 | - data->temp_min[index]); |
1313 | + reg_val); |
1314 | mutex_unlock(&data->update_lock); |
1315 | |
1316 | return count; |
1317 | diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c |
1318 | index d19c790e410a..e38115ce0350 100644 |
1319 | --- a/drivers/hwmon/adm1029.c |
1320 | +++ b/drivers/hwmon/adm1029.c |
1321 | @@ -232,6 +232,9 @@ static ssize_t set_fan_div(struct device *dev, |
1322 | /* Update the value */ |
1323 | reg = (reg & 0x3F) | (val << 6); |
1324 | |
1325 | + /* Update the cache */ |
1326 | + data->fan_div[attr->index] = reg; |
1327 | + |
1328 | /* Write value */ |
1329 | i2c_smbus_write_byte_data(client, |
1330 | ADM1029_REG_FAN_DIV[attr->index], reg); |
1331 | diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c |
1332 | index a8a540ca8c34..51c1a5a165ab 100644 |
1333 | --- a/drivers/hwmon/adm1031.c |
1334 | +++ b/drivers/hwmon/adm1031.c |
1335 | @@ -365,6 +365,7 @@ set_auto_temp_min(struct device *dev, struct device_attribute *attr, |
1336 | if (ret) |
1337 | return ret; |
1338 | |
1339 | + val = clamp_val(val, 0, 127000); |
1340 | mutex_lock(&data->update_lock); |
1341 | data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]); |
1342 | adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr), |
1343 | @@ -394,6 +395,7 @@ set_auto_temp_max(struct device *dev, struct device_attribute *attr, |
1344 | if (ret) |
1345 | return ret; |
1346 | |
1347 | + val = clamp_val(val, 0, 127000); |
1348 | mutex_lock(&data->update_lock); |
1349 | data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr], |
1350 | data->pwm[nr]); |
1351 | @@ -696,7 +698,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr, |
1352 | if (ret) |
1353 | return ret; |
1354 | |
1355 | - val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); |
1356 | + val = clamp_val(val, -55000, 127000); |
1357 | mutex_lock(&data->update_lock); |
1358 | data->temp_min[nr] = TEMP_TO_REG(val); |
1359 | adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr), |
1360 | @@ -717,7 +719,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr, |
1361 | if (ret) |
1362 | return ret; |
1363 | |
1364 | - val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); |
1365 | + val = clamp_val(val, -55000, 127000); |
1366 | mutex_lock(&data->update_lock); |
1367 | data->temp_max[nr] = TEMP_TO_REG(val); |
1368 | adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr), |
1369 | @@ -738,7 +740,7 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr, |
1370 | if (ret) |
1371 | return ret; |
1372 | |
1373 | - val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); |
1374 | + val = clamp_val(val, -55000, 127000); |
1375 | mutex_lock(&data->update_lock); |
1376 | data->temp_crit[nr] = TEMP_TO_REG(val); |
1377 | adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr), |
1378 | diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c |
1379 | index eea817296513..9f2be3dd28f3 100644 |
1380 | --- a/drivers/hwmon/amc6821.c |
1381 | +++ b/drivers/hwmon/amc6821.c |
1382 | @@ -704,7 +704,7 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, |
1383 | get_temp_alarm, NULL, IDX_TEMP1_MAX); |
1384 | static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, |
1385 | get_temp_alarm, NULL, IDX_TEMP1_CRIT); |
1386 | -static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO | S_IWUSR, |
1387 | +static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, |
1388 | get_temp, NULL, IDX_TEMP2_INPUT); |
1389 | static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp, |
1390 | set_temp, IDX_TEMP2_MIN); |
1391 | diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c |
1392 | index 2c137b26acb4..5790246a7e1d 100644 |
1393 | --- a/drivers/hwmon/emc2103.c |
1394 | +++ b/drivers/hwmon/emc2103.c |
1395 | @@ -250,9 +250,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da, |
1396 | if (result < 0) |
1397 | return result; |
1398 | |
1399 | - val = DIV_ROUND_CLOSEST(val, 1000); |
1400 | - if ((val < -63) || (val > 127)) |
1401 | - return -EINVAL; |
1402 | + val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127); |
1403 | |
1404 | mutex_lock(&data->update_lock); |
1405 | data->temp_min[nr] = val; |
1406 | @@ -274,9 +272,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da, |
1407 | if (result < 0) |
1408 | return result; |
1409 | |
1410 | - val = DIV_ROUND_CLOSEST(val, 1000); |
1411 | - if ((val < -63) || (val > 127)) |
1412 | - return -EINVAL; |
1413 | + val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127); |
1414 | |
1415 | mutex_lock(&data->update_lock); |
1416 | data->temp_max[nr] = val; |
1417 | @@ -390,15 +386,14 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da, |
1418 | { |
1419 | struct emc2103_data *data = emc2103_update_device(dev); |
1420 | struct i2c_client *client = to_i2c_client(dev); |
1421 | - long rpm_target; |
1422 | + unsigned long rpm_target; |
1423 | |
1424 | - int result = kstrtol(buf, 10, &rpm_target); |
1425 | + int result = kstrtoul(buf, 10, &rpm_target); |
1426 | if (result < 0) |
1427 | return result; |
1428 | |
1429 | /* Datasheet states 16384 as maximum RPM target (table 3.2) */ |
1430 | - if ((rpm_target < 0) || (rpm_target > 16384)) |
1431 | - return -EINVAL; |
1432 | + rpm_target = clamp_val(rpm_target, 0, 16384); |
1433 | |
1434 | mutex_lock(&data->update_lock); |
1435 | |
1436 | diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c |
1437 | index 31e786e3999b..63b2bb6bdbbc 100644 |
1438 | --- a/drivers/iio/adc/ti_am335x_adc.c |
1439 | +++ b/drivers/iio/adc/ti_am335x_adc.c |
1440 | @@ -375,7 +375,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, |
1441 | return -EAGAIN; |
1442 | } |
1443 | } |
1444 | - map_val = chan->channel + TOTAL_CHANNELS; |
1445 | + map_val = adc_dev->channel_step[chan->scan_index]; |
1446 | |
1447 | /* |
1448 | * We check the complete FIFO. We programmed just one entry but in case |
1449 | diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c |
1450 | index 3842ac738f98..db404a0f7e2c 100644 |
1451 | --- a/drivers/md/dm-io.c |
1452 | +++ b/drivers/md/dm-io.c |
1453 | @@ -10,6 +10,7 @@ |
1454 | #include <linux/device-mapper.h> |
1455 | |
1456 | #include <linux/bio.h> |
1457 | +#include <linux/completion.h> |
1458 | #include <linux/mempool.h> |
1459 | #include <linux/module.h> |
1460 | #include <linux/sched.h> |
1461 | @@ -32,7 +33,7 @@ struct dm_io_client { |
1462 | struct io { |
1463 | unsigned long error_bits; |
1464 | atomic_t count; |
1465 | - struct task_struct *sleeper; |
1466 | + struct completion *wait; |
1467 | struct dm_io_client *client; |
1468 | io_notify_fn callback; |
1469 | void *context; |
1470 | @@ -121,8 +122,8 @@ static void dec_count(struct io *io, unsigned int region, int error) |
1471 | invalidate_kernel_vmap_range(io->vma_invalidate_address, |
1472 | io->vma_invalidate_size); |
1473 | |
1474 | - if (io->sleeper) |
1475 | - wake_up_process(io->sleeper); |
1476 | + if (io->wait) |
1477 | + complete(io->wait); |
1478 | |
1479 | else { |
1480 | unsigned long r = io->error_bits; |
1481 | @@ -387,6 +388,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, |
1482 | */ |
1483 | volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1]; |
1484 | struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io)); |
1485 | + DECLARE_COMPLETION_ONSTACK(wait); |
1486 | |
1487 | if (num_regions > 1 && (rw & RW_MASK) != WRITE) { |
1488 | WARN_ON(1); |
1489 | @@ -395,7 +397,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, |
1490 | |
1491 | io->error_bits = 0; |
1492 | atomic_set(&io->count, 1); /* see dispatch_io() */ |
1493 | - io->sleeper = current; |
1494 | + io->wait = &wait; |
1495 | io->client = client; |
1496 | |
1497 | io->vma_invalidate_address = dp->vma_invalidate_address; |
1498 | @@ -403,15 +405,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, |
1499 | |
1500 | dispatch_io(rw, num_regions, where, dp, io, 1); |
1501 | |
1502 | - while (1) { |
1503 | - set_current_state(TASK_UNINTERRUPTIBLE); |
1504 | - |
1505 | - if (!atomic_read(&io->count)) |
1506 | - break; |
1507 | - |
1508 | - io_schedule(); |
1509 | - } |
1510 | - set_current_state(TASK_RUNNING); |
1511 | + wait_for_completion_io(&wait); |
1512 | |
1513 | if (error_bits) |
1514 | *error_bits = io->error_bits; |
1515 | @@ -434,7 +428,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions, |
1516 | io = mempool_alloc(client->pool, GFP_NOIO); |
1517 | io->error_bits = 0; |
1518 | atomic_set(&io->count, 1); /* see dispatch_io() */ |
1519 | - io->sleeper = NULL; |
1520 | + io->wait = NULL; |
1521 | io->client = client; |
1522 | io->callback = fn; |
1523 | io->context = context; |
1524 | diff --git a/drivers/md/dm.c b/drivers/md/dm.c |
1525 | index 8c53b09b9a2c..65ee3a0d4683 100644 |
1526 | --- a/drivers/md/dm.c |
1527 | +++ b/drivers/md/dm.c |
1528 | @@ -54,6 +54,8 @@ static void do_deferred_remove(struct work_struct *w); |
1529 | |
1530 | static DECLARE_WORK(deferred_remove_work, do_deferred_remove); |
1531 | |
1532 | +static struct workqueue_struct *deferred_remove_workqueue; |
1533 | + |
1534 | /* |
1535 | * For bio-based dm. |
1536 | * One of these is allocated per bio. |
1537 | @@ -283,16 +285,24 @@ static int __init local_init(void) |
1538 | if (r) |
1539 | goto out_free_rq_tio_cache; |
1540 | |
1541 | + deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); |
1542 | + if (!deferred_remove_workqueue) { |
1543 | + r = -ENOMEM; |
1544 | + goto out_uevent_exit; |
1545 | + } |
1546 | + |
1547 | _major = major; |
1548 | r = register_blkdev(_major, _name); |
1549 | if (r < 0) |
1550 | - goto out_uevent_exit; |
1551 | + goto out_free_workqueue; |
1552 | |
1553 | if (!_major) |
1554 | _major = r; |
1555 | |
1556 | return 0; |
1557 | |
1558 | +out_free_workqueue: |
1559 | + destroy_workqueue(deferred_remove_workqueue); |
1560 | out_uevent_exit: |
1561 | dm_uevent_exit(); |
1562 | out_free_rq_tio_cache: |
1563 | @@ -306,6 +316,7 @@ out_free_io_cache: |
1564 | static void local_exit(void) |
1565 | { |
1566 | flush_scheduled_work(); |
1567 | + destroy_workqueue(deferred_remove_workqueue); |
1568 | |
1569 | kmem_cache_destroy(_rq_tio_cache); |
1570 | kmem_cache_destroy(_io_cache); |
1571 | @@ -414,7 +425,7 @@ static void dm_blk_close(struct gendisk *disk, fmode_t mode) |
1572 | |
1573 | if (atomic_dec_and_test(&md->open_count) && |
1574 | (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) |
1575 | - schedule_work(&deferred_remove_work); |
1576 | + queue_work(deferred_remove_workqueue, &deferred_remove_work); |
1577 | |
1578 | dm_put(md); |
1579 | |
1580 | diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c |
1581 | index ed1b6db25b03..be36adf33ab0 100644 |
1582 | --- a/drivers/pci/pci.c |
1583 | +++ b/drivers/pci/pci.c |
1584 | @@ -3085,8 +3085,13 @@ static int pci_af_flr(struct pci_dev *dev, int probe) |
1585 | if (probe) |
1586 | return 0; |
1587 | |
1588 | - /* Wait for Transaction Pending bit clean */ |
1589 | - if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP)) |
1590 | + /* |
1591 | + * Wait for Transaction Pending bit to clear. A word-aligned test |
1592 | + * is used, so we use the conrol offset rather than status and shift |
1593 | + * the test bit to match. |
1594 | + */ |
1595 | + if (pci_wait_for_pending(dev, pos + PCI_AF_CTRL, |
1596 | + PCI_AF_STATUS_TP << 8)) |
1597 | goto clear; |
1598 | |
1599 | dev_err(&dev->dev, "transaction is not cleared; " |
1600 | diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c |
1601 | index 6c738376daff..34d56f7864d6 100644 |
1602 | --- a/drivers/phy/phy-core.c |
1603 | +++ b/drivers/phy/phy-core.c |
1604 | @@ -553,8 +553,9 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops, |
1605 | return phy; |
1606 | |
1607 | put_dev: |
1608 | - put_device(&phy->dev); |
1609 | - ida_remove(&phy_ida, phy->id); |
1610 | + put_device(&phy->dev); /* calls phy_release() which frees resources */ |
1611 | + return ERR_PTR(ret); |
1612 | + |
1613 | free_phy: |
1614 | kfree(phy); |
1615 | return ERR_PTR(ret); |
1616 | @@ -738,7 +739,7 @@ static void phy_release(struct device *dev) |
1617 | |
1618 | phy = to_phy(dev); |
1619 | dev_vdbg(dev, "releasing '%s'\n", dev_name(dev)); |
1620 | - ida_remove(&phy_ida, phy->id); |
1621 | + ida_simple_remove(&phy_ida, phy->id); |
1622 | kfree(phy); |
1623 | } |
1624 | |
1625 | diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c |
1626 | index 1ecfe3bd92ac..1cff2a21db67 100644 |
1627 | --- a/drivers/rtc/rtc-puv3.c |
1628 | +++ b/drivers/rtc/rtc-puv3.c |
1629 | @@ -71,7 +71,7 @@ static int puv3_rtc_setpie(struct device *dev, int enabled) |
1630 | { |
1631 | unsigned int tmp; |
1632 | |
1633 | - dev_debug(dev, "%s: pie=%d\n", __func__, enabled); |
1634 | + dev_dbg(dev, "%s: pie=%d\n", __func__, enabled); |
1635 | |
1636 | spin_lock_irq(&puv3_rtc_pie_lock); |
1637 | tmp = readl(RTC_RTSR) & ~RTC_RTSR_HZE; |
1638 | @@ -140,7 +140,7 @@ static int puv3_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) |
1639 | rtc_tm_to_time(tm, &rtcalarm_count); |
1640 | writel(rtcalarm_count, RTC_RTAR); |
1641 | |
1642 | - puv3_rtc_setaie(&dev->dev, alrm->enabled); |
1643 | + puv3_rtc_setaie(dev, alrm->enabled); |
1644 | |
1645 | if (alrm->enabled) |
1646 | enable_irq_wake(puv3_rtc_alarmno); |
1647 | diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c |
1648 | index fdb07199d9c2..1967bee4f076 100644 |
1649 | --- a/drivers/thermal/thermal_hwmon.c |
1650 | +++ b/drivers/thermal/thermal_hwmon.c |
1651 | @@ -140,6 +140,12 @@ thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon, |
1652 | return NULL; |
1653 | } |
1654 | |
1655 | +static bool thermal_zone_crit_temp_valid(struct thermal_zone_device *tz) |
1656 | +{ |
1657 | + unsigned long temp; |
1658 | + return tz->ops->get_crit_temp && !tz->ops->get_crit_temp(tz, &temp); |
1659 | +} |
1660 | + |
1661 | int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) |
1662 | { |
1663 | struct thermal_hwmon_device *hwmon; |
1664 | @@ -189,21 +195,18 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) |
1665 | if (result) |
1666 | goto free_temp_mem; |
1667 | |
1668 | - if (tz->ops->get_crit_temp) { |
1669 | - unsigned long temperature; |
1670 | - if (!tz->ops->get_crit_temp(tz, &temperature)) { |
1671 | - snprintf(temp->temp_crit.name, |
1672 | - sizeof(temp->temp_crit.name), |
1673 | + if (thermal_zone_crit_temp_valid(tz)) { |
1674 | + snprintf(temp->temp_crit.name, |
1675 | + sizeof(temp->temp_crit.name), |
1676 | "temp%d_crit", hwmon->count); |
1677 | - temp->temp_crit.attr.attr.name = temp->temp_crit.name; |
1678 | - temp->temp_crit.attr.attr.mode = 0444; |
1679 | - temp->temp_crit.attr.show = temp_crit_show; |
1680 | - sysfs_attr_init(&temp->temp_crit.attr.attr); |
1681 | - result = device_create_file(hwmon->device, |
1682 | - &temp->temp_crit.attr); |
1683 | - if (result) |
1684 | - goto unregister_input; |
1685 | - } |
1686 | + temp->temp_crit.attr.attr.name = temp->temp_crit.name; |
1687 | + temp->temp_crit.attr.attr.mode = 0444; |
1688 | + temp->temp_crit.attr.show = temp_crit_show; |
1689 | + sysfs_attr_init(&temp->temp_crit.attr.attr); |
1690 | + result = device_create_file(hwmon->device, |
1691 | + &temp->temp_crit.attr); |
1692 | + if (result) |
1693 | + goto unregister_input; |
1694 | } |
1695 | |
1696 | mutex_lock(&thermal_hwmon_list_lock); |
1697 | @@ -250,7 +253,7 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) |
1698 | } |
1699 | |
1700 | device_remove_file(hwmon->device, &temp->temp_input.attr); |
1701 | - if (tz->ops->get_crit_temp) |
1702 | + if (thermal_zone_crit_temp_valid(tz)) |
1703 | device_remove_file(hwmon->device, &temp->temp_crit.attr); |
1704 | |
1705 | mutex_lock(&thermal_hwmon_list_lock); |
1706 | diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c |
1707 | index 762e4a5f5ae9..330df5ce435b 100644 |
1708 | --- a/drivers/usb/serial/cp210x.c |
1709 | +++ b/drivers/usb/serial/cp210x.c |
1710 | @@ -153,6 +153,7 @@ static const struct usb_device_id id_table[] = { |
1711 | { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ |
1712 | { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ |
1713 | { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ |
1714 | + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ |
1715 | { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ |
1716 | { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ |
1717 | { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ |
1718 | diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c |
1719 | index 115662c16dcc..8a3813be1b28 100644 |
1720 | --- a/drivers/usb/serial/ftdi_sio.c |
1721 | +++ b/drivers/usb/serial/ftdi_sio.c |
1722 | @@ -720,7 +720,8 @@ static const struct usb_device_id id_table_combined[] = { |
1723 | { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) }, |
1724 | { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) }, |
1725 | { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) }, |
1726 | - { USB_DEVICE(TESTO_VID, TESTO_USB_INTERFACE_PID) }, |
1727 | + { USB_DEVICE(TESTO_VID, TESTO_1_PID) }, |
1728 | + { USB_DEVICE(TESTO_VID, TESTO_3_PID) }, |
1729 | { USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) }, |
1730 | { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) }, |
1731 | { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) }, |
1732 | @@ -944,6 +945,8 @@ static const struct usb_device_id id_table_combined[] = { |
1733 | { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) }, |
1734 | { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) }, |
1735 | { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) }, |
1736 | + /* Infineon Devices */ |
1737 | + { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, |
1738 | { } /* Terminating entry */ |
1739 | }; |
1740 | |
1741 | diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h |
1742 | index 500474c48f4b..c4777bc6aee0 100644 |
1743 | --- a/drivers/usb/serial/ftdi_sio_ids.h |
1744 | +++ b/drivers/usb/serial/ftdi_sio_ids.h |
1745 | @@ -584,6 +584,12 @@ |
1746 | #define RATOC_PRODUCT_ID_USB60F 0xb020 |
1747 | |
1748 | /* |
1749 | + * Infineon Technologies |
1750 | + */ |
1751 | +#define INFINEON_VID 0x058b |
1752 | +#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */ |
1753 | + |
1754 | +/* |
1755 | * Acton Research Corp. |
1756 | */ |
1757 | #define ACTON_VID 0x0647 /* Vendor ID */ |
1758 | @@ -798,7 +804,8 @@ |
1759 | * Submitted by Colin Leroy |
1760 | */ |
1761 | #define TESTO_VID 0x128D |
1762 | -#define TESTO_USB_INTERFACE_PID 0x0001 |
1763 | +#define TESTO_1_PID 0x0001 |
1764 | +#define TESTO_3_PID 0x0003 |
1765 | |
1766 | /* |
1767 | * Mobility Electronics products. |
1768 | diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
1769 | index e25e8ca09fe2..9da566a3f5c8 100644 |
1770 | --- a/drivers/usb/serial/option.c |
1771 | +++ b/drivers/usb/serial/option.c |
1772 | @@ -1487,6 +1487,8 @@ static const struct usb_device_id option_ids[] = { |
1773 | .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, |
1774 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */ |
1775 | .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, |
1776 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */ |
1777 | + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, |
1778 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, |
1779 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, |
1780 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, |
1781 | diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c |
1782 | index 3981ff783950..171b9fa0f27a 100644 |
1783 | --- a/fs/ext4/extents_status.c |
1784 | +++ b/fs/ext4/extents_status.c |
1785 | @@ -962,10 +962,10 @@ retry: |
1786 | continue; |
1787 | } |
1788 | |
1789 | - if (ei->i_es_lru_nr == 0 || ei == locked_ei) |
1790 | + if (ei->i_es_lru_nr == 0 || ei == locked_ei || |
1791 | + !write_trylock(&ei->i_es_lock)) |
1792 | continue; |
1793 | |
1794 | - write_lock(&ei->i_es_lock); |
1795 | shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan); |
1796 | if (ei->i_es_lru_nr == 0) |
1797 | list_del_init(&ei->i_es_lru); |
1798 | diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c |
1799 | index 0ee59a6644e2..64bb32f17903 100644 |
1800 | --- a/fs/ext4/ialloc.c |
1801 | +++ b/fs/ext4/ialloc.c |
1802 | @@ -851,6 +851,13 @@ got: |
1803 | goto out; |
1804 | } |
1805 | |
1806 | + BUFFER_TRACE(group_desc_bh, "get_write_access"); |
1807 | + err = ext4_journal_get_write_access(handle, group_desc_bh); |
1808 | + if (err) { |
1809 | + ext4_std_error(sb, err); |
1810 | + goto out; |
1811 | + } |
1812 | + |
1813 | /* We may have to initialize the block bitmap if it isn't already */ |
1814 | if (ext4_has_group_desc_csum(sb) && |
1815 | gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { |
1816 | @@ -887,13 +894,6 @@ got: |
1817 | } |
1818 | } |
1819 | |
1820 | - BUFFER_TRACE(group_desc_bh, "get_write_access"); |
1821 | - err = ext4_journal_get_write_access(handle, group_desc_bh); |
1822 | - if (err) { |
1823 | - ext4_std_error(sb, err); |
1824 | - goto out; |
1825 | - } |
1826 | - |
1827 | /* Update the relevant bg descriptor fields */ |
1828 | if (ext4_has_group_desc_csum(sb)) { |
1829 | int free; |
1830 | diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c |
1831 | index 08ddfdac955c..502f0fd71470 100644 |
1832 | --- a/fs/ext4/mballoc.c |
1833 | +++ b/fs/ext4/mballoc.c |
1834 | @@ -751,8 +751,8 @@ void ext4_mb_generate_buddy(struct super_block *sb, |
1835 | |
1836 | if (free != grp->bb_free) { |
1837 | ext4_grp_locked_error(sb, group, 0, 0, |
1838 | - "%u clusters in bitmap, %u in gd; " |
1839 | - "block bitmap corrupt.", |
1840 | + "block bitmap and bg descriptor " |
1841 | + "inconsistent: %u vs %u free clusters", |
1842 | free, grp->bb_free); |
1843 | /* |
1844 | * If we intend to continue, we consider group descriptor |
1845 | diff --git a/fs/ext4/super.c b/fs/ext4/super.c |
1846 | index 710fed2377d4..25b327e87318 100644 |
1847 | --- a/fs/ext4/super.c |
1848 | +++ b/fs/ext4/super.c |
1849 | @@ -1519,8 +1519,6 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, |
1850 | arg = JBD2_DEFAULT_MAX_COMMIT_AGE; |
1851 | sbi->s_commit_interval = HZ * arg; |
1852 | } else if (token == Opt_max_batch_time) { |
1853 | - if (arg == 0) |
1854 | - arg = EXT4_DEF_MAX_BATCH_TIME; |
1855 | sbi->s_max_batch_time = arg; |
1856 | } else if (token == Opt_min_batch_time) { |
1857 | sbi->s_min_batch_time = arg; |
1858 | @@ -2793,10 +2791,11 @@ static void print_daily_error_info(unsigned long arg) |
1859 | es = sbi->s_es; |
1860 | |
1861 | if (es->s_error_count) |
1862 | - ext4_msg(sb, KERN_NOTICE, "error count: %u", |
1863 | + /* fsck newer than v1.41.13 is needed to clean this condition. */ |
1864 | + ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u", |
1865 | le32_to_cpu(es->s_error_count)); |
1866 | if (es->s_first_error_time) { |
1867 | - printk(KERN_NOTICE "EXT4-fs (%s): initial error at %u: %.*s:%d", |
1868 | + printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d", |
1869 | sb->s_id, le32_to_cpu(es->s_first_error_time), |
1870 | (int) sizeof(es->s_first_error_func), |
1871 | es->s_first_error_func, |
1872 | @@ -2810,7 +2809,7 @@ static void print_daily_error_info(unsigned long arg) |
1873 | printk("\n"); |
1874 | } |
1875 | if (es->s_last_error_time) { |
1876 | - printk(KERN_NOTICE "EXT4-fs (%s): last error at %u: %.*s:%d", |
1877 | + printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d", |
1878 | sb->s_id, le32_to_cpu(es->s_last_error_time), |
1879 | (int) sizeof(es->s_last_error_func), |
1880 | es->s_last_error_func, |
1881 | diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c |
1882 | index 60bb365f54a5..f8a5d6a166fb 100644 |
1883 | --- a/fs/jbd2/transaction.c |
1884 | +++ b/fs/jbd2/transaction.c |
1885 | @@ -1590,9 +1590,12 @@ int jbd2_journal_stop(handle_t *handle) |
1886 | * to perform a synchronous write. We do this to detect the |
1887 | * case where a single process is doing a stream of sync |
1888 | * writes. No point in waiting for joiners in that case. |
1889 | + * |
1890 | + * Setting max_batch_time to 0 disables this completely. |
1891 | */ |
1892 | pid = current->pid; |
1893 | - if (handle->h_sync && journal->j_last_sync_writer != pid) { |
1894 | + if (handle->h_sync && journal->j_last_sync_writer != pid && |
1895 | + journal->j_max_batch_time) { |
1896 | u64 commit_time, trans_time; |
1897 | |
1898 | journal->j_last_sync_writer = pid; |
1899 | diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h |
1900 | index d69cf637a15a..49a4d6f59108 100644 |
1901 | --- a/include/linux/ring_buffer.h |
1902 | +++ b/include/linux/ring_buffer.h |
1903 | @@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k |
1904 | __ring_buffer_alloc((size), (flags), &__key); \ |
1905 | }) |
1906 | |
1907 | -void ring_buffer_wait(struct ring_buffer *buffer, int cpu); |
1908 | +int ring_buffer_wait(struct ring_buffer *buffer, int cpu); |
1909 | int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, |
1910 | struct file *filp, poll_table *poll_table); |
1911 | |
1912 | diff --git a/kernel/cpuset.c b/kernel/cpuset.c |
1913 | index e6b1b66afe52..6b27e5c0cd86 100644 |
1914 | --- a/kernel/cpuset.c |
1915 | +++ b/kernel/cpuset.c |
1916 | @@ -1236,7 +1236,13 @@ done: |
1917 | |
1918 | int current_cpuset_is_being_rebound(void) |
1919 | { |
1920 | - return task_cs(current) == cpuset_being_rebound; |
1921 | + int ret; |
1922 | + |
1923 | + rcu_read_lock(); |
1924 | + ret = task_cs(current) == cpuset_being_rebound; |
1925 | + rcu_read_unlock(); |
1926 | + |
1927 | + return ret; |
1928 | } |
1929 | |
1930 | static int update_relax_domain_level(struct cpuset *cs, s64 val) |
1931 | diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c |
1932 | index fc4da2d97f9b..04202d9aa514 100644 |
1933 | --- a/kernel/trace/ring_buffer.c |
1934 | +++ b/kernel/trace/ring_buffer.c |
1935 | @@ -543,7 +543,7 @@ static void rb_wake_up_waiters(struct irq_work *work) |
1936 | * as data is added to any of the @buffer's cpu buffers. Otherwise |
1937 | * it will wait for data to be added to a specific cpu buffer. |
1938 | */ |
1939 | -void ring_buffer_wait(struct ring_buffer *buffer, int cpu) |
1940 | +int ring_buffer_wait(struct ring_buffer *buffer, int cpu) |
1941 | { |
1942 | struct ring_buffer_per_cpu *cpu_buffer; |
1943 | DEFINE_WAIT(wait); |
1944 | @@ -557,6 +557,8 @@ void ring_buffer_wait(struct ring_buffer *buffer, int cpu) |
1945 | if (cpu == RING_BUFFER_ALL_CPUS) |
1946 | work = &buffer->irq_work; |
1947 | else { |
1948 | + if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
1949 | + return -ENODEV; |
1950 | cpu_buffer = buffer->buffers[cpu]; |
1951 | work = &cpu_buffer->irq_work; |
1952 | } |
1953 | @@ -591,6 +593,7 @@ void ring_buffer_wait(struct ring_buffer *buffer, int cpu) |
1954 | schedule(); |
1955 | |
1956 | finish_wait(&work->waiters, &wait); |
1957 | + return 0; |
1958 | } |
1959 | |
1960 | /** |
1961 | diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c |
1962 | index fd21e601a891..922657f30723 100644 |
1963 | --- a/kernel/trace/trace.c |
1964 | +++ b/kernel/trace/trace.c |
1965 | @@ -1091,13 +1091,13 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) |
1966 | } |
1967 | #endif /* CONFIG_TRACER_MAX_TRACE */ |
1968 | |
1969 | -static void default_wait_pipe(struct trace_iterator *iter) |
1970 | +static int default_wait_pipe(struct trace_iterator *iter) |
1971 | { |
1972 | /* Iterators are static, they should be filled or empty */ |
1973 | if (trace_buffer_iter(iter, iter->cpu_file)) |
1974 | - return; |
1975 | + return 0; |
1976 | |
1977 | - ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file); |
1978 | + return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file); |
1979 | } |
1980 | |
1981 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
1982 | @@ -4160,17 +4160,19 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table) |
1983 | * |
1984 | * Anyway, this is really very primitive wakeup. |
1985 | */ |
1986 | -void poll_wait_pipe(struct trace_iterator *iter) |
1987 | +int poll_wait_pipe(struct trace_iterator *iter) |
1988 | { |
1989 | set_current_state(TASK_INTERRUPTIBLE); |
1990 | /* sleep for 100 msecs, and try again. */ |
1991 | schedule_timeout(HZ / 10); |
1992 | + return 0; |
1993 | } |
1994 | |
1995 | /* Must be called with trace_types_lock mutex held. */ |
1996 | static int tracing_wait_pipe(struct file *filp) |
1997 | { |
1998 | struct trace_iterator *iter = filp->private_data; |
1999 | + int ret; |
2000 | |
2001 | while (trace_empty(iter)) { |
2002 | |
2003 | @@ -4180,10 +4182,13 @@ static int tracing_wait_pipe(struct file *filp) |
2004 | |
2005 | mutex_unlock(&iter->mutex); |
2006 | |
2007 | - iter->trace->wait_pipe(iter); |
2008 | + ret = iter->trace->wait_pipe(iter); |
2009 | |
2010 | mutex_lock(&iter->mutex); |
2011 | |
2012 | + if (ret) |
2013 | + return ret; |
2014 | + |
2015 | if (signal_pending(current)) |
2016 | return -EINTR; |
2017 | |
2018 | @@ -5111,8 +5116,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, |
2019 | goto out_unlock; |
2020 | } |
2021 | mutex_unlock(&trace_types_lock); |
2022 | - iter->trace->wait_pipe(iter); |
2023 | + ret = iter->trace->wait_pipe(iter); |
2024 | mutex_lock(&trace_types_lock); |
2025 | + if (ret) { |
2026 | + size = ret; |
2027 | + goto out_unlock; |
2028 | + } |
2029 | if (signal_pending(current)) { |
2030 | size = -EINTR; |
2031 | goto out_unlock; |
2032 | @@ -5324,8 +5333,10 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, |
2033 | goto out; |
2034 | } |
2035 | mutex_unlock(&trace_types_lock); |
2036 | - iter->trace->wait_pipe(iter); |
2037 | + ret = iter->trace->wait_pipe(iter); |
2038 | mutex_lock(&trace_types_lock); |
2039 | + if (ret) |
2040 | + goto out; |
2041 | if (signal_pending(current)) { |
2042 | ret = -EINTR; |
2043 | goto out; |
2044 | diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h |
2045 | index 02b592f2d4b7..c8bd809cbd1c 100644 |
2046 | --- a/kernel/trace/trace.h |
2047 | +++ b/kernel/trace/trace.h |
2048 | @@ -336,7 +336,7 @@ struct tracer { |
2049 | void (*stop)(struct trace_array *tr); |
2050 | void (*open)(struct trace_iterator *iter); |
2051 | void (*pipe_open)(struct trace_iterator *iter); |
2052 | - void (*wait_pipe)(struct trace_iterator *iter); |
2053 | + int (*wait_pipe)(struct trace_iterator *iter); |
2054 | void (*close)(struct trace_iterator *iter); |
2055 | void (*pipe_close)(struct trace_iterator *iter); |
2056 | ssize_t (*read)(struct trace_iterator *iter, |
2057 | @@ -552,7 +552,7 @@ void trace_init_global_iter(struct trace_iterator *iter); |
2058 | |
2059 | void tracing_iter_reset(struct trace_iterator *iter, int cpu); |
2060 | |
2061 | -void poll_wait_pipe(struct trace_iterator *iter); |
2062 | +int poll_wait_pipe(struct trace_iterator *iter); |
2063 | |
2064 | void tracing_sched_switch_trace(struct trace_array *tr, |
2065 | struct task_struct *prev, |
2066 | diff --git a/kernel/workqueue.c b/kernel/workqueue.c |
2067 | index b6a394108e3b..b4defdecec8a 100644 |
2068 | --- a/kernel/workqueue.c |
2069 | +++ b/kernel/workqueue.c |
2070 | @@ -3415,6 +3415,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq) |
2071 | } |
2072 | } |
2073 | |
2074 | + dev_set_uevent_suppress(&wq_dev->dev, false); |
2075 | kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD); |
2076 | return 0; |
2077 | } |
2078 | @@ -5026,7 +5027,7 @@ static void __init wq_numa_init(void) |
2079 | BUG_ON(!tbl); |
2080 | |
2081 | for_each_node(node) |
2082 | - BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, |
2083 | + BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL, |
2084 | node_online(node) ? node : NUMA_NO_NODE)); |
2085 | |
2086 | for_each_possible_cpu(cpu) { |
2087 | diff --git a/mm/Kconfig b/mm/Kconfig |
2088 | index 9b63c1584a42..0862816bb455 100644 |
2089 | --- a/mm/Kconfig |
2090 | +++ b/mm/Kconfig |
2091 | @@ -580,3 +580,18 @@ config PGTABLE_MAPPING |
2092 | |
2093 | You can check speed with zsmalloc benchmark: |
2094 | https://github.com/spartacus06/zsmapbench |
2095 | + |
2096 | +config MAX_STACK_SIZE_MB |
2097 | + int "Maximum user stack size for 32-bit processes (MB)" |
2098 | + default 80 |
2099 | + range 8 256 if METAG |
2100 | + range 8 2048 |
2101 | + depends on STACK_GROWSUP && (!64BIT || COMPAT) |
2102 | + help |
2103 | + This is the maximum stack size in Megabytes in the VM layout of 32-bit |
2104 | + user processes when the stack grows upwards (currently only on parisc |
2105 | + and metag arch). The stack will be located at the highest memory |
2106 | + address minus the given value, unless the RLIMIT_STACK hard limit is |
2107 | + changed to a smaller value in which case that is used. |
2108 | + |
2109 | + A sane initial value is 80 MB. |
2110 | diff --git a/mm/mempolicy.c b/mm/mempolicy.c |
2111 | index 9c6288aea4f9..15a8ea031526 100644 |
2112 | --- a/mm/mempolicy.c |
2113 | +++ b/mm/mempolicy.c |
2114 | @@ -2170,7 +2170,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old) |
2115 | } else |
2116 | *new = *old; |
2117 | |
2118 | - rcu_read_lock(); |
2119 | if (current_cpuset_is_being_rebound()) { |
2120 | nodemask_t mems = cpuset_mems_allowed(current); |
2121 | if (new->flags & MPOL_F_REBINDING) |
2122 | @@ -2178,7 +2177,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old) |
2123 | else |
2124 | mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE); |
2125 | } |
2126 | - rcu_read_unlock(); |
2127 | atomic_set(&new->refcnt, 1); |
2128 | return new; |
2129 | } |