Contents of /trunk/kernel-alx/patches-3.12/0118-3.12.19-all-fixes.patch
Revision 2433
Tue May 13 11:02:53 2014 UTC by niro
File size: 57798 byte(s)
-linux-3.12.19
1 | diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801 |
2 | index 7b0dcdb57173..babe2ef16139 100644 |
3 | --- a/Documentation/i2c/busses/i2c-i801 |
4 | +++ b/Documentation/i2c/busses/i2c-i801 |
5 | @@ -26,6 +26,7 @@ Supported adapters: |
6 | * Intel Wellsburg (PCH) |
7 | * Intel Coleto Creek (PCH) |
8 | * Intel Wildcat Point-LP (PCH) |
9 | + * Intel BayTrail (SOC) |
10 | Datasheets: Publicly available at the Intel website |
11 | |
12 | On Intel Patsburg and later chipsets, both the normal host SMBus controller |
13 | diff --git a/Makefile b/Makefile |
14 | index fc0dcf63a8d9..cf5d97e60b39 100644 |
15 | --- a/Makefile |
16 | +++ b/Makefile |
17 | @@ -1,6 +1,6 @@ |
18 | VERSION = 3 |
19 | PATCHLEVEL = 12 |
20 | -SUBLEVEL = 18 |
21 | +SUBLEVEL = 19 |
22 | EXTRAVERSION = |
23 | NAME = One Giant Leap for Frogkind |
24 | |
25 | diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c |
26 | index 304661d21369..5e85ed371364 100644 |
27 | --- a/arch/arm/mm/mmap.c |
28 | +++ b/arch/arm/mm/mmap.c |
29 | @@ -202,13 +202,11 @@ int valid_phys_addr_range(phys_addr_t addr, size_t size) |
30 | } |
31 | |
32 | /* |
33 | - * We don't use supersection mappings for mmap() on /dev/mem, which |
34 | - * means that we can't map the memory area above the 4G barrier into |
35 | - * userspace. |
36 | + * Do not allow /dev/mem mappings beyond the supported physical range. |
37 | */ |
38 | int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) |
39 | { |
40 | - return !(pfn + (size >> PAGE_SHIFT) > 0x00100000); |
41 | + return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT)); |
42 | } |
43 | |
44 | #ifdef CONFIG_STRICT_DEVMEM |
45 | diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S |
46 | index 0c9107285e66..10a0c2aad8cf 100644 |
47 | --- a/arch/parisc/kernel/syscall_table.S |
48 | +++ b/arch/parisc/kernel/syscall_table.S |
49 | @@ -392,7 +392,7 @@ |
50 | ENTRY_COMP(vmsplice) |
51 | ENTRY_COMP(move_pages) /* 295 */ |
52 | ENTRY_SAME(getcpu) |
53 | - ENTRY_SAME(epoll_pwait) |
54 | + ENTRY_COMP(epoll_pwait) |
55 | ENTRY_COMP(statfs64) |
56 | ENTRY_COMP(fstatfs64) |
57 | ENTRY_COMP(kexec_load) /* 300 */ |
58 | diff --git a/arch/powerpc/boot/util.S b/arch/powerpc/boot/util.S |
59 | index 5143228e3e5f..6636b1d7821b 100644 |
60 | --- a/arch/powerpc/boot/util.S |
61 | +++ b/arch/powerpc/boot/util.S |
62 | @@ -71,18 +71,32 @@ udelay: |
63 | add r4,r4,r5 |
64 | addi r4,r4,-1 |
65 | divw r4,r4,r5 /* BUS ticks */ |
66 | +#ifdef CONFIG_8xx |
67 | +1: mftbu r5 |
68 | + mftb r6 |
69 | + mftbu r7 |
70 | +#else |
71 | 1: mfspr r5, SPRN_TBRU |
72 | mfspr r6, SPRN_TBRL |
73 | mfspr r7, SPRN_TBRU |
74 | +#endif |
75 | cmpw 0,r5,r7 |
76 | bne 1b /* Get [synced] base time */ |
77 | addc r9,r6,r4 /* Compute end time */ |
78 | addze r8,r5 |
79 | +#ifdef CONFIG_8xx |
80 | +2: mftbu r5 |
81 | +#else |
82 | 2: mfspr r5, SPRN_TBRU |
83 | +#endif |
84 | cmpw 0,r5,r8 |
85 | blt 2b |
86 | bgt 3f |
87 | +#ifdef CONFIG_8xx |
88 | + mftb r6 |
89 | +#else |
90 | mfspr r6, SPRN_TBRL |
91 | +#endif |
92 | cmpw 0,r6,r9 |
93 | blt 2b |
94 | 3: blr |
95 | diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h |
96 | index c2dcfaa51987..0d2d0f03163b 100644 |
97 | --- a/arch/powerpc/include/asm/ppc_asm.h |
98 | +++ b/arch/powerpc/include/asm/ppc_asm.h |
99 | @@ -438,6 +438,8 @@ BEGIN_FTR_SECTION_NESTED(96); \ |
100 | cmpwi dest,0; \ |
101 | beq- 90b; \ |
102 | END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) |
103 | +#elif defined(CONFIG_8xx) |
104 | +#define MFTB(dest) mftb dest |
105 | #else |
106 | #define MFTB(dest) mfspr dest, SPRN_TBRL |
107 | #endif |
108 | diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h |
109 | index 10d1ef016bf1..7ca729cac073 100644 |
110 | --- a/arch/powerpc/include/asm/reg.h |
111 | +++ b/arch/powerpc/include/asm/reg.h |
112 | @@ -1154,12 +1154,19 @@ |
113 | |
114 | #else /* __powerpc64__ */ |
115 | |
116 | +#if defined(CONFIG_8xx) |
117 | +#define mftbl() ({unsigned long rval; \ |
118 | + asm volatile("mftbl %0" : "=r" (rval)); rval;}) |
119 | +#define mftbu() ({unsigned long rval; \ |
120 | + asm volatile("mftbu %0" : "=r" (rval)); rval;}) |
121 | +#else |
122 | #define mftbl() ({unsigned long rval; \ |
123 | asm volatile("mfspr %0, %1" : "=r" (rval) : \ |
124 | "i" (SPRN_TBRL)); rval;}) |
125 | #define mftbu() ({unsigned long rval; \ |
126 | asm volatile("mfspr %0, %1" : "=r" (rval) : \ |
127 | "i" (SPRN_TBRU)); rval;}) |
128 | +#endif |
129 | #endif /* !__powerpc64__ */ |
130 | |
131 | #define mttbl(v) asm volatile("mttbl %0":: "r"(v)) |
132 | diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h |
133 | index 18908caa1f3b..2cf846edb3fc 100644 |
134 | --- a/arch/powerpc/include/asm/timex.h |
135 | +++ b/arch/powerpc/include/asm/timex.h |
136 | @@ -29,7 +29,11 @@ static inline cycles_t get_cycles(void) |
137 | ret = 0; |
138 | |
139 | __asm__ __volatile__( |
140 | +#ifdef CONFIG_8xx |
141 | + "97: mftb %0\n" |
142 | +#else |
143 | "97: mfspr %0, %2\n" |
144 | +#endif |
145 | "99:\n" |
146 | ".section __ftr_fixup,\"a\"\n" |
147 | ".align 2\n" |
148 | @@ -41,7 +45,11 @@ static inline cycles_t get_cycles(void) |
149 | " .long 0\n" |
150 | " .long 0\n" |
151 | ".previous" |
152 | +#ifdef CONFIG_8xx |
153 | + : "=r" (ret) : "i" (CPU_FTR_601)); |
154 | +#else |
155 | : "=r" (ret) : "i" (CPU_FTR_601), "i" (SPRN_TBRL)); |
156 | +#endif |
157 | return ret; |
158 | #endif |
159 | } |
160 | diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S |
161 | index 6b1f2a6d5517..6b2b69616e77 100644 |
162 | --- a/arch/powerpc/kernel/vdso32/gettimeofday.S |
163 | +++ b/arch/powerpc/kernel/vdso32/gettimeofday.S |
164 | @@ -232,9 +232,15 @@ __do_get_tspec: |
165 | lwz r6,(CFG_TB_ORIG_STAMP+4)(r9) |
166 | |
167 | /* Get a stable TB value */ |
168 | +#ifdef CONFIG_8xx |
169 | +2: mftbu r3 |
170 | + mftbl r4 |
171 | + mftbu r0 |
172 | +#else |
173 | 2: mfspr r3, SPRN_TBRU |
174 | mfspr r4, SPRN_TBRL |
175 | mfspr r0, SPRN_TBRU |
176 | +#endif |
177 | cmplw cr0,r3,r0 |
178 | bne- 2b |
179 | |
180 | diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig |
181 | index 78c4fdb91bc5..4e5683877b93 100644 |
182 | --- a/arch/sparc/Kconfig |
183 | +++ b/arch/sparc/Kconfig |
184 | @@ -25,7 +25,7 @@ config SPARC |
185 | select RTC_DRV_M48T59 |
186 | select HAVE_DMA_ATTRS |
187 | select HAVE_DMA_API_DEBUG |
188 | - select HAVE_ARCH_JUMP_LABEL |
189 | + select HAVE_ARCH_JUMP_LABEL if SPARC64 |
190 | select GENERIC_IRQ_SHOW |
191 | select ARCH_WANT_IPC_PARSE_VERSION |
192 | select USE_GENERIC_SMP_HELPERS if SMP |
193 | diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c |
194 | index baebab215492..b9cc9763faf4 100644 |
195 | --- a/arch/sparc/kernel/process_64.c |
196 | +++ b/arch/sparc/kernel/process_64.c |
197 | @@ -57,9 +57,12 @@ void arch_cpu_idle(void) |
198 | { |
199 | if (tlb_type != hypervisor) { |
200 | touch_nmi_watchdog(); |
201 | + local_irq_enable(); |
202 | } else { |
203 | unsigned long pstate; |
204 | |
205 | + local_irq_enable(); |
206 | + |
207 | /* The sun4v sleeping code requires that we have PSTATE.IE cleared over |
208 | * the cpu sleep hypervisor call. |
209 | */ |
210 | @@ -81,7 +84,6 @@ void arch_cpu_idle(void) |
211 | : "=&r" (pstate) |
212 | : "i" (PSTATE_IE)); |
213 | } |
214 | - local_irq_enable(); |
215 | } |
216 | |
217 | #ifdef CONFIG_HOTPLUG_CPU |
218 | diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S |
219 | index d950197a17e1..6dee79575791 100644 |
220 | --- a/arch/sparc/kernel/syscalls.S |
221 | +++ b/arch/sparc/kernel/syscalls.S |
222 | @@ -189,7 +189,8 @@ linux_sparc_syscall32: |
223 | mov %i0, %l5 ! IEU1 |
224 | 5: call %l7 ! CTI Group brk forced |
225 | srl %i5, 0, %o5 ! IEU1 |
226 | - ba,a,pt %xcc, 3f |
227 | + ba,pt %xcc, 3f |
228 | + sra %o0, 0, %o0 |
229 | |
230 | /* Linux native system calls enter here... */ |
231 | .align 32 |
232 | @@ -217,7 +218,6 @@ linux_sparc_syscall: |
233 | 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] |
234 | ret_sys_call: |
235 | ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3 |
236 | - sra %o0, 0, %o0 |
237 | mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2 |
238 | sllx %g2, 32, %g2 |
239 | |
240 | diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c |
241 | index b3cd3ebae077..7eb30af8c7a2 100644 |
242 | --- a/arch/x86/kernel/early-quirks.c |
243 | +++ b/arch/x86/kernel/early-quirks.c |
244 | @@ -203,18 +203,15 @@ static void __init intel_remapping_check(int num, int slot, int func) |
245 | revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID); |
246 | |
247 | /* |
248 | - * Revision 13 of all triggering devices id in this quirk have |
249 | - * a problem draining interrupts when irq remapping is enabled, |
250 | - * and should be flagged as broken. Additionally revisions 0x12 |
251 | - * and 0x22 of device id 0x3405 has this problem. |
252 | + * Revision <= 13 of all triggering devices id in this quirk |
253 | + * have a problem draining interrupts when irq remapping is |
254 | + * enabled, and should be flagged as broken. Additionally |
255 | + * revision 0x22 of device id 0x3405 has this problem. |
256 | */ |
257 | - if (revision == 0x13) |
258 | + if (revision <= 0x13) |
259 | set_irq_remapping_broken(); |
260 | - else if ((device == 0x3405) && |
261 | - ((revision == 0x12) || |
262 | - (revision == 0x22))) |
263 | + else if (device == 0x3405 && revision == 0x22) |
264 | set_irq_remapping_broken(); |
265 | - |
266 | } |
267 | |
268 | /* |
269 | diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c |
270 | index 52dbf1e400dc..ff898bbf579d 100644 |
271 | --- a/arch/x86/kernel/quirks.c |
272 | +++ b/arch/x86/kernel/quirks.c |
273 | @@ -571,3 +571,40 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5, |
274 | quirk_amd_nb_node); |
275 | |
276 | #endif |
277 | + |
278 | +#ifdef CONFIG_PCI |
279 | +/* |
280 | + * Processor does not ensure DRAM scrub read/write sequence |
281 | + * is atomic wrt accesses to CC6 save state area. Therefore |
282 | + * if a concurrent scrub read/write access is to same address |
283 | + * the entry may appear as if it is not written. This quirk |
284 | + * applies to Fam16h models 00h-0Fh |
285 | + * |
286 | + * See "Revision Guide" for AMD F16h models 00h-0fh, |
287 | + * document 51810 rev. 3.04, Nov 2013 |
288 | + */ |
289 | +static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev) |
290 | +{ |
291 | + u32 val; |
292 | + |
293 | + /* |
294 | + * Suggested workaround: |
295 | + * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b |
296 | + */ |
297 | + pci_read_config_dword(dev, 0x58, &val); |
298 | + if (val & 0x1F) { |
299 | + val &= ~(0x1F); |
300 | + pci_write_config_dword(dev, 0x58, val); |
301 | + } |
302 | + |
303 | + pci_read_config_dword(dev, 0x5C, &val); |
304 | + if (val & BIT(0)) { |
305 | + val &= ~BIT(0); |
306 | + pci_write_config_dword(dev, 0x5c, val); |
307 | + } |
308 | +} |
309 | + |
310 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3, |
311 | + amd_disable_seq_and_redirect_scrub); |
312 | + |
313 | +#endif |
314 | diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c |
315 | index a55773801c5f..3aa89eb8dbbd 100644 |
316 | --- a/drivers/acpi/button.c |
317 | +++ b/drivers/acpi/button.c |
318 | @@ -302,6 +302,10 @@ static void acpi_button_notify(struct acpi_device *device, u32 event) |
319 | input_sync(input); |
320 | |
321 | pm_wakeup_event(&device->dev, 0); |
322 | + acpi_bus_generate_netlink_event( |
323 | + device->pnp.device_class, |
324 | + dev_name(&device->dev), |
325 | + event, ++button->pushed); |
326 | } |
327 | break; |
328 | default: |
329 | diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c |
330 | index 3cc0b92e3544..51b700838f64 100644 |
331 | --- a/drivers/acpi/ec.c |
332 | +++ b/drivers/acpi/ec.c |
333 | @@ -213,13 +213,13 @@ unlock: |
334 | spin_unlock_irqrestore(&ec->lock, flags); |
335 | } |
336 | |
337 | -static int acpi_ec_sync_query(struct acpi_ec *ec); |
338 | +static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data); |
339 | |
340 | static int ec_check_sci_sync(struct acpi_ec *ec, u8 state) |
341 | { |
342 | if (state & ACPI_EC_FLAG_SCI) { |
343 | if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) |
344 | - return acpi_ec_sync_query(ec); |
345 | + return acpi_ec_sync_query(ec, NULL); |
346 | } |
347 | return 0; |
348 | } |
349 | @@ -471,10 +471,8 @@ acpi_handle ec_get_handle(void) |
350 | |
351 | EXPORT_SYMBOL(ec_get_handle); |
352 | |
353 | -static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data); |
354 | - |
355 | /* |
356 | - * Clears stale _Q events that might have accumulated in the EC. |
357 | + * Process _Q events that might have accumulated in the EC. |
358 | * Run with locked ec mutex. |
359 | */ |
360 | static void acpi_ec_clear(struct acpi_ec *ec) |
361 | @@ -483,7 +481,7 @@ static void acpi_ec_clear(struct acpi_ec *ec) |
362 | u8 value = 0; |
363 | |
364 | for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { |
365 | - status = acpi_ec_query_unlocked(ec, &value); |
366 | + status = acpi_ec_sync_query(ec, &value); |
367 | if (status || !value) |
368 | break; |
369 | } |
370 | @@ -610,13 +608,18 @@ static void acpi_ec_run(void *cxt) |
371 | kfree(handler); |
372 | } |
373 | |
374 | -static int acpi_ec_sync_query(struct acpi_ec *ec) |
375 | +static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data) |
376 | { |
377 | u8 value = 0; |
378 | int status; |
379 | struct acpi_ec_query_handler *handler, *copy; |
380 | - if ((status = acpi_ec_query_unlocked(ec, &value))) |
381 | + |
382 | + status = acpi_ec_query_unlocked(ec, &value); |
383 | + if (data) |
384 | + *data = value; |
385 | + if (status) |
386 | return status; |
387 | + |
388 | list_for_each_entry(handler, &ec->list, node) { |
389 | if (value == handler->query_bit) { |
390 | /* have custom handler for this bit */ |
391 | @@ -639,7 +642,7 @@ static void acpi_ec_gpe_query(void *ec_cxt) |
392 | if (!ec) |
393 | return; |
394 | mutex_lock(&ec->mutex); |
395 | - acpi_ec_sync_query(ec); |
396 | + acpi_ec_sync_query(ec, NULL); |
397 | mutex_unlock(&ec->mutex); |
398 | } |
399 | |
400 | diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c |
401 | index 99e5158456d8..c09e6f646fe4 100644 |
402 | --- a/drivers/acpi/sleep.c |
403 | +++ b/drivers/acpi/sleep.c |
404 | @@ -675,11 +675,8 @@ static void acpi_hibernation_leave(void) |
405 | /* Reprogram control registers */ |
406 | acpi_leave_sleep_state_prep(ACPI_STATE_S4); |
407 | /* Check the hardware signature */ |
408 | - if (facs && s4_hardware_signature != facs->hardware_signature) { |
409 | - printk(KERN_EMERG "ACPI: Hardware changed while hibernated, " |
410 | - "cannot resume!\n"); |
411 | - panic("ACPI S4 hardware signature mismatch"); |
412 | - } |
413 | + if (facs && s4_hardware_signature != facs->hardware_signature) |
414 | + pr_crit("ACPI: Hardware changed while hibernated, success doubtful!\n"); |
415 | /* Restore the NVS memory area */ |
416 | suspend_nvs_restore(); |
417 | /* Allow EC transactions to happen. */ |
418 | diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c |
419 | index d593c99121c3..6e30356d3e42 100644 |
420 | --- a/drivers/bluetooth/btusb.c |
421 | +++ b/drivers/bluetooth/btusb.c |
422 | @@ -223,6 +223,7 @@ static struct usb_device_id blacklist_table[] = { |
423 | |
424 | /* Intel Bluetooth device */ |
425 | { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL }, |
426 | + { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL }, |
427 | |
428 | { } /* Terminating entry */ |
429 | }; |
430 | diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c |
431 | index 15e4a6031934..e5bdd1a2f541 100644 |
432 | --- a/drivers/char/ipmi/ipmi_si_intf.c |
433 | +++ b/drivers/char/ipmi/ipmi_si_intf.c |
434 | @@ -249,6 +249,9 @@ struct smi_info { |
435 | /* The timer for this si. */ |
436 | struct timer_list si_timer; |
437 | |
438 | + /* This flag is set, if the timer is running (timer_pending() isn't enough) */ |
439 | + bool timer_running; |
440 | + |
441 | /* The time (in jiffies) the last timeout occurred at. */ |
442 | unsigned long last_timeout_jiffies; |
443 | |
444 | @@ -435,6 +438,13 @@ static void start_clear_flags(struct smi_info *smi_info) |
445 | smi_info->si_state = SI_CLEARING_FLAGS; |
446 | } |
447 | |
448 | +static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) |
449 | +{ |
450 | + smi_info->last_timeout_jiffies = jiffies; |
451 | + mod_timer(&smi_info->si_timer, new_val); |
452 | + smi_info->timer_running = true; |
453 | +} |
454 | + |
455 | /* |
456 | * When we have a situtaion where we run out of memory and cannot |
457 | * allocate messages, we just leave them in the BMC and run the system |
458 | @@ -447,8 +457,7 @@ static inline void disable_si_irq(struct smi_info *smi_info) |
459 | start_disable_irq(smi_info); |
460 | smi_info->interrupt_disabled = 1; |
461 | if (!atomic_read(&smi_info->stop_operation)) |
462 | - mod_timer(&smi_info->si_timer, |
463 | - jiffies + SI_TIMEOUT_JIFFIES); |
464 | + smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); |
465 | } |
466 | } |
467 | |
468 | @@ -908,15 +917,7 @@ static void sender(void *send_info, |
469 | list_add_tail(&msg->link, &smi_info->xmit_msgs); |
470 | |
471 | if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) { |
472 | - /* |
473 | - * last_timeout_jiffies is updated here to avoid |
474 | - * smi_timeout() handler passing very large time_diff |
475 | - * value to smi_event_handler() that causes |
476 | - * the send command to abort. |
477 | - */ |
478 | - smi_info->last_timeout_jiffies = jiffies; |
479 | - |
480 | - mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES); |
481 | + smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); |
482 | |
483 | if (smi_info->thread) |
484 | wake_up_process(smi_info->thread); |
485 | @@ -1005,6 +1006,17 @@ static int ipmi_thread(void *data) |
486 | |
487 | spin_lock_irqsave(&(smi_info->si_lock), flags); |
488 | smi_result = smi_event_handler(smi_info, 0); |
489 | + |
490 | + /* |
491 | + * If the driver is doing something, there is a possible |
492 | + * race with the timer. If the timer handler see idle, |
493 | + * and the thread here sees something else, the timer |
494 | + * handler won't restart the timer even though it is |
495 | + * required. So start it here if necessary. |
496 | + */ |
497 | + if (smi_result != SI_SM_IDLE && !smi_info->timer_running) |
498 | + smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); |
499 | + |
500 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); |
501 | busy_wait = ipmi_thread_busy_wait(smi_result, smi_info, |
502 | &busy_until); |
503 | @@ -1074,10 +1086,6 @@ static void smi_timeout(unsigned long data) |
504 | * SI_USEC_PER_JIFFY); |
505 | smi_result = smi_event_handler(smi_info, time_diff); |
506 | |
507 | - spin_unlock_irqrestore(&(smi_info->si_lock), flags); |
508 | - |
509 | - smi_info->last_timeout_jiffies = jiffies_now; |
510 | - |
511 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { |
512 | /* Running with interrupts, only do long timeouts. */ |
513 | timeout = jiffies + SI_TIMEOUT_JIFFIES; |
514 | @@ -1099,7 +1107,10 @@ static void smi_timeout(unsigned long data) |
515 | |
516 | do_mod_timer: |
517 | if (smi_result != SI_SM_IDLE) |
518 | - mod_timer(&(smi_info->si_timer), timeout); |
519 | + smi_mod_timer(smi_info, timeout); |
520 | + else |
521 | + smi_info->timer_running = false; |
522 | + spin_unlock_irqrestore(&(smi_info->si_lock), flags); |
523 | } |
524 | |
525 | static irqreturn_t si_irq_handler(int irq, void *data) |
526 | @@ -1147,8 +1158,7 @@ static int smi_start_processing(void *send_info, |
527 | |
528 | /* Set up the timer that drives the interface. */ |
529 | setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi); |
530 | - new_smi->last_timeout_jiffies = jiffies; |
531 | - mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES); |
532 | + smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES); |
533 | |
534 | /* |
535 | * Check if the user forcefully enabled the daemon. |
536 | diff --git a/drivers/char/mem.c b/drivers/char/mem.c |
537 | index f895a8c8a244..d1f4675809f8 100644 |
538 | --- a/drivers/char/mem.c |
539 | +++ b/drivers/char/mem.c |
540 | @@ -100,6 +100,9 @@ static ssize_t read_mem(struct file *file, char __user *buf, |
541 | ssize_t read, sz; |
542 | char *ptr; |
543 | |
544 | + if (p != *ppos) |
545 | + return 0; |
546 | + |
547 | if (!valid_phys_addr_range(p, count)) |
548 | return -EFAULT; |
549 | read = 0; |
550 | @@ -158,6 +161,9 @@ static ssize_t write_mem(struct file *file, const char __user *buf, |
551 | unsigned long copied; |
552 | void *ptr; |
553 | |
554 | + if (p != *ppos) |
555 | + return -EFBIG; |
556 | + |
557 | if (!valid_phys_addr_range(p, count)) |
558 | return -EFAULT; |
559 | |
560 | diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c |
561 | index 3c9e4e98c651..d43a6202a5c5 100644 |
562 | --- a/drivers/edac/amd64_edac.c |
563 | +++ b/drivers/edac/amd64_edac.c |
564 | @@ -1238,9 +1238,17 @@ static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr, |
565 | if (num_dcts_intlv == 2) { |
566 | select = (sys_addr >> 8) & 0x3; |
567 | channel = select ? 0x3 : 0; |
568 | - } else if (num_dcts_intlv == 4) |
569 | - channel = (sys_addr >> 8) & 0x7; |
570 | - |
571 | + } else if (num_dcts_intlv == 4) { |
572 | + u8 intlv_addr = dct_sel_interleave_addr(pvt); |
573 | + switch (intlv_addr) { |
574 | + case 0x4: |
575 | + channel = (sys_addr >> 8) & 0x3; |
576 | + break; |
577 | + case 0x5: |
578 | + channel = (sys_addr >> 9) & 0x3; |
579 | + break; |
580 | + } |
581 | + } |
582 | return channel; |
583 | } |
584 | |
585 | diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig |
586 | index 4e901081e287..01892bdfa7b7 100644 |
587 | --- a/drivers/i2c/busses/Kconfig |
588 | +++ b/drivers/i2c/busses/Kconfig |
589 | @@ -110,6 +110,7 @@ config I2C_I801 |
590 | Wellsburg (PCH) |
591 | Coleto Creek (PCH) |
592 | Wildcat Point-LP (PCH) |
593 | + BayTrail (SOC) |
594 | |
595 | This driver can also be built as a module. If so, the module |
596 | will be called i2c-i801. |
597 | diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c |
598 | index 737e29866887..0444f7aa1046 100644 |
599 | --- a/drivers/i2c/busses/i2c-i801.c |
600 | +++ b/drivers/i2c/busses/i2c-i801.c |
601 | @@ -60,6 +60,7 @@ |
602 | Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes |
603 | Coleto Creek (PCH) 0x23b0 32 hard yes yes yes |
604 | Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes |
605 | + BayTrail (SOC) 0x0f12 32 hard yes yes yes |
606 | |
607 | Features supported by this driver: |
608 | Software PEC no |
609 | @@ -161,6 +162,7 @@ |
610 | STATUS_ERROR_FLAGS) |
611 | |
612 | /* Older devices have their ID defined in <linux/pci_ids.h> */ |
613 | +#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12 |
614 | #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 |
615 | #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 |
616 | /* Patsburg also has three 'Integrated Device Function' SMBus controllers */ |
617 | @@ -822,6 +824,7 @@ static DEFINE_PCI_DEVICE_TABLE(i801_ids) = { |
618 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) }, |
619 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) }, |
620 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) }, |
621 | + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS) }, |
622 | { 0, } |
623 | }; |
624 | |
625 | diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c |
626 | index 72531f008a5e..5d2edb4b60aa 100644 |
627 | --- a/drivers/iommu/amd_iommu.c |
628 | +++ b/drivers/iommu/amd_iommu.c |
629 | @@ -982,10 +982,10 @@ static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid, |
630 | address &= ~(0xfffULL); |
631 | |
632 | cmd->data[0] = devid; |
633 | - cmd->data[0] |= (pasid & 0xff) << 16; |
634 | + cmd->data[0] |= ((pasid >> 8) & 0xff) << 16; |
635 | cmd->data[0] |= (qdep & 0xff) << 24; |
636 | cmd->data[1] = devid; |
637 | - cmd->data[1] |= ((pasid >> 8) & 0xfff) << 16; |
638 | + cmd->data[1] |= (pasid & 0xff) << 16; |
639 | cmd->data[2] = lower_32_bits(address); |
640 | cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK; |
641 | cmd->data[3] = upper_32_bits(address); |
642 | diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c |
643 | index 4e8cfa2ac803..779016068a82 100644 |
644 | --- a/drivers/net/ethernet/dec/tulip/tulip_core.c |
645 | +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c |
646 | @@ -1940,6 +1940,7 @@ static void tulip_remove_one(struct pci_dev *pdev) |
647 | free_netdev (dev); |
648 | pci_release_regions (pdev); |
649 | pci_set_drvdata (pdev, NULL); |
650 | + pci_disable_device(pdev); |
651 | |
652 | /* pci_power_off (pdev, -1); */ |
653 | } |
654 | diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c |
655 | index 4ef786775acb..9cb400c4cbaa 100644 |
656 | --- a/drivers/net/ethernet/intel/e1000e/netdev.c |
657 | +++ b/drivers/net/ethernet/intel/e1000e/netdev.c |
658 | @@ -2976,11 +2976,21 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) |
659 | u32 rctl, rfctl; |
660 | u32 pages = 0; |
661 | |
662 | - /* Workaround Si errata on PCHx - configure jumbo frame flow */ |
663 | - if ((hw->mac.type >= e1000_pch2lan) && |
664 | - (adapter->netdev->mtu > ETH_DATA_LEN) && |
665 | - e1000_lv_jumbo_workaround_ich8lan(hw, true)) |
666 | - e_dbg("failed to enable jumbo frame workaround mode\n"); |
667 | + /* Workaround Si errata on PCHx - configure jumbo frame flow. |
668 | + * If jumbo frames not set, program related MAC/PHY registers |
669 | + * to h/w defaults |
670 | + */ |
671 | + if (hw->mac.type >= e1000_pch2lan) { |
672 | + s32 ret_val; |
673 | + |
674 | + if (adapter->netdev->mtu > ETH_DATA_LEN) |
675 | + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); |
676 | + else |
677 | + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); |
678 | + |
679 | + if (ret_val) |
680 | + e_dbg("failed to enable|disable jumbo frame workaround mode\n"); |
681 | + } |
682 | |
683 | /* Program MC offset vector base */ |
684 | rctl = er32(RCTL); |
685 | diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c |
686 | index edc5d105ff98..03a56dfba2db 100644 |
687 | --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c |
688 | +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c |
689 | @@ -426,6 +426,12 @@ static int brcms_ops_start(struct ieee80211_hw *hw) |
690 | bool blocked; |
691 | int err; |
692 | |
693 | + if (!wl->ucode.bcm43xx_bomminor) { |
694 | + err = brcms_request_fw(wl, wl->wlc->hw->d11core); |
695 | + if (err) |
696 | + return -ENOENT; |
697 | + } |
698 | + |
699 | ieee80211_wake_queues(hw); |
700 | spin_lock_bh(&wl->lock); |
701 | blocked = brcms_rfkill_set_hw_state(wl); |
702 | @@ -433,14 +439,6 @@ static int brcms_ops_start(struct ieee80211_hw *hw) |
703 | if (!blocked) |
704 | wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); |
705 | |
706 | - if (!wl->ucode.bcm43xx_bomminor) { |
707 | - err = brcms_request_fw(wl, wl->wlc->hw->d11core); |
708 | - if (err) { |
709 | - brcms_remove(wl->wlc->hw->d11core); |
710 | - return -ENOENT; |
711 | - } |
712 | - } |
713 | - |
714 | spin_lock_bh(&wl->lock); |
715 | /* avoid acknowledging frames before a non-monitor device is added */ |
716 | wl->mute_tx = true; |
717 | diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c |
718 | index c10e9ac9bbbc..510994a7eca0 100644 |
719 | --- a/drivers/pci/host/pcie-designware.c |
720 | +++ b/drivers/pci/host/pcie-designware.c |
721 | @@ -268,13 +268,13 @@ static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev) |
722 | dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, |
723 | PCIE_ATU_VIEWPORT); |
724 | dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1); |
725 | - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); |
726 | dw_pcie_writel_rc(pp, pp->cfg1_base, PCIE_ATU_LOWER_BASE); |
727 | dw_pcie_writel_rc(pp, (pp->cfg1_base >> 32), PCIE_ATU_UPPER_BASE); |
728 | dw_pcie_writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1, |
729 | PCIE_ATU_LIMIT); |
730 | dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); |
731 | dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); |
732 | + dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); |
733 | } |
734 | |
735 | static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp) |
736 | @@ -283,7 +283,6 @@ static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp) |
737 | dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0, |
738 | PCIE_ATU_VIEWPORT); |
739 | dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1); |
740 | - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); |
741 | dw_pcie_writel_rc(pp, pp->mem_base, PCIE_ATU_LOWER_BASE); |
742 | dw_pcie_writel_rc(pp, (pp->mem_base >> 32), PCIE_ATU_UPPER_BASE); |
743 | dw_pcie_writel_rc(pp, pp->mem_base + pp->config.mem_size - 1, |
744 | @@ -291,6 +290,7 @@ static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp) |
745 | dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET); |
746 | dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr), |
747 | PCIE_ATU_UPPER_TARGET); |
748 | + dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); |
749 | } |
750 | |
751 | static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp) |
752 | @@ -299,7 +299,6 @@ static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp) |
753 | dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, |
754 | PCIE_ATU_VIEWPORT); |
755 | dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1); |
756 | - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); |
757 | dw_pcie_writel_rc(pp, pp->io_base, PCIE_ATU_LOWER_BASE); |
758 | dw_pcie_writel_rc(pp, (pp->io_base >> 32), PCIE_ATU_UPPER_BASE); |
759 | dw_pcie_writel_rc(pp, pp->io_base + pp->config.io_size - 1, |
760 | @@ -307,6 +306,7 @@ static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp) |
761 | dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET); |
762 | dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr), |
763 | PCIE_ATU_UPPER_TARGET); |
764 | + dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); |
765 | } |
766 | |
767 | static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, |
768 | @@ -532,7 +532,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp) |
769 | |
770 | /* setup RC BARs */ |
771 | dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0); |
772 | - dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_1); |
773 | + dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1); |
774 | |
775 | /* setup interrupt pins */ |
776 | dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE, &val); |
777 | diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c |
778 | index c55f234b29e6..26321f9f5caf 100644 |
779 | --- a/drivers/staging/comedi/drivers/8255_pci.c |
780 | +++ b/drivers/staging/comedi/drivers/8255_pci.c |
781 | @@ -56,6 +56,7 @@ Configuration Options: not applicable, uses PCI auto config |
782 | #include "../comedidev.h" |
783 | |
784 | #include "8255.h" |
785 | +#include "mite.h" |
786 | |
787 | enum pci_8255_boardid { |
788 | BOARD_ADLINK_PCI7224, |
789 | @@ -79,6 +80,7 @@ struct pci_8255_boardinfo { |
790 | const char *name; |
791 | int dio_badr; |
792 | int n_8255; |
793 | + unsigned int has_mite:1; |
794 | }; |
795 | |
796 | static const struct pci_8255_boardinfo pci_8255_boards[] = { |
797 | @@ -126,36 +128,43 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = { |
798 | .name = "ni_pci-dio-96", |
799 | .dio_badr = 1, |
800 | .n_8255 = 4, |
801 | + .has_mite = 1, |
802 | }, |
803 | [BOARD_NI_PCIDIO96B] = { |
804 | .name = "ni_pci-dio-96b", |
805 | .dio_badr = 1, |
806 | .n_8255 = 4, |
807 | + .has_mite = 1, |
808 | }, |
809 | [BOARD_NI_PXI6508] = { |
810 | .name = "ni_pxi-6508", |
811 | .dio_badr = 1, |
812 | .n_8255 = 4, |
813 | + .has_mite = 1, |
814 | }, |
815 | [BOARD_NI_PCI6503] = { |
816 | .name = "ni_pci-6503", |
817 | .dio_badr = 1, |
818 | .n_8255 = 1, |
819 | + .has_mite = 1, |
820 | }, |
821 | [BOARD_NI_PCI6503B] = { |
822 | .name = "ni_pci-6503b", |
823 | .dio_badr = 1, |
824 | .n_8255 = 1, |
825 | + .has_mite = 1, |
826 | }, |
827 | [BOARD_NI_PCI6503X] = { |
828 | .name = "ni_pci-6503x", |
829 | .dio_badr = 1, |
830 | .n_8255 = 1, |
831 | + .has_mite = 1, |
832 | }, |
833 | [BOARD_NI_PXI_6503] = { |
834 | .name = "ni_pxi-6503", |
835 | .dio_badr = 1, |
836 | .n_8255 = 1, |
837 | + .has_mite = 1, |
838 | }, |
839 | }; |
840 | |
841 | @@ -163,6 +172,25 @@ struct pci_8255_private { |
842 | void __iomem *mmio_base; |
843 | }; |
844 | |
845 | +static int pci_8255_mite_init(struct pci_dev *pcidev) |
846 | +{ |
847 | + void __iomem *mite_base; |
848 | + u32 main_phys_addr; |
849 | + |
850 | + /* ioremap the MITE registers (BAR 0) temporarily */ |
851 | + mite_base = pci_ioremap_bar(pcidev, 0); |
852 | + if (!mite_base) |
853 | + return -ENOMEM; |
854 | + |
855 | + /* set data window to main registers (BAR 1) */ |
856 | + main_phys_addr = pci_resource_start(pcidev, 1); |
857 | + writel(main_phys_addr | WENAB, mite_base + MITE_IODWBSR); |
858 | + |
859 | + /* finished with MITE registers */ |
860 | + iounmap(mite_base); |
861 | + return 0; |
862 | +} |
863 | + |
864 | static int pci_8255_mmio(int dir, int port, int data, unsigned long iobase) |
865 | { |
866 | void __iomem *mmio_base = (void __iomem *)iobase; |
867 | @@ -201,6 +229,12 @@ static int pci_8255_auto_attach(struct comedi_device *dev, |
868 | if (ret) |
869 | return ret; |
870 | |
871 | + if (board->has_mite) { |
872 | + ret = pci_8255_mite_init(pcidev); |
873 | + if (ret) |
874 | + return ret; |
875 | + } |
876 | + |
877 | is_mmio = (pci_resource_flags(pcidev, board->dio_badr) & |
878 | IORESOURCE_MEM) != 0; |
879 | if (is_mmio) { |
880 | diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c |
881 | index 8fd72ff9436e..d917a34e94bb 100644 |
882 | --- a/drivers/tty/ipwireless/tty.c |
883 | +++ b/drivers/tty/ipwireless/tty.c |
884 | @@ -177,9 +177,6 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data, |
885 | ": %d chars not inserted to flip buffer!\n", |
886 | length - work); |
887 | |
888 | - /* |
889 | - * This may sleep if ->low_latency is set |
890 | - */ |
891 | if (work) |
892 | tty_flip_buffer_push(&tty->port); |
893 | } |
894 | diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c |
895 | index c043136fbe51..2b52d807934e 100644 |
896 | --- a/drivers/tty/tty_buffer.c |
897 | +++ b/drivers/tty/tty_buffer.c |
898 | @@ -332,14 +332,11 @@ EXPORT_SYMBOL(tty_insert_flip_string_flags); |
899 | * Takes any pending buffers and transfers their ownership to the |
900 | * ldisc side of the queue. It then schedules those characters for |
901 | * processing by the line discipline. |
902 | - * Note that this function can only be used when the low_latency flag |
903 | - * is unset. Otherwise the workqueue won't be flushed. |
904 | */ |
905 | |
906 | void tty_schedule_flip(struct tty_port *port) |
907 | { |
908 | struct tty_bufhead *buf = &port->buf; |
909 | - WARN_ON(port->low_latency); |
910 | |
911 | buf->tail->commit = buf->tail->used; |
912 | schedule_work(&buf->work); |
913 | @@ -487,17 +484,15 @@ static void flush_to_ldisc(struct work_struct *work) |
914 | */ |
915 | void tty_flush_to_ldisc(struct tty_struct *tty) |
916 | { |
917 | - if (!tty->port->low_latency) |
918 | - flush_work(&tty->port->buf.work); |
919 | + flush_work(&tty->port->buf.work); |
920 | } |
921 | |
922 | /** |
923 | * tty_flip_buffer_push - terminal |
924 | * @port: tty port to push |
925 | * |
926 | - * Queue a push of the terminal flip buffers to the line discipline. This |
927 | - * function must not be called from IRQ context if port->low_latency is |
928 | - * set. |
929 | + * Queue a push of the terminal flip buffers to the line discipline. |
930 | + * Can be called from IRQ/atomic context. |
931 | * |
932 | * In the event of the queue being busy for flipping the work will be |
933 | * held off and retried later. |
934 | @@ -505,14 +500,7 @@ void tty_flush_to_ldisc(struct tty_struct *tty) |
935 | |
936 | void tty_flip_buffer_push(struct tty_port *port) |
937 | { |
938 | - struct tty_bufhead *buf = &port->buf; |
939 | - |
940 | - buf->tail->commit = buf->tail->used; |
941 | - |
942 | - if (port->low_latency) |
943 | - flush_to_ldisc(&buf->work); |
944 | - else |
945 | - schedule_work(&buf->work); |
946 | + tty_schedule_flip(port); |
947 | } |
948 | EXPORT_SYMBOL(tty_flip_buffer_push); |
949 | |
950 | diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c |
951 | index c74a00ad7add..d3448a90f0f9 100644 |
952 | --- a/drivers/tty/tty_io.c |
953 | +++ b/drivers/tty/tty_io.c |
954 | @@ -1271,12 +1271,13 @@ static void pty_line_name(struct tty_driver *driver, int index, char *p) |
955 | * |
956 | * Locking: None |
957 | */ |
958 | -static void tty_line_name(struct tty_driver *driver, int index, char *p) |
959 | +static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p) |
960 | { |
961 | if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE) |
962 | - strcpy(p, driver->name); |
963 | + return sprintf(p, "%s", driver->name); |
964 | else |
965 | - sprintf(p, "%s%d", driver->name, index + driver->name_base); |
966 | + return sprintf(p, "%s%d", driver->name, |
967 | + index + driver->name_base); |
968 | } |
969 | |
970 | /** |
971 | @@ -3545,9 +3546,19 @@ static ssize_t show_cons_active(struct device *dev, |
972 | if (i >= ARRAY_SIZE(cs)) |
973 | break; |
974 | } |
975 | - while (i--) |
976 | - count += sprintf(buf + count, "%s%d%c", |
977 | - cs[i]->name, cs[i]->index, i ? ' ':'\n'); |
978 | + while (i--) { |
979 | + int index = cs[i]->index; |
980 | + struct tty_driver *drv = cs[i]->device(cs[i], &index); |
981 | + |
982 | + /* don't resolve tty0 as some programs depend on it */ |
983 | + if (drv && (cs[i]->index > 0 || drv->major != TTY_MAJOR)) |
984 | + count += tty_line_name(drv, index, buf + count); |
985 | + else |
986 | + count += sprintf(buf + count, "%s%d", |
987 | + cs[i]->name, cs[i]->index); |
988 | + |
989 | + count += sprintf(buf + count, "%c", i ? ' ':'\n'); |
990 | + } |
991 | console_unlock(); |
992 | |
993 | return count; |
994 | diff --git a/drivers/usb/atm/usbatm.h b/drivers/usb/atm/usbatm.h |
995 | index 5651231a7437..f3eecd967a8a 100644 |
996 | --- a/drivers/usb/atm/usbatm.h |
997 | +++ b/drivers/usb/atm/usbatm.h |
998 | @@ -34,6 +34,7 @@ |
999 | #include <linux/stringify.h> |
1000 | #include <linux/usb.h> |
1001 | #include <linux/mutex.h> |
1002 | +#include <linux/ratelimit.h> |
1003 | |
1004 | /* |
1005 | #define VERBOSE_DEBUG |
1006 | @@ -59,13 +60,12 @@ |
1007 | atm_printk(KERN_INFO, instance , format , ## arg) |
1008 | #define atm_warn(instance, format, arg...) \ |
1009 | atm_printk(KERN_WARNING, instance , format , ## arg) |
1010 | -#define atm_dbg(instance, format, arg...) \ |
1011 | - dynamic_pr_debug("ATM dev %d: " format , \ |
1012 | - (instance)->atm_dev->number , ## arg) |
1013 | -#define atm_rldbg(instance, format, arg...) \ |
1014 | - if (printk_ratelimit()) \ |
1015 | - atm_dbg(instance , format , ## arg) |
1016 | - |
1017 | +#define atm_dbg(instance, format, ...) \ |
1018 | + pr_debug("ATM dev %d: " format, \ |
1019 | + (instance)->atm_dev->number, ##__VA_ARGS__) |
1020 | +#define atm_rldbg(instance, format, ...) \ |
1021 | + pr_debug_ratelimited("ATM dev %d: " format, \ |
1022 | + (instance)->atm_dev->number, ##__VA_ARGS__) |
1023 | |
1024 | /* flags, set by mini-driver in bind() */ |
1025 | |
1026 | diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c |
1027 | index b369292d4b90..ad0aca812002 100644 |
1028 | --- a/drivers/usb/gadget/u_serial.c |
1029 | +++ b/drivers/usb/gadget/u_serial.c |
1030 | @@ -549,8 +549,8 @@ static void gs_rx_push(unsigned long _port) |
1031 | port->read_started--; |
1032 | } |
1033 | |
1034 | - /* Push from tty to ldisc; without low_latency set this is handled by |
1035 | - * a workqueue, so we won't get callbacks and can hold port_lock |
1036 | + /* Push from tty to ldisc; this is handled by a workqueue, |
1037 | + * so we won't get callbacks and can hold port_lock |
1038 | */ |
1039 | if (do_push) |
1040 | tty_flip_buffer_push(&port->port); |
1041 | diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c |
1042 | index 1f572c00a1be..cfda0a6c07a7 100644 |
1043 | --- a/drivers/virtio/virtio_balloon.c |
1044 | +++ b/drivers/virtio/virtio_balloon.c |
1045 | @@ -312,6 +312,12 @@ static int balloon(void *_vballoon) |
1046 | else if (diff < 0) |
1047 | leak_balloon(vb, -diff); |
1048 | update_balloon_size(vb); |
1049 | + |
1050 | + /* |
1051 | + * For large balloon changes, we could spend a lot of time |
1052 | + * and always have work to do. Be nice if preempt disabled. |
1053 | + */ |
1054 | + cond_resched(); |
1055 | } |
1056 | return 0; |
1057 | } |
1058 | diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c |
1059 | index 62176ad89846..84d590a9e4ad 100644 |
1060 | --- a/fs/btrfs/disk-io.c |
1061 | +++ b/fs/btrfs/disk-io.c |
1062 | @@ -3246,6 +3246,8 @@ static int barrier_all_devices(struct btrfs_fs_info *info) |
1063 | /* send down all the barriers */ |
1064 | head = &info->fs_devices->devices; |
1065 | list_for_each_entry_rcu(dev, head, dev_list) { |
1066 | + if (dev->missing) |
1067 | + continue; |
1068 | if (!dev->bdev) { |
1069 | errors_send++; |
1070 | continue; |
1071 | @@ -3260,6 +3262,8 @@ static int barrier_all_devices(struct btrfs_fs_info *info) |
1072 | |
1073 | /* wait for all the barriers */ |
1074 | list_for_each_entry_rcu(dev, head, dev_list) { |
1075 | + if (dev->missing) |
1076 | + continue; |
1077 | if (!dev->bdev) { |
1078 | errors_wait++; |
1079 | continue; |
1080 | diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c |
1081 | index b791cfb9a050..25d64e8e8e47 100644 |
1082 | --- a/fs/btrfs/transaction.c |
1083 | +++ b/fs/btrfs/transaction.c |
1084 | @@ -663,7 +663,8 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, |
1085 | int lock = (trans->type != TRANS_JOIN_NOLOCK); |
1086 | int err = 0; |
1087 | |
1088 | - if (--trans->use_count) { |
1089 | + if (trans->use_count > 1) { |
1090 | + trans->use_count--; |
1091 | trans->block_rsv = trans->orig_rsv; |
1092 | return 0; |
1093 | } |
1094 | @@ -711,17 +712,10 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, |
1095 | } |
1096 | |
1097 | if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) { |
1098 | - if (throttle) { |
1099 | - /* |
1100 | - * We may race with somebody else here so end up having |
1101 | - * to call end_transaction on ourselves again, so inc |
1102 | - * our use_count. |
1103 | - */ |
1104 | - trans->use_count++; |
1105 | + if (throttle) |
1106 | return btrfs_commit_transaction(trans, root); |
1107 | - } else { |
1108 | + else |
1109 | wake_up_process(info->transaction_kthread); |
1110 | - } |
1111 | } |
1112 | |
1113 | if (trans->type & __TRANS_FREEZABLE) |
1114 | diff --git a/fs/dcache.c b/fs/dcache.c |
1115 | index 30b38e23caa7..8ef74f3d8fe5 100644 |
1116 | --- a/fs/dcache.c |
1117 | +++ b/fs/dcache.c |
1118 | @@ -2893,6 +2893,7 @@ static int prepend_path(const struct path *path, |
1119 | restart: |
1120 | bptr = *buffer; |
1121 | blen = *buflen; |
1122 | + error = 0; |
1123 | dentry = path->dentry; |
1124 | vfsmnt = path->mnt; |
1125 | mnt = real_mount(vfsmnt); |
1126 | @@ -3121,19 +3122,22 @@ char *simple_dname(struct dentry *dentry, char *buffer, int buflen) |
1127 | /* |
1128 | * Write full pathname from the root of the filesystem into the buffer. |
1129 | */ |
1130 | -static char *__dentry_path(struct dentry *dentry, char *buf, int buflen) |
1131 | +static char *__dentry_path(struct dentry *d, char *buf, int buflen) |
1132 | { |
1133 | + struct dentry *dentry; |
1134 | char *end, *retval; |
1135 | int len, seq = 0; |
1136 | int error = 0; |
1137 | |
1138 | + if (buflen < 2) |
1139 | + goto Elong; |
1140 | + |
1141 | rcu_read_lock(); |
1142 | restart: |
1143 | + dentry = d; |
1144 | end = buf + buflen; |
1145 | len = buflen; |
1146 | prepend(&end, &len, "\0", 1); |
1147 | - if (buflen < 1) |
1148 | - goto Elong; |
1149 | /* Get '/' right */ |
1150 | retval = end-1; |
1151 | *retval = '/'; |
1152 | diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c |
1153 | index e678549ec994..8dd96591b2f8 100644 |
1154 | --- a/fs/ext4/extents.c |
1155 | +++ b/fs/ext4/extents.c |
1156 | @@ -2616,6 +2616,27 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, |
1157 | ex_ee_block = le32_to_cpu(ex->ee_block); |
1158 | ex_ee_len = ext4_ext_get_actual_len(ex); |
1159 | |
1160 | + /* |
1161 | + * If we're starting with an extent other than the last one in the |
1162 | + * node, we need to see if it shares a cluster with the extent to |
1163 | + * the right (towards the end of the file). If its leftmost cluster |
1164 | + * is this extent's rightmost cluster and it is not cluster aligned, |
1165 | + * we'll mark it as a partial that is not to be deallocated. |
1166 | + */ |
1167 | + |
1168 | + if (ex != EXT_LAST_EXTENT(eh)) { |
1169 | + ext4_fsblk_t current_pblk, right_pblk; |
1170 | + long long current_cluster, right_cluster; |
1171 | + |
1172 | + current_pblk = ext4_ext_pblock(ex) + ex_ee_len - 1; |
1173 | + current_cluster = (long long)EXT4_B2C(sbi, current_pblk); |
1174 | + right_pblk = ext4_ext_pblock(ex + 1); |
1175 | + right_cluster = (long long)EXT4_B2C(sbi, right_pblk); |
1176 | + if (current_cluster == right_cluster && |
1177 | + EXT4_PBLK_COFF(sbi, right_pblk)) |
1178 | + *partial_cluster = -right_cluster; |
1179 | + } |
1180 | + |
1181 | trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster); |
1182 | |
1183 | while (ex >= EXT_FIRST_EXTENT(eh) && |
1184 | @@ -2741,10 +2762,15 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, |
1185 | err = ext4_ext_correct_indexes(handle, inode, path); |
1186 | |
1187 | /* |
1188 | - * Free the partial cluster only if the current extent does not |
1189 | - * reference it. Otherwise we might free used cluster. |
1190 | + * If there's a partial cluster and at least one extent remains in |
1191 | + * the leaf, free the partial cluster if it isn't shared with the |
1192 | + * current extent. If there's a partial cluster and no extents |
1193 | + * remain in the leaf, it can't be freed here. It can only be |
1194 | + * freed when it's possible to determine if it's not shared with |
1195 | + * any other extent - when the next leaf is processed or when space |
1196 | + * removal is complete. |
1197 | */ |
1198 | - if (*partial_cluster > 0 && |
1199 | + if (*partial_cluster > 0 && eh->eh_entries && |
1200 | (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) != |
1201 | *partial_cluster)) { |
1202 | int flags = get_default_free_blocks_flags(inode); |
1203 | @@ -4159,7 +4185,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, |
1204 | struct ext4_extent newex, *ex, *ex2; |
1205 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
1206 | ext4_fsblk_t newblock = 0; |
1207 | - int free_on_err = 0, err = 0, depth; |
1208 | + int free_on_err = 0, err = 0, depth, ret; |
1209 | unsigned int allocated = 0, offset = 0; |
1210 | unsigned int allocated_clusters = 0; |
1211 | struct ext4_allocation_request ar; |
1212 | @@ -4220,9 +4246,13 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, |
1213 | if (!ext4_ext_is_uninitialized(ex)) |
1214 | goto out; |
1215 | |
1216 | - allocated = ext4_ext_handle_uninitialized_extents( |
1217 | + ret = ext4_ext_handle_uninitialized_extents( |
1218 | handle, inode, map, path, flags, |
1219 | allocated, newblock); |
1220 | + if (ret < 0) |
1221 | + err = ret; |
1222 | + else |
1223 | + allocated = ret; |
1224 | goto out3; |
1225 | } |
1226 | } |
1227 | diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c |
1228 | index 3595180b62ac..5bbec31440a4 100644 |
1229 | --- a/fs/fs-writeback.c |
1230 | +++ b/fs/fs-writeback.c |
1231 | @@ -88,16 +88,29 @@ static inline struct inode *wb_inode(struct list_head *head) |
1232 | #define CREATE_TRACE_POINTS |
1233 | #include <trace/events/writeback.h> |
1234 | |
1235 | +static void bdi_wakeup_thread(struct backing_dev_info *bdi) |
1236 | +{ |
1237 | + spin_lock_bh(&bdi->wb_lock); |
1238 | + if (test_bit(BDI_registered, &bdi->state)) |
1239 | + mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0); |
1240 | + spin_unlock_bh(&bdi->wb_lock); |
1241 | +} |
1242 | + |
1243 | static void bdi_queue_work(struct backing_dev_info *bdi, |
1244 | struct wb_writeback_work *work) |
1245 | { |
1246 | trace_writeback_queue(bdi, work); |
1247 | |
1248 | spin_lock_bh(&bdi->wb_lock); |
1249 | + if (!test_bit(BDI_registered, &bdi->state)) { |
1250 | + if (work->done) |
1251 | + complete(work->done); |
1252 | + goto out_unlock; |
1253 | + } |
1254 | list_add_tail(&work->list, &bdi->work_list); |
1255 | - spin_unlock_bh(&bdi->wb_lock); |
1256 | - |
1257 | mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0); |
1258 | +out_unlock: |
1259 | + spin_unlock_bh(&bdi->wb_lock); |
1260 | } |
1261 | |
1262 | static void |
1263 | @@ -113,7 +126,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, |
1264 | work = kzalloc(sizeof(*work), GFP_ATOMIC); |
1265 | if (!work) { |
1266 | trace_writeback_nowork(bdi); |
1267 | - mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0); |
1268 | + bdi_wakeup_thread(bdi); |
1269 | return; |
1270 | } |
1271 | |
1272 | @@ -160,7 +173,7 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi) |
1273 | * writeback as soon as there is no other work to do. |
1274 | */ |
1275 | trace_writeback_wake_background(bdi); |
1276 | - mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0); |
1277 | + bdi_wakeup_thread(bdi); |
1278 | } |
1279 | |
1280 | /* |
1281 | @@ -1016,7 +1029,7 @@ void bdi_writeback_workfn(struct work_struct *work) |
1282 | current->flags |= PF_SWAPWRITE; |
1283 | |
1284 | if (likely(!current_is_workqueue_rescuer() || |
1285 | - list_empty(&bdi->bdi_list))) { |
1286 | + !test_bit(BDI_registered, &bdi->state))) { |
1287 | /* |
1288 | * The normal path. Keep writing back @bdi until its |
1289 | * work_list is empty. Note that this path is also taken |
1290 | @@ -1038,10 +1051,10 @@ void bdi_writeback_workfn(struct work_struct *work) |
1291 | trace_writeback_pages_written(pages_written); |
1292 | } |
1293 | |
1294 | - if (!list_empty(&bdi->work_list) || |
1295 | - (wb_has_dirty_io(wb) && dirty_writeback_interval)) |
1296 | - queue_delayed_work(bdi_wq, &wb->dwork, |
1297 | - msecs_to_jiffies(dirty_writeback_interval * 10)); |
1298 | + if (!list_empty(&bdi->work_list)) |
1299 | + mod_delayed_work(bdi_wq, &wb->dwork, 0); |
1300 | + else if (wb_has_dirty_io(wb) && dirty_writeback_interval) |
1301 | + bdi_wakeup_thread_delayed(bdi); |
1302 | |
1303 | current->flags &= ~PF_SWAPWRITE; |
1304 | } |
1305 | diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c |
1306 | index 16a5047903a6..406d9cc84ba8 100644 |
1307 | --- a/fs/jffs2/compr_rtime.c |
1308 | +++ b/fs/jffs2/compr_rtime.c |
1309 | @@ -33,7 +33,7 @@ static int jffs2_rtime_compress(unsigned char *data_in, |
1310 | unsigned char *cpage_out, |
1311 | uint32_t *sourcelen, uint32_t *dstlen) |
1312 | { |
1313 | - short positions[256]; |
1314 | + unsigned short positions[256]; |
1315 | int outpos = 0; |
1316 | int pos=0; |
1317 | |
1318 | @@ -74,7 +74,7 @@ static int jffs2_rtime_decompress(unsigned char *data_in, |
1319 | unsigned char *cpage_out, |
1320 | uint32_t srclen, uint32_t destlen) |
1321 | { |
1322 | - short positions[256]; |
1323 | + unsigned short positions[256]; |
1324 | int outpos = 0; |
1325 | int pos=0; |
1326 | |
1327 | diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h |
1328 | index e4619b00f7c5..fa35ff79ab35 100644 |
1329 | --- a/fs/jffs2/nodelist.h |
1330 | +++ b/fs/jffs2/nodelist.h |
1331 | @@ -231,7 +231,7 @@ struct jffs2_tmp_dnode_info |
1332 | uint32_t version; |
1333 | uint32_t data_crc; |
1334 | uint32_t partial_crc; |
1335 | - uint16_t csize; |
1336 | + uint32_t csize; |
1337 | uint16_t overlapped; |
1338 | }; |
1339 | |
1340 | diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c |
1341 | index 03310721712f..b6bd4affd9ad 100644 |
1342 | --- a/fs/jffs2/nodemgmt.c |
1343 | +++ b/fs/jffs2/nodemgmt.c |
1344 | @@ -179,6 +179,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, |
1345 | spin_unlock(&c->erase_completion_lock); |
1346 | |
1347 | schedule(); |
1348 | + remove_wait_queue(&c->erase_wait, &wait); |
1349 | } else |
1350 | spin_unlock(&c->erase_completion_lock); |
1351 | } else if (ret) |
1352 | @@ -211,20 +212,25 @@ out: |
1353 | int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, |
1354 | uint32_t *len, uint32_t sumsize) |
1355 | { |
1356 | - int ret = -EAGAIN; |
1357 | + int ret; |
1358 | minsize = PAD(minsize); |
1359 | |
1360 | jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize); |
1361 | |
1362 | - spin_lock(&c->erase_completion_lock); |
1363 | - while(ret == -EAGAIN) { |
1364 | + while (true) { |
1365 | + spin_lock(&c->erase_completion_lock); |
1366 | ret = jffs2_do_reserve_space(c, minsize, len, sumsize); |
1367 | if (ret) { |
1368 | jffs2_dbg(1, "%s(): looping, ret is %d\n", |
1369 | __func__, ret); |
1370 | } |
1371 | + spin_unlock(&c->erase_completion_lock); |
1372 | + |
1373 | + if (ret == -EAGAIN) |
1374 | + cond_resched(); |
1375 | + else |
1376 | + break; |
1377 | } |
1378 | - spin_unlock(&c->erase_completion_lock); |
1379 | if (!ret) |
1380 | ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); |
1381 | |
1382 | diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c |
1383 | index 20bf8e8002d6..a6fcbd220f6b 100644 |
1384 | --- a/fs/xfs/xfs_da_btree.c |
1385 | +++ b/fs/xfs/xfs_da_btree.c |
1386 | @@ -1335,7 +1335,7 @@ xfs_da3_fixhashpath( |
1387 | node = blk->bp->b_addr; |
1388 | xfs_da3_node_hdr_from_disk(&nodehdr, node); |
1389 | btree = xfs_da3_node_tree_p(node); |
1390 | - if (be32_to_cpu(btree->hashval) == lasthash) |
1391 | + if (be32_to_cpu(btree[blk->index].hashval) == lasthash) |
1392 | break; |
1393 | blk->hashval = lasthash; |
1394 | btree[blk->index].hashval = cpu_to_be32(lasthash); |
1395 | diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h |
1396 | index 5f66d519a726..a4cf599ecfc8 100644 |
1397 | --- a/include/linux/backing-dev.h |
1398 | +++ b/include/linux/backing-dev.h |
1399 | @@ -95,7 +95,7 @@ struct backing_dev_info { |
1400 | unsigned int max_ratio, max_prop_frac; |
1401 | |
1402 | struct bdi_writeback wb; /* default writeback info for this bdi */ |
1403 | - spinlock_t wb_lock; /* protects work_list */ |
1404 | + spinlock_t wb_lock; /* protects work_list & wb.dwork scheduling */ |
1405 | |
1406 | struct list_head work_list; |
1407 | |
1408 | diff --git a/include/linux/printk.h b/include/linux/printk.h |
1409 | index e6131a782481..694925837a16 100644 |
1410 | --- a/include/linux/printk.h |
1411 | +++ b/include/linux/printk.h |
1412 | @@ -233,6 +233,8 @@ extern asmlinkage void dump_stack(void) __cold; |
1413 | no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
1414 | #endif |
1415 | |
1416 | +#include <linux/dynamic_debug.h> |
1417 | + |
1418 | /* If you are writing a driver, please use dev_dbg instead */ |
1419 | #if defined(CONFIG_DYNAMIC_DEBUG) |
1420 | /* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ |
1421 | @@ -343,7 +345,19 @@ extern asmlinkage void dump_stack(void) __cold; |
1422 | #endif |
1423 | |
1424 | /* If you are writing a driver, please use dev_dbg instead */ |
1425 | -#if defined(DEBUG) |
1426 | +#if defined(CONFIG_DYNAMIC_DEBUG) |
1427 | +/* descriptor check is first to prevent flooding with "callbacks suppressed" */ |
1428 | +#define pr_debug_ratelimited(fmt, ...) \ |
1429 | +do { \ |
1430 | + static DEFINE_RATELIMIT_STATE(_rs, \ |
1431 | + DEFAULT_RATELIMIT_INTERVAL, \ |
1432 | + DEFAULT_RATELIMIT_BURST); \ |
1433 | + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ |
1434 | + if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ |
1435 | + __ratelimit(&_rs)) \ |
1436 | + __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \ |
1437 | +} while (0) |
1438 | +#elif defined(DEBUG) |
1439 | #define pr_debug_ratelimited(fmt, ...) \ |
1440 | printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
1441 | #else |
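The new CONFIG_DYNAMIC_DEBUG branch of pr_debug_ratelimited() tests the per-call-site descriptor flag before the ratelimit state, so call sites that are not enabled never consume ratelimit budget or produce "callbacks suppressed" noise. A rough userspace sketch of that ordering, with a made-up debug_enabled flag standing in for the descriptor check and a much cruder rate limiter than the kernel's:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Hypothetical per-call-site switch standing in for the dynamic-debug
     * descriptor flag (_DPRINTK_FLAGS_PRINT). */
    static bool debug_enabled = false;

    /* Crude ratelimit: at most 'burst' messages per 'interval' seconds. */
    struct ratelimit {
        time_t window_start;
        int interval;
        int burst;
        int printed;
    };

    static bool ratelimit_ok(struct ratelimit *rs)
    {
        time_t now = time(NULL);

        if (now - rs->window_start >= rs->interval) {
            rs->window_start = now;
            rs->printed = 0;
        }
        return rs->printed++ < rs->burst;
    }

    /* Mirrors the ordering in the patch: the cheap per-site enable check
     * comes first, so disabled sites neither print nor touch the
     * ratelimit state (&& short-circuits before ratelimit_ok()). */
    #define dbg_ratelimited(rs, fmt, ...)                       \
        do {                                                    \
            if (debug_enabled && ratelimit_ok(rs))              \
                fprintf(stderr, fmt, ##__VA_ARGS__);            \
        } while (0)

    int main(void)
    {
        struct ratelimit rs = { .interval = 5, .burst = 10 };

        for (int i = 0; i < 100; i++)
            dbg_ratelimited(&rs, "debug message %d\n", i);
        return 0;
    }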
1442 | diff --git a/include/linux/tty.h b/include/linux/tty.h |
1443 | index 64f864651d86..96c23247a332 100644 |
1444 | --- a/include/linux/tty.h |
1445 | +++ b/include/linux/tty.h |
1446 | @@ -203,7 +203,7 @@ struct tty_port { |
1447 | wait_queue_head_t delta_msr_wait; /* Modem status change */ |
1448 | unsigned long flags; /* TTY flags ASY_*/ |
1449 | unsigned char console:1, /* port is a console */ |
1450 | - low_latency:1; /* direct buffer flush */ |
1451 | + low_latency:1; /* optional: tune for latency */ |
1452 | struct mutex mutex; /* Locking */ |
1453 | struct mutex buf_mutex; /* Buffer alloc lock */ |
1454 | unsigned char *xmit_buf; /* Optional buffer */ |
1455 | diff --git a/kernel/exit.c b/kernel/exit.c |
1456 | index a949819055d5..dcde2c4b61d0 100644 |
1457 | --- a/kernel/exit.c |
1458 | +++ b/kernel/exit.c |
1459 | @@ -559,9 +559,6 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p, |
1460 | struct list_head *dead) |
1461 | { |
1462 | list_move_tail(&p->sibling, &p->real_parent->children); |
1463 | - |
1464 | - if (p->exit_state == EXIT_DEAD) |
1465 | - return; |
1466 | /* |
1467 | * If this is a threaded reparent there is no need to |
1468 | * notify anyone anything has happened. |
1469 | @@ -569,9 +566,19 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p, |
1470 | if (same_thread_group(p->real_parent, father)) |
1471 | return; |
1472 | |
1473 | - /* We don't want people slaying init. */ |
1474 | + /* |
1475 | + * We don't want people slaying init. |
1476 | + * |
1477 | + * Note: we do this even if it is EXIT_DEAD, wait_task_zombie() |
1478 | + * can change ->exit_state to EXIT_ZOMBIE. If this is the final |
1479 | + * state, do_notify_parent() was already called and ->exit_signal |
1480 | + * doesn't matter. |
1481 | + */ |
1482 | p->exit_signal = SIGCHLD; |
1483 | |
1484 | + if (p->exit_state == EXIT_DEAD) |
1485 | + return; |
1486 | + |
1487 | /* If it has exited notify the new parent about this child's death. */ |
1488 | if (!p->ptrace && |
1489 | p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) { |
1490 | @@ -783,6 +790,8 @@ void do_exit(long code) |
1491 | exit_shm(tsk); |
1492 | exit_files(tsk); |
1493 | exit_fs(tsk); |
1494 | + if (group_dead) |
1495 | + disassociate_ctty(1); |
1496 | exit_task_namespaces(tsk); |
1497 | exit_task_work(tsk); |
1498 | check_stack_usage(); |
1499 | @@ -798,13 +807,9 @@ void do_exit(long code) |
1500 | |
1501 | cgroup_exit(tsk, 1); |
1502 | |
1503 | - if (group_dead) |
1504 | - disassociate_ctty(1); |
1505 | - |
1506 | module_put(task_thread_info(tsk)->exec_domain->module); |
1507 | |
1508 | proc_exit_connector(tsk); |
1509 | - |
1510 | /* |
1511 | * FIXME: do that only when needed, using sched_exit tracepoint |
1512 | */ |
1513 | diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c |
1514 | index 42086551a24a..55e956056987 100644 |
1515 | --- a/kernel/pid_namespace.c |
1516 | +++ b/kernel/pid_namespace.c |
1517 | @@ -312,7 +312,9 @@ static void *pidns_get(struct task_struct *task) |
1518 | struct pid_namespace *ns; |
1519 | |
1520 | rcu_read_lock(); |
1521 | - ns = get_pid_ns(task_active_pid_ns(task)); |
1522 | + ns = task_active_pid_ns(task); |
1523 | + if (ns) |
1524 | + get_pid_ns(ns); |
1525 | rcu_read_unlock(); |
1526 | |
1527 | return ns; |
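task_active_pid_ns() can return NULL (for example for a task that is already exiting), and the old pidns_get() passed that straight into get_pid_ns(); the fix takes the reference only when the lookup actually produced a namespace. A small self-contained sketch of the guarded-get pattern (hypothetical object type, not the real pid_namespace API):

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical refcounted object standing in for struct pid_namespace. */
    struct object {
        int refcount;
    };

    static struct object *object_get(struct object *obj)
    {
        obj->refcount++;    /* would crash if obj were NULL */
        return obj;
    }

    /* A lookup that may legitimately return NULL, as the namespace lookup
     * can for an exiting task. */
    static struct object *lookup_object(int exists)
    {
        static struct object the_object;

        return exists ? &the_object : NULL;
    }

    /* Mirror of the fix: take the reference only if the lookup succeeded. */
    static struct object *grab_object(int exists)
    {
        struct object *obj = lookup_object(exists);

        if (obj)
            object_get(obj);
        return obj;
    }

    int main(void)
    {
        printf("found:   %p\n", (void *)grab_object(1));
        printf("missing: %p\n", (void *)grab_object(0));
        return 0;
    }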
1528 | diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c |
1529 | index 13fb1134ba58..6991139e3303 100644 |
1530 | --- a/kernel/user_namespace.c |
1531 | +++ b/kernel/user_namespace.c |
1532 | @@ -146,7 +146,7 @@ static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count) |
1533 | |
1534 | /* Find the matching extent */ |
1535 | extents = map->nr_extents; |
1536 | - smp_read_barrier_depends(); |
1537 | + smp_rmb(); |
1538 | for (idx = 0; idx < extents; idx++) { |
1539 | first = map->extent[idx].first; |
1540 | last = first + map->extent[idx].count - 1; |
1541 | @@ -170,7 +170,7 @@ static u32 map_id_down(struct uid_gid_map *map, u32 id) |
1542 | |
1543 | /* Find the matching extent */ |
1544 | extents = map->nr_extents; |
1545 | - smp_read_barrier_depends(); |
1546 | + smp_rmb(); |
1547 | for (idx = 0; idx < extents; idx++) { |
1548 | first = map->extent[idx].first; |
1549 | last = first + map->extent[idx].count - 1; |
1550 | @@ -193,7 +193,7 @@ static u32 map_id_up(struct uid_gid_map *map, u32 id) |
1551 | |
1552 | /* Find the matching extent */ |
1553 | extents = map->nr_extents; |
1554 | - smp_read_barrier_depends(); |
1555 | + smp_rmb(); |
1556 | for (idx = 0; idx < extents; idx++) { |
1557 | first = map->extent[idx].lower_first; |
1558 | last = first + map->extent[idx].count - 1; |
1559 | @@ -609,9 +609,8 @@ static ssize_t map_write(struct file *file, const char __user *buf, |
1560 | * were written before the count of the extents. |
1561 | * |
1562 | * To achieve this smp_wmb() is used on guarantee the write |
1563 | - * order and smp_read_barrier_depends() is guaranteed that we |
1564 | - * don't have crazy architectures returning stale data. |
1565 | - * |
1566 | + * order and smp_rmb() is guaranteed that we don't have crazy |
1567 | + * architectures returning stale data. |
1568 | */ |
1569 | mutex_lock(&id_map_mutex); |
1570 | |
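smp_read_barrier_depends() only orders loads whose address depends on an earlier load; here the reads of map->extent[] are not data-dependent on nr_extents, so the patch pairs the writer's smp_wmb() with a full smp_rmb() on the read side instead. A loose userspace analogue using C11 release/acquire ordering (single writer, publish-once, with an invented extent layout rather than the kernel's uid_gid_map):

    #include <stdatomic.h>
    #include <stdint.h>

    #define MAX_EXTENTS 5

    struct extent {
        uint32_t first;
        uint32_t count;
    };

    static struct extent extents[MAX_EXTENTS];
    static atomic_uint nr_extents = 0;  /* published count */

    /* Writer: fill the array, then publish the count with release
     * semantics (playing the role of smp_wmb() before the count store). */
    void publish(const struct extent *src, unsigned int n)
    {
        for (unsigned int i = 0; i < n; i++)
            extents[i] = src[i];
        atomic_store_explicit(&nr_extents, n, memory_order_release);
    }

    /* Reader: acquire-load the count before walking the array (playing
     * the role of the smp_rmb() the patch adds), so the entries seen are
     * at least as new as the count that was read. */
    int lookup(uint32_t id, uint32_t *out_first)
    {
        unsigned int n = atomic_load_explicit(&nr_extents,
                                              memory_order_acquire);

        for (unsigned int i = 0; i < n; i++) {
            if (id >= extents[i].first &&
                id < extents[i].first + extents[i].count) {
                *out_first = extents[i].first;
                return 0;
            }
        }
        return -1;
    }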
1571 | diff --git a/mm/backing-dev.c b/mm/backing-dev.c |
1572 | index ce682f7a4f29..09d9591b7708 100644 |
1573 | --- a/mm/backing-dev.c |
1574 | +++ b/mm/backing-dev.c |
1575 | @@ -288,13 +288,19 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi) |
1576 | * Note, we wouldn't bother setting up the timer, but this function is on the |
1577 | * fast-path (used by '__mark_inode_dirty()'), so we save few context switches |
1578 | * by delaying the wake-up. |
1579 | + * |
1580 | + * We have to be careful not to postpone flush work if it is scheduled for |
1581 | + * earlier. Thus we use queue_delayed_work(). |
1582 | */ |
1583 | void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi) |
1584 | { |
1585 | unsigned long timeout; |
1586 | |
1587 | timeout = msecs_to_jiffies(dirty_writeback_interval * 10); |
1588 | - mod_delayed_work(bdi_wq, &bdi->wb.dwork, timeout); |
1589 | + spin_lock_bh(&bdi->wb_lock); |
1590 | + if (test_bit(BDI_registered, &bdi->state)) |
1591 | + queue_delayed_work(bdi_wq, &bdi->wb.dwork, timeout); |
1592 | + spin_unlock_bh(&bdi->wb_lock); |
1593 | } |
1594 | |
1595 | /* |
1596 | @@ -307,9 +313,6 @@ static void bdi_remove_from_list(struct backing_dev_info *bdi) |
1597 | spin_unlock_bh(&bdi_lock); |
1598 | |
1599 | synchronize_rcu_expedited(); |
1600 | - |
1601 | - /* bdi_list is now unused, clear it to mark @bdi dying */ |
1602 | - INIT_LIST_HEAD(&bdi->bdi_list); |
1603 | } |
1604 | |
1605 | int bdi_register(struct backing_dev_info *bdi, struct device *parent, |
1606 | @@ -360,6 +363,11 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi) |
1607 | */ |
1608 | bdi_remove_from_list(bdi); |
1609 | |
1610 | + /* Make sure nobody queues further work */ |
1611 | + spin_lock_bh(&bdi->wb_lock); |
1612 | + clear_bit(BDI_registered, &bdi->state); |
1613 | + spin_unlock_bh(&bdi->wb_lock); |
1614 | + |
1615 | /* |
1616 | * Drain work list and shutdown the delayed_work. At this point, |
1617 | * @bdi->bdi_list is empty telling bdi_writeback_workfn() that @bdi |
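Two parts of the mm/backing-dev.c hunks work together: shutdown clears BDI_registered under wb_lock, and the wakeup path tests that bit under the same lock before queueing, so no new work can slip in once shutdown has started; switching to queue_delayed_work() also avoids pushing back a timer that is already due sooner, per the added comment. A userspace pthread sketch of the flag-under-lock part (names are made up; schedule_flush() stands in for queue_delayed_work()):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool registered = true;

    /* Hypothetical stand-in for queue_delayed_work(). */
    static void schedule_flush(unsigned int delay_ms)
    {
        printf("flush scheduled in %u ms\n", delay_ms);
    }

    /* Fast path: check the flag and queue the work as one unit under the
     * lock, mirroring the patched bdi_wakeup_thread_delayed(). */
    static void wakeup_flusher_delayed(unsigned int delay_ms)
    {
        pthread_mutex_lock(&sched_lock);
        if (registered)
            schedule_flush(delay_ms);
        pthread_mutex_unlock(&sched_lock);
    }

    /* Shutdown: clear the flag under the same lock, so once we get past
     * this point no new work can be queued and the already-queued work
     * can be drained safely. */
    static void shutdown_flusher(void)
    {
        pthread_mutex_lock(&sched_lock);
        registered = false;
        pthread_mutex_unlock(&sched_lock);
        /* ... cancel/flush the pending work here ... */
    }

    int main(void)
    {
        wakeup_flusher_delayed(500);
        shutdown_flusher();
        wakeup_flusher_delayed(500);    /* no longer schedules anything */
        return 0;
    }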
1618 | diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c |
1619 | index 8db3e89fae35..a3af2b750e96 100644 |
1620 | --- a/net/bluetooth/hci_event.c |
1621 | +++ b/net/bluetooth/hci_event.c |
1622 | @@ -3565,7 +3565,13 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) |
1623 | |
1624 | hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); |
1625 | |
1626 | - if (ltk->type & HCI_SMP_STK) { |
1627 | + /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a |
1628 | + * temporary key used to encrypt a connection following |
1629 | + * pairing. It is used during the Encrypted Session Setup to |
1630 | + * distribute the keys. Later, security can be re-established |
1631 | + * using a distributed LTK. |
1632 | + */ |
1633 | + if (ltk->type == HCI_SMP_STK_SLAVE) { |
1634 | list_del(&ltk->list); |
1635 | kfree(ltk); |
1636 | } |
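The hci_event.c condition is narrowed from a bitwise test on HCI_SMP_STK to an exact match on HCI_SMP_STK_SLAVE, so only the short-lived slave STK entry is deleted after use. A tiny illustration of why a mask test is broader than an equality test (the constants below are invented for the example, not the real HCI_SMP_* values):

    #include <assert.h>

    /* Invented values for illustration only. */
    #define KEY_STK        0x01
    #define KEY_STK_SLAVE  0x03  /* has the KEY_STK bit set */
    #define KEY_LTK        0x05  /* also happens to have that bit set */

    int main(void)
    {
        /* Mask test: both the slave STK and the long-term key match. */
        assert((KEY_STK_SLAVE & KEY_STK) && (KEY_LTK & KEY_STK));
        /* Equality test: only the one type we actually mean matches. */
        assert(KEY_STK_SLAVE == KEY_STK_SLAVE && KEY_LTK != KEY_STK_SLAVE);
        return 0;
    }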
1637 | diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c |
1638 | index c482f7c7dd32..c211607b79b3 100644 |
1639 | --- a/net/ipv4/ping.c |
1640 | +++ b/net/ipv4/ping.c |
1641 | @@ -249,26 +249,33 @@ int ping_init_sock(struct sock *sk) |
1642 | { |
1643 | struct net *net = sock_net(sk); |
1644 | kgid_t group = current_egid(); |
1645 | - struct group_info *group_info = get_current_groups(); |
1646 | - int i, j, count = group_info->ngroups; |
1647 | + struct group_info *group_info; |
1648 | + int i, j, count; |
1649 | kgid_t low, high; |
1650 | + int ret = 0; |
1651 | |
1652 | inet_get_ping_group_range_net(net, &low, &high); |
1653 | if (gid_lte(low, group) && gid_lte(group, high)) |
1654 | return 0; |
1655 | |
1656 | + group_info = get_current_groups(); |
1657 | + count = group_info->ngroups; |
1658 | for (i = 0; i < group_info->nblocks; i++) { |
1659 | int cp_count = min_t(int, NGROUPS_PER_BLOCK, count); |
1660 | for (j = 0; j < cp_count; j++) { |
1661 | kgid_t gid = group_info->blocks[i][j]; |
1662 | if (gid_lte(low, gid) && gid_lte(gid, high)) |
1663 | - return 0; |
1664 | + goto out_release_group; |
1665 | } |
1666 | |
1667 | count -= cp_count; |
1668 | } |
1669 | |
1670 | - return -EACCES; |
1671 | + ret = -EACCES; |
1672 | + |
1673 | +out_release_group: |
1674 | + put_group_info(group_info); |
1675 | + return ret; |
1676 | } |
1677 | EXPORT_SYMBOL_GPL(ping_init_sock); |
1678 | |
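get_current_groups() returns a referenced group_info, and the original ping_init_sock() leaked that reference on every return path. The rewritten version takes the reference only after the cheap gid-range check fails and funnels all later exits through a single put. A self-contained sketch of that acquire-late / release-once shape (hypothetical group_list type, not the kernel's group_info API):

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical refcounted snapshot standing in for struct group_info. */
    struct group_list {
        unsigned int gids[4];
        unsigned int ngids;
        int refcount;
    };

    static struct group_list current_groups = {
        .gids = { 4, 24, 27, 1000 }, .ngids = 4, .refcount = 1,
    };

    static struct group_list *acquire_groups(void)
    {
        current_groups.refcount++;  /* like get_current_groups() */
        return &current_groups;
    }

    static void release_groups(struct group_list *gl)
    {
        gl->refcount--;             /* like put_group_info() */
    }

    /* Mirrors the patched function: the reference is only taken once the
     * cheap range check has failed, and every later exit path goes
     * through the single release label. */
    static int check_access(unsigned int gid, unsigned int low,
                            unsigned int high)
    {
        struct group_list *gl;
        int ret = 0;

        if (gid >= low && gid <= high)
            return 0;               /* no reference taken on this path */

        gl = acquire_groups();
        for (unsigned int i = 0; i < gl->ngids; i++)
            if (gl->gids[i] >= low && gl->gids[i] <= high)
                goto out_release;   /* success still releases */

        ret = -EACCES;

    out_release:
        release_groups(gl);
        return ret;
    }

    int main(void)
    {
        printf("ret=%d refcount=%d\n", check_access(27, 20, 30),
               current_groups.refcount);
        return 0;
    }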
1679 | diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c |
1680 | index c3235675f359..5c2dab276109 100644 |
1681 | --- a/net/openvswitch/dp_notify.c |
1682 | +++ b/net/openvswitch/dp_notify.c |
1683 | @@ -65,8 +65,7 @@ void ovs_dp_notify_wq(struct work_struct *work) |
1684 | continue; |
1685 | |
1686 | netdev_vport = netdev_vport_priv(vport); |
1687 | - if (netdev_vport->dev->reg_state == NETREG_UNREGISTERED || |
1688 | - netdev_vport->dev->reg_state == NETREG_UNREGISTERING) |
1689 | + if (!(netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH)) |
1690 | dp_detach_port_notify(vport); |
1691 | } |
1692 | } |
1693 | @@ -88,6 +87,10 @@ static int dp_device_event(struct notifier_block *unused, unsigned long event, |
1694 | return NOTIFY_DONE; |
1695 | |
1696 | if (event == NETDEV_UNREGISTER) { |
1697 | + /* upper_dev_unlink and decrement promisc immediately */ |
1698 | + ovs_netdev_detach_dev(vport); |
1699 | + |
1700 | + /* schedule vport destroy, dev_put and genl notification */ |
1701 | ovs_net = net_generic(dev_net(dev), ovs_net_id); |
1702 | queue_work(system_wq, &ovs_net->dp_notify_work); |
1703 | } |
1704 | diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c |
1705 | index 09d93c13cfd6..d21f77d875ba 100644 |
1706 | --- a/net/openvswitch/vport-netdev.c |
1707 | +++ b/net/openvswitch/vport-netdev.c |
1708 | @@ -150,15 +150,25 @@ static void free_port_rcu(struct rcu_head *rcu) |
1709 | ovs_vport_free(vport_from_priv(netdev_vport)); |
1710 | } |
1711 | |
1712 | -static void netdev_destroy(struct vport *vport) |
1713 | +void ovs_netdev_detach_dev(struct vport *vport) |
1714 | { |
1715 | struct netdev_vport *netdev_vport = netdev_vport_priv(vport); |
1716 | |
1717 | - rtnl_lock(); |
1718 | + ASSERT_RTNL(); |
1719 | netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH; |
1720 | netdev_rx_handler_unregister(netdev_vport->dev); |
1721 | - netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp)); |
1722 | + netdev_upper_dev_unlink(netdev_vport->dev, |
1723 | + netdev_master_upper_dev_get(netdev_vport->dev)); |
1724 | dev_set_promiscuity(netdev_vport->dev, -1); |
1725 | +} |
1726 | + |
1727 | +static void netdev_destroy(struct vport *vport) |
1728 | +{ |
1729 | + struct netdev_vport *netdev_vport = netdev_vport_priv(vport); |
1730 | + |
1731 | + rtnl_lock(); |
1732 | + if (netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH) |
1733 | + ovs_netdev_detach_dev(vport); |
1734 | rtnl_unlock(); |
1735 | |
1736 | call_rcu(&netdev_vport->rcu, free_port_rcu); |
1737 | diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h |
1738 | index dd298b5c5cdb..8df01c1127e5 100644 |
1739 | --- a/net/openvswitch/vport-netdev.h |
1740 | +++ b/net/openvswitch/vport-netdev.h |
1741 | @@ -39,5 +39,6 @@ netdev_vport_priv(const struct vport *vport) |
1742 | } |
1743 | |
1744 | const char *ovs_netdev_get_name(const struct vport *); |
1745 | +void ovs_netdev_detach_dev(struct vport *); |
1746 | |
1747 | #endif /* vport_netdev.h */ |
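The openvswitch refactor pulls the detach work out into ovs_netdev_detach_dev() so the NETDEV_UNREGISTER notifier can run it immediately, while netdev_destroy() only repeats it if IFF_OVS_DATAPATH is still set. A compact sketch of that run-once-from-either-path shape (illustrative names, with the serialising lock assumed to be held by the callers, as rtnl_lock() is in the real code):

    #include <stdbool.h>
    #include <stdio.h>

    struct port {
        bool attached;  /* stands in for the IFF_OVS_DATAPATH flag */
    };

    static void port_detach(struct port *p)
    {
        p->attached = false;
        /* ... unregister rx handler, unlink upper device, drop promisc ... */
        printf("detached\n");
    }

    /* Path 1: the netdev notifier detaches as soon as the device goes away. */
    static void on_device_unregister(struct port *p)
    {
        port_detach(p);
    }

    /* Path 2: the deferred destroy only detaches if nobody did it earlier,
     * mirroring the IFF_OVS_DATAPATH check added to netdev_destroy(). */
    static void port_destroy(struct port *p)
    {
        if (p->attached)
            port_detach(p);
        /* ... free the port ... */
    }

    int main(void)
    {
        struct port p = { .attached = true };

        on_device_unregister(&p);
        port_destroy(&p);   /* prints nothing: already detached */
        return 0;
    }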
1748 | diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c |
1749 | index 2d682977ce82..39dc5bc742e0 100644 |
1750 | --- a/virt/kvm/ioapic.c |
1751 | +++ b/virt/kvm/ioapic.c |
1752 | @@ -306,7 +306,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq, bool line_status) |
1753 | BUG_ON(ioapic->rtc_status.pending_eoi != 0); |
1754 | ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, |
1755 | ioapic->rtc_status.dest_map); |
1756 | - ioapic->rtc_status.pending_eoi = ret; |
1757 | + ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret); |
1758 | } else |
1759 | ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL); |
1760 |
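kvm_irq_delivery_to_apic() can return a negative error as well as a delivery count; storing a negative value into rtc_status.pending_eoi can leave a stale negative count behind and later trip the BUG_ON seen at the top of the hunk. The fix clamps the value to zero before using it as a counter. A trivial illustration of the clamp:

    #include <assert.h>

    /* Clamp a "count or negative errno" return value before using it as
     * a counter, mirroring `(ret < 0 ? 0 : ret)` in the patch. */
    static int clamp_count(int ret)
    {
        return ret < 0 ? 0 : ret;
    }

    int main(void)
    {
        assert(clamp_count(3) == 3);    /* delivered to 3 vCPUs */
        assert(clamp_count(0) == 0);    /* delivered to none */
        assert(clamp_count(-22) == 0);  /* -EINVAL must not become a count */
        return 0;
    }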