Magellan Linux

Contents of /trunk/kernel-lts/patches-3.10/0106-3.10.7-all-fixes.patch



Revision 2395
Mon Feb 3 12:41:31 2014 UTC (10 years, 8 months ago) by niro
File size: 88691 byte(s)
-copied
1 diff --git a/Makefile b/Makefile
2 index fd92ffb..33e36ab 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 10
8 -SUBLEVEL = 6
9 +SUBLEVEL = 7
10 EXTRAVERSION =
11 NAME = TOSSUG Baby Fish
12
13 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
14 index 7a58ab9..e53e2b4 100644
15 --- a/arch/mips/Kconfig
16 +++ b/arch/mips/Kconfig
17 @@ -27,6 +27,7 @@ config MIPS
18 select HAVE_GENERIC_HARDIRQS
19 select GENERIC_IRQ_PROBE
20 select GENERIC_IRQ_SHOW
21 + select GENERIC_PCI_IOMAP
22 select HAVE_ARCH_JUMP_LABEL
23 select ARCH_WANT_IPC_PARSE_VERSION
24 select IRQ_FORCED_THREADING
25 @@ -2412,7 +2413,6 @@ config PCI
26 bool "Support for PCI controller"
27 depends on HW_HAS_PCI
28 select PCI_DOMAINS
29 - select GENERIC_PCI_IOMAP
30 select NO_GENERIC_PCI_IOPORT_MAP
31 help
32 Find out whether you have a PCI motherboard. PCI is the name of a
33 diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
34 index b7e5985..b84e1fb 100644
35 --- a/arch/mips/include/asm/io.h
36 +++ b/arch/mips/include/asm/io.h
37 @@ -170,6 +170,11 @@ static inline void * isa_bus_to_virt(unsigned long address)
38 extern void __iomem * __ioremap(phys_t offset, phys_t size, unsigned long flags);
39 extern void __iounmap(const volatile void __iomem *addr);
40
41 +#ifndef CONFIG_PCI
42 +struct pci_dev;
43 +static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
44 +#endif
45 +
46 static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
47 unsigned long flags)
48 {
49 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
50 index c33e3ad..74991fe 100644
51 --- a/arch/powerpc/Kconfig
52 +++ b/arch/powerpc/Kconfig
53 @@ -572,7 +572,7 @@ config SCHED_SMT
54 config PPC_DENORMALISATION
55 bool "PowerPC denormalisation exception handling"
56 depends on PPC_BOOK3S_64
57 - default "n"
58 + default "y" if PPC_POWERNV
59 ---help---
60 Add support for handling denormalisation of single precision
61 values. Useful for bare metal only. If unsure say Y here.
62 diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
63 index 14a6583..419e712 100644
64 --- a/arch/powerpc/include/asm/processor.h
65 +++ b/arch/powerpc/include/asm/processor.h
66 @@ -247,6 +247,10 @@ struct thread_struct {
67 unsigned long tm_orig_msr; /* Thread's MSR on ctx switch */
68 struct pt_regs ckpt_regs; /* Checkpointed registers */
69
70 + unsigned long tm_tar;
71 + unsigned long tm_ppr;
72 + unsigned long tm_dscr;
73 +
74 /*
75 * Transactional FP and VSX 0-31 register set.
76 * NOTE: the sense of these is the opposite of the integer ckpt_regs!
77 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
78 index 362142b..e1fb161 100644
79 --- a/arch/powerpc/include/asm/reg.h
80 +++ b/arch/powerpc/include/asm/reg.h
81 @@ -254,19 +254,28 @@
82 #define SPRN_HRMOR 0x139 /* Real mode offset register */
83 #define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
84 #define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
85 +/* HFSCR and FSCR bit numbers are the same */
86 +#define FSCR_TAR_LG 8 /* Enable Target Address Register */
87 +#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
88 +#define FSCR_TM_LG 5 /* Enable Transactional Memory */
89 +#define FSCR_PM_LG 4 /* Enable prob/priv access to PMU SPRs */
90 +#define FSCR_BHRB_LG 3 /* Enable Branch History Rolling Buffer*/
91 +#define FSCR_DSCR_LG 2 /* Enable Data Stream Control Register */
92 +#define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */
93 +#define FSCR_FP_LG 0 /* Enable Floating Point */
94 #define SPRN_FSCR 0x099 /* Facility Status & Control Register */
95 -#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
96 -#define FSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */
97 -#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
98 +#define FSCR_TAR __MASK(FSCR_TAR_LG)
99 +#define FSCR_EBB __MASK(FSCR_EBB_LG)
100 +#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
101 #define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
102 -#define HFSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
103 -#define HFSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */
104 -#define HFSCR_TM (1 << (63-58)) /* Enable Transactional Memory */
105 -#define HFSCR_PM (1 << (63-60)) /* Enable prob/priv access to PMU SPRs */
106 -#define HFSCR_BHRB (1 << (63-59)) /* Enable Branch History Rolling Buffer*/
107 -#define HFSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
108 -#define HFSCR_VECVSX (1 << (63-62)) /* Enable VMX/VSX */
109 -#define HFSCR_FP (1 << (63-63)) /* Enable Floating Point */
110 +#define HFSCR_TAR __MASK(FSCR_TAR_LG)
111 +#define HFSCR_EBB __MASK(FSCR_EBB_LG)
112 +#define HFSCR_TM __MASK(FSCR_TM_LG)
113 +#define HFSCR_PM __MASK(FSCR_PM_LG)
114 +#define HFSCR_BHRB __MASK(FSCR_BHRB_LG)
115 +#define HFSCR_DSCR __MASK(FSCR_DSCR_LG)
116 +#define HFSCR_VECVSX __MASK(FSCR_VECVSX_LG)
117 +#define HFSCR_FP __MASK(FSCR_FP_LG)
118 #define SPRN_TAR 0x32f /* Target Address Register */
119 #define SPRN_LPCR 0x13E /* LPAR Control Register */
120 #define LPCR_VPM0 (1ul << (63-0))
121 diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
122 index 200d763..685ecc8 100644
123 --- a/arch/powerpc/include/asm/switch_to.h
124 +++ b/arch/powerpc/include/asm/switch_to.h
125 @@ -15,6 +15,15 @@ extern struct task_struct *__switch_to(struct task_struct *,
126 struct thread_struct;
127 extern struct task_struct *_switch(struct thread_struct *prev,
128 struct thread_struct *next);
129 +#ifdef CONFIG_PPC_BOOK3S_64
130 +static inline void save_tar(struct thread_struct *prev)
131 +{
132 + if (cpu_has_feature(CPU_FTR_ARCH_207S))
133 + prev->tar = mfspr(SPRN_TAR);
134 +}
135 +#else
136 +static inline void save_tar(struct thread_struct *prev) {}
137 +#endif
138
139 extern void giveup_fpu(struct task_struct *);
140 extern void load_up_fpu(void);
141 diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
142 index 6f16ffa..302886b 100644
143 --- a/arch/powerpc/kernel/asm-offsets.c
144 +++ b/arch/powerpc/kernel/asm-offsets.c
145 @@ -139,6 +139,9 @@ int main(void)
146 DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar));
147 DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr));
148 DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar));
149 + DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar));
150 + DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
151 + DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
152 DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
153 DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct,
154 transact_vr[0]));
155 diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
156 index 8741c85..38847767 100644
157 --- a/arch/powerpc/kernel/entry_64.S
158 +++ b/arch/powerpc/kernel/entry_64.S
159 @@ -449,15 +449,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
160
161 #ifdef CONFIG_PPC_BOOK3S_64
162 BEGIN_FTR_SECTION
163 - /*
164 - * Back up the TAR across context switches. Note that the TAR is not
165 - * available for use in the kernel. (To provide this, the TAR should
166 - * be backed up/restored on exception entry/exit instead, and be in
167 - * pt_regs. FIXME, this should be in pt_regs anyway (for debug).)
168 - */
169 - mfspr r0,SPRN_TAR
170 - std r0,THREAD_TAR(r3)
171 -
172 /* Event based branch registers */
173 mfspr r0, SPRN_BESCR
174 std r0, THREAD_BESCR(r3)
175 @@ -584,9 +575,34 @@ BEGIN_FTR_SECTION
176 ld r7,DSCR_DEFAULT@toc(2)
177 ld r0,THREAD_DSCR(r4)
178 cmpwi r6,0
179 + li r8, FSCR_DSCR
180 bne 1f
181 ld r0,0(r7)
182 -1: cmpd r0,r25
183 + b 3f
184 +1:
185 + BEGIN_FTR_SECTION_NESTED(70)
186 + mfspr r6, SPRN_FSCR
187 + or r6, r6, r8
188 + mtspr SPRN_FSCR, r6
189 + BEGIN_FTR_SECTION_NESTED(69)
190 + mfspr r6, SPRN_HFSCR
191 + or r6, r6, r8
192 + mtspr SPRN_HFSCR, r6
193 + END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
194 + b 4f
195 + END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
196 +3:
197 + BEGIN_FTR_SECTION_NESTED(70)
198 + mfspr r6, SPRN_FSCR
199 + andc r6, r6, r8
200 + mtspr SPRN_FSCR, r6
201 + BEGIN_FTR_SECTION_NESTED(69)
202 + mfspr r6, SPRN_HFSCR
203 + andc r6, r6, r8
204 + mtspr SPRN_HFSCR, r6
205 + END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
206 + END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
207 +4: cmpd r0,r25
208 beq 2f
209 mtspr SPRN_DSCR,r0
210 2:
211 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
212 index 4e00d22..902ca3c 100644
213 --- a/arch/powerpc/kernel/exceptions-64s.S
214 +++ b/arch/powerpc/kernel/exceptions-64s.S
215 @@ -848,7 +848,7 @@ hv_facility_unavailable_relon_trampoline:
216 . = 0x4f80
217 SET_SCRATCH0(r13)
218 EXCEPTION_PROLOG_0(PACA_EXGEN)
219 - b facility_unavailable_relon_hv
220 + b hv_facility_unavailable_relon_hv
221
222 STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
223 #ifdef CONFIG_PPC_DENORMALISATION
224 @@ -1175,6 +1175,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
225 b .ret_from_except
226
227 STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
228 + STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)
229
230 .align 7
231 .globl __end_handlers
232 @@ -1188,7 +1189,7 @@ __end_handlers:
233 STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
234 STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
235 STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
236 - STD_RELON_EXCEPTION_HV_OOL(0xf80, facility_unavailable)
237 + STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
238
239 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
240 /*
241 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
242 index 076d124..7baa27b 100644
243 --- a/arch/powerpc/kernel/process.c
244 +++ b/arch/powerpc/kernel/process.c
245 @@ -600,6 +600,16 @@ struct task_struct *__switch_to(struct task_struct *prev,
246 struct ppc64_tlb_batch *batch;
247 #endif
248
249 + /* Back up the TAR across context switches.
250 + * Note that the TAR is not available for use in the kernel. (To
251 + * provide this, the TAR should be backed up/restored on exception
252 + * entry/exit instead, and be in pt_regs. FIXME, this should be in
253 + * pt_regs anyway (for debug).)
254 + * Save the TAR here before we do treclaim/trecheckpoint as these
255 + * will change the TAR.
256 + */
257 + save_tar(&prev->thread);
258 +
259 __switch_to_tm(prev);
260
261 #ifdef CONFIG_SMP
262 diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
263 index 2da67e7..1edd6c2 100644
264 --- a/arch/powerpc/kernel/tm.S
265 +++ b/arch/powerpc/kernel/tm.S
266 @@ -224,6 +224,16 @@ dont_backup_fp:
267 std r5, _CCR(r7)
268 std r6, _XER(r7)
269
270 +
271 + /* ******************** TAR, PPR, DSCR ********** */
272 + mfspr r3, SPRN_TAR
273 + mfspr r4, SPRN_PPR
274 + mfspr r5, SPRN_DSCR
275 +
276 + std r3, THREAD_TM_TAR(r12)
277 + std r4, THREAD_TM_PPR(r12)
278 + std r5, THREAD_TM_DSCR(r12)
279 +
280 /* MSR and flags: We don't change CRs, and we don't need to alter
281 * MSR.
282 */
283 @@ -338,6 +348,16 @@ dont_restore_fp:
284 mtmsr r6 /* FP/Vec off again! */
285
286 restore_gprs:
287 +
288 + /* ******************** TAR, PPR, DSCR ********** */
289 + ld r4, THREAD_TM_TAR(r3)
290 + ld r5, THREAD_TM_PPR(r3)
291 + ld r6, THREAD_TM_DSCR(r3)
292 +
293 + mtspr SPRN_TAR, r4
294 + mtspr SPRN_PPR, r5
295 + mtspr SPRN_DSCR, r6
296 +
297 /* ******************** CR,LR,CCR,MSR ********** */
298 ld r3, _CTR(r7)
299 ld r4, _LINK(r7)
300 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
301 index e4f205a..88929b1 100644
302 --- a/arch/powerpc/kernel/traps.c
303 +++ b/arch/powerpc/kernel/traps.c
304 @@ -44,9 +44,7 @@
305 #include <asm/machdep.h>
306 #include <asm/rtas.h>
307 #include <asm/pmc.h>
308 -#ifdef CONFIG_PPC32
309 #include <asm/reg.h>
310 -#endif
311 #ifdef CONFIG_PMAC_BACKLIGHT
312 #include <asm/backlight.h>
313 #endif
314 @@ -1282,43 +1280,54 @@ void vsx_unavailable_exception(struct pt_regs *regs)
315 die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
316 }
317
318 +#ifdef CONFIG_PPC64
319 void facility_unavailable_exception(struct pt_regs *regs)
320 {
321 static char *facility_strings[] = {
322 - "FPU",
323 - "VMX/VSX",
324 - "DSCR",
325 - "PMU SPRs",
326 - "BHRB",
327 - "TM",
328 - "AT",
329 - "EBB",
330 - "TAR",
331 + [FSCR_FP_LG] = "FPU",
332 + [FSCR_VECVSX_LG] = "VMX/VSX",
333 + [FSCR_DSCR_LG] = "DSCR",
334 + [FSCR_PM_LG] = "PMU SPRs",
335 + [FSCR_BHRB_LG] = "BHRB",
336 + [FSCR_TM_LG] = "TM",
337 + [FSCR_EBB_LG] = "EBB",
338 + [FSCR_TAR_LG] = "TAR",
339 };
340 - char *facility, *prefix;
341 + char *facility = "unknown";
342 u64 value;
343 + u8 status;
344 + bool hv;
345
346 - if (regs->trap == 0xf60) {
347 - value = mfspr(SPRN_FSCR);
348 - prefix = "";
349 - } else {
350 + hv = (regs->trap == 0xf80);
351 + if (hv)
352 value = mfspr(SPRN_HFSCR);
353 - prefix = "Hypervisor ";
354 + else
355 + value = mfspr(SPRN_FSCR);
356 +
357 + status = value >> 56;
358 + if (status == FSCR_DSCR_LG) {
359 + /* User is acessing the DSCR. Set the inherit bit and allow
360 + * the user to set it directly in future by setting via the
361 + * H/FSCR DSCR bit.
362 + */
363 + current->thread.dscr_inherit = 1;
364 + if (hv)
365 + mtspr(SPRN_HFSCR, value | HFSCR_DSCR);
366 + else
367 + mtspr(SPRN_FSCR, value | FSCR_DSCR);
368 + return;
369 }
370
371 - value = value >> 56;
372 + if ((status < ARRAY_SIZE(facility_strings)) &&
373 + facility_strings[status])
374 + facility = facility_strings[status];
375
376 /* We restore the interrupt state now */
377 if (!arch_irq_disabled_regs(regs))
378 local_irq_enable();
379
380 - if (value < ARRAY_SIZE(facility_strings))
381 - facility = facility_strings[value];
382 - else
383 - facility = "unknown";
384 -
385 pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
386 - prefix, facility, regs->nip, regs->msr);
387 + hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);
388
389 if (user_mode(regs)) {
390 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
391 @@ -1327,6 +1336,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
392
393 die("Unexpected facility unavailable exception", regs, SIGABRT);
394 }
395 +#endif
396
397 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
398
399 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
400 index aa1227a..04a1378 100644
401 --- a/drivers/acpi/proc.c
402 +++ b/drivers/acpi/proc.c
403 @@ -311,6 +311,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
404 dev->pnp.bus_id,
405 (u32) dev->wakeup.sleep_state);
406
407 + mutex_lock(&dev->physical_node_lock);
408 +
409 if (!dev->physical_node_count) {
410 seq_printf(seq, "%c%-8s\n",
411 dev->wakeup.flags.run_wake ? '*' : ' ',
412 @@ -338,6 +340,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
413 put_device(ldev);
414 }
415 }
416 +
417 + mutex_unlock(&dev->physical_node_lock);
418 }
419 mutex_unlock(&acpi_device_lock);
420 return 0;
421 @@ -347,12 +351,16 @@ static void physical_device_enable_wakeup(struct acpi_device *adev)
422 {
423 struct acpi_device_physical_node *entry;
424
425 + mutex_lock(&adev->physical_node_lock);
426 +
427 list_for_each_entry(entry,
428 &adev->physical_node_list, node)
429 if (entry->dev && device_can_wakeup(entry->dev)) {
430 bool enable = !device_may_wakeup(entry->dev);
431 device_set_wakeup_enable(entry->dev, enable);
432 }
433 +
434 + mutex_unlock(&adev->physical_node_lock);
435 }
436
437 static ssize_t
438 diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
439 index 507ee2d..46283fd 100644
440 --- a/drivers/base/regmap/regcache.c
441 +++ b/drivers/base/regmap/regcache.c
442 @@ -644,7 +644,8 @@ static int regcache_sync_block_raw(struct regmap *map, void *block,
443 }
444 }
445
446 - return regcache_sync_block_raw_flush(map, &data, base, regtmp);
447 + return regcache_sync_block_raw_flush(map, &data, base, regtmp +
448 + map->reg_stride);
449 }
450
451 int regcache_sync_block(struct regmap *map, void *block,
452 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
453 index 1b456fe..fc45567 100644
454 --- a/drivers/char/virtio_console.c
455 +++ b/drivers/char/virtio_console.c
456 @@ -272,9 +272,12 @@ static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
457 unsigned long flags;
458
459 spin_lock_irqsave(&portdev->ports_lock, flags);
460 - list_for_each_entry(port, &portdev->ports, list)
461 - if (port->cdev->dev == dev)
462 + list_for_each_entry(port, &portdev->ports, list) {
463 + if (port->cdev->dev == dev) {
464 + kref_get(&port->kref);
465 goto out;
466 + }
467 + }
468 port = NULL;
469 out:
470 spin_unlock_irqrestore(&portdev->ports_lock, flags);
471 @@ -746,6 +749,10 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
472
473 port = filp->private_data;
474
475 + /* Port is hot-unplugged. */
476 + if (!port->guest_connected)
477 + return -ENODEV;
478 +
479 if (!port_has_data(port)) {
480 /*
481 * If nothing's connected on the host just return 0 in
482 @@ -762,7 +769,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
483 if (ret < 0)
484 return ret;
485 }
486 - /* Port got hot-unplugged. */
487 + /* Port got hot-unplugged while we were waiting above. */
488 if (!port->guest_connected)
489 return -ENODEV;
490 /*
491 @@ -932,13 +939,25 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
492 if (is_rproc_serial(port->out_vq->vdev))
493 return -EINVAL;
494
495 + /*
496 + * pipe->nrbufs == 0 means there are no data to transfer,
497 + * so this returns just 0 for no data.
498 + */
499 + pipe_lock(pipe);
500 + if (!pipe->nrbufs) {
501 + ret = 0;
502 + goto error_out;
503 + }
504 +
505 ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
506 if (ret < 0)
507 - return ret;
508 + goto error_out;
509
510 buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
511 - if (!buf)
512 - return -ENOMEM;
513 + if (!buf) {
514 + ret = -ENOMEM;
515 + goto error_out;
516 + }
517
518 sgl.n = 0;
519 sgl.len = 0;
520 @@ -946,12 +965,17 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
521 sgl.sg = buf->sg;
522 sg_init_table(sgl.sg, sgl.size);
523 ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
524 + pipe_unlock(pipe);
525 if (likely(ret > 0))
526 ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
527
528 if (unlikely(ret <= 0))
529 free_buf(buf, true);
530 return ret;
531 +
532 +error_out:
533 + pipe_unlock(pipe);
534 + return ret;
535 }
536
537 static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
538 @@ -1019,14 +1043,14 @@ static int port_fops_open(struct inode *inode, struct file *filp)
539 struct port *port;
540 int ret;
541
542 + /* We get the port with a kref here */
543 port = find_port_by_devt(cdev->dev);
544 + if (!port) {
545 + /* Port was unplugged before we could proceed */
546 + return -ENXIO;
547 + }
548 filp->private_data = port;
549
550 - /* Prevent against a port getting hot-unplugged at the same time */
551 - spin_lock_irq(&port->portdev->ports_lock);
552 - kref_get(&port->kref);
553 - spin_unlock_irq(&port->portdev->ports_lock);
554 -
555 /*
556 * Don't allow opening of console port devices -- that's done
557 * via /dev/hvc
558 @@ -1498,14 +1522,6 @@ static void remove_port(struct kref *kref)
559
560 port = container_of(kref, struct port, kref);
561
562 - sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
563 - device_destroy(pdrvdata.class, port->dev->devt);
564 - cdev_del(port->cdev);
565 -
566 - kfree(port->name);
567 -
568 - debugfs_remove(port->debugfs_file);
569 -
570 kfree(port);
571 }
572
573 @@ -1539,12 +1555,14 @@ static void unplug_port(struct port *port)
574 spin_unlock_irq(&port->portdev->ports_lock);
575
576 if (port->guest_connected) {
577 + /* Let the app know the port is going down. */
578 + send_sigio_to_port(port);
579 +
580 + /* Do this after sigio is actually sent */
581 port->guest_connected = false;
582 port->host_connected = false;
583 - wake_up_interruptible(&port->waitqueue);
584
585 - /* Let the app know the port is going down. */
586 - send_sigio_to_port(port);
587 + wake_up_interruptible(&port->waitqueue);
588 }
589
590 if (is_console_port(port)) {
591 @@ -1563,6 +1581,14 @@ static void unplug_port(struct port *port)
592 */
593 port->portdev = NULL;
594
595 + sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
596 + device_destroy(pdrvdata.class, port->dev->devt);
597 + cdev_del(port->cdev);
598 +
599 + kfree(port->name);
600 +
601 + debugfs_remove(port->debugfs_file);
602 +
603 /*
604 * Locks around here are not necessary - a port can't be
605 * opened after we removed the port struct from ports_list
606 diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
607 index 0ceb2ef..f97cb3d 100644
608 --- a/drivers/cpufreq/cpufreq_conservative.c
609 +++ b/drivers/cpufreq/cpufreq_conservative.c
610 @@ -221,8 +221,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
611 return count;
612 }
613
614 -static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
615 - size_t count)
616 +static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
617 + const char *buf, size_t count)
618 {
619 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
620 unsigned int input, j;
621 @@ -235,10 +235,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
622 if (input > 1)
623 input = 1;
624
625 - if (input == cs_tuners->ignore_nice) /* nothing to do */
626 + if (input == cs_tuners->ignore_nice_load) /* nothing to do */
627 return count;
628
629 - cs_tuners->ignore_nice = input;
630 + cs_tuners->ignore_nice_load = input;
631
632 /* we need to re-evaluate prev_cpu_idle */
633 for_each_online_cpu(j) {
634 @@ -246,7 +246,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
635 dbs_info = &per_cpu(cs_cpu_dbs_info, j);
636 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
637 &dbs_info->cdbs.prev_cpu_wall, 0);
638 - if (cs_tuners->ignore_nice)
639 + if (cs_tuners->ignore_nice_load)
640 dbs_info->cdbs.prev_cpu_nice =
641 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
642 }
643 @@ -279,7 +279,7 @@ show_store_one(cs, sampling_rate);
644 show_store_one(cs, sampling_down_factor);
645 show_store_one(cs, up_threshold);
646 show_store_one(cs, down_threshold);
647 -show_store_one(cs, ignore_nice);
648 +show_store_one(cs, ignore_nice_load);
649 show_store_one(cs, freq_step);
650 declare_show_sampling_rate_min(cs);
651
652 @@ -287,7 +287,7 @@ gov_sys_pol_attr_rw(sampling_rate);
653 gov_sys_pol_attr_rw(sampling_down_factor);
654 gov_sys_pol_attr_rw(up_threshold);
655 gov_sys_pol_attr_rw(down_threshold);
656 -gov_sys_pol_attr_rw(ignore_nice);
657 +gov_sys_pol_attr_rw(ignore_nice_load);
658 gov_sys_pol_attr_rw(freq_step);
659 gov_sys_pol_attr_ro(sampling_rate_min);
660
661 @@ -297,7 +297,7 @@ static struct attribute *dbs_attributes_gov_sys[] = {
662 &sampling_down_factor_gov_sys.attr,
663 &up_threshold_gov_sys.attr,
664 &down_threshold_gov_sys.attr,
665 - &ignore_nice_gov_sys.attr,
666 + &ignore_nice_load_gov_sys.attr,
667 &freq_step_gov_sys.attr,
668 NULL
669 };
670 @@ -313,7 +313,7 @@ static struct attribute *dbs_attributes_gov_pol[] = {
671 &sampling_down_factor_gov_pol.attr,
672 &up_threshold_gov_pol.attr,
673 &down_threshold_gov_pol.attr,
674 - &ignore_nice_gov_pol.attr,
675 + &ignore_nice_load_gov_pol.attr,
676 &freq_step_gov_pol.attr,
677 NULL
678 };
679 @@ -338,7 +338,7 @@ static int cs_init(struct dbs_data *dbs_data)
680 tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
681 tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
682 tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
683 - tuners->ignore_nice = 0;
684 + tuners->ignore_nice_load = 0;
685 tuners->freq_step = DEF_FREQUENCY_STEP;
686
687 dbs_data->tuners = tuners;
688 diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
689 index 5af40ad..a86ff72 100644
690 --- a/drivers/cpufreq/cpufreq_governor.c
691 +++ b/drivers/cpufreq/cpufreq_governor.c
692 @@ -91,9 +91,9 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
693 unsigned int j;
694
695 if (dbs_data->cdata->governor == GOV_ONDEMAND)
696 - ignore_nice = od_tuners->ignore_nice;
697 + ignore_nice = od_tuners->ignore_nice_load;
698 else
699 - ignore_nice = cs_tuners->ignore_nice;
700 + ignore_nice = cs_tuners->ignore_nice_load;
701
702 policy = cdbs->cur_policy;
703
704 @@ -336,12 +336,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
705 cs_tuners = dbs_data->tuners;
706 cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
707 sampling_rate = cs_tuners->sampling_rate;
708 - ignore_nice = cs_tuners->ignore_nice;
709 + ignore_nice = cs_tuners->ignore_nice_load;
710 } else {
711 od_tuners = dbs_data->tuners;
712 od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
713 sampling_rate = od_tuners->sampling_rate;
714 - ignore_nice = od_tuners->ignore_nice;
715 + ignore_nice = od_tuners->ignore_nice_load;
716 od_ops = dbs_data->cdata->gov_ops;
717 io_busy = od_tuners->io_is_busy;
718 }
719 diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
720 index e16a961..0d9e6be 100644
721 --- a/drivers/cpufreq/cpufreq_governor.h
722 +++ b/drivers/cpufreq/cpufreq_governor.h
723 @@ -165,7 +165,7 @@ struct cs_cpu_dbs_info_s {
724
725 /* Per policy Governers sysfs tunables */
726 struct od_dbs_tuners {
727 - unsigned int ignore_nice;
728 + unsigned int ignore_nice_load;
729 unsigned int sampling_rate;
730 unsigned int sampling_down_factor;
731 unsigned int up_threshold;
732 @@ -175,7 +175,7 @@ struct od_dbs_tuners {
733 };
734
735 struct cs_dbs_tuners {
736 - unsigned int ignore_nice;
737 + unsigned int ignore_nice_load;
738 unsigned int sampling_rate;
739 unsigned int sampling_down_factor;
740 unsigned int up_threshold;
741 diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
742 index 93eb5cb..c087347 100644
743 --- a/drivers/cpufreq/cpufreq_ondemand.c
744 +++ b/drivers/cpufreq/cpufreq_ondemand.c
745 @@ -403,8 +403,8 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
746 return count;
747 }
748
749 -static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
750 - size_t count)
751 +static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
752 + const char *buf, size_t count)
753 {
754 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
755 unsigned int input;
756 @@ -419,10 +419,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
757 if (input > 1)
758 input = 1;
759
760 - if (input == od_tuners->ignore_nice) { /* nothing to do */
761 + if (input == od_tuners->ignore_nice_load) { /* nothing to do */
762 return count;
763 }
764 - od_tuners->ignore_nice = input;
765 + od_tuners->ignore_nice_load = input;
766
767 /* we need to re-evaluate prev_cpu_idle */
768 for_each_online_cpu(j) {
769 @@ -430,7 +430,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
770 dbs_info = &per_cpu(od_cpu_dbs_info, j);
771 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
772 &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
773 - if (od_tuners->ignore_nice)
774 + if (od_tuners->ignore_nice_load)
775 dbs_info->cdbs.prev_cpu_nice =
776 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
777
778 @@ -461,7 +461,7 @@ show_store_one(od, sampling_rate);
779 show_store_one(od, io_is_busy);
780 show_store_one(od, up_threshold);
781 show_store_one(od, sampling_down_factor);
782 -show_store_one(od, ignore_nice);
783 +show_store_one(od, ignore_nice_load);
784 show_store_one(od, powersave_bias);
785 declare_show_sampling_rate_min(od);
786
787 @@ -469,7 +469,7 @@ gov_sys_pol_attr_rw(sampling_rate);
788 gov_sys_pol_attr_rw(io_is_busy);
789 gov_sys_pol_attr_rw(up_threshold);
790 gov_sys_pol_attr_rw(sampling_down_factor);
791 -gov_sys_pol_attr_rw(ignore_nice);
792 +gov_sys_pol_attr_rw(ignore_nice_load);
793 gov_sys_pol_attr_rw(powersave_bias);
794 gov_sys_pol_attr_ro(sampling_rate_min);
795
796 @@ -478,7 +478,7 @@ static struct attribute *dbs_attributes_gov_sys[] = {
797 &sampling_rate_gov_sys.attr,
798 &up_threshold_gov_sys.attr,
799 &sampling_down_factor_gov_sys.attr,
800 - &ignore_nice_gov_sys.attr,
801 + &ignore_nice_load_gov_sys.attr,
802 &powersave_bias_gov_sys.attr,
803 &io_is_busy_gov_sys.attr,
804 NULL
805 @@ -494,7 +494,7 @@ static struct attribute *dbs_attributes_gov_pol[] = {
806 &sampling_rate_gov_pol.attr,
807 &up_threshold_gov_pol.attr,
808 &sampling_down_factor_gov_pol.attr,
809 - &ignore_nice_gov_pol.attr,
810 + &ignore_nice_load_gov_pol.attr,
811 &powersave_bias_gov_pol.attr,
812 &io_is_busy_gov_pol.attr,
813 NULL
814 @@ -544,7 +544,7 @@ static int od_init(struct dbs_data *dbs_data)
815 }
816
817 tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
818 - tuners->ignore_nice = 0;
819 + tuners->ignore_nice_load = 0;
820 tuners->powersave_bias = default_powersave_bias;
821 tuners->io_is_busy = should_io_be_busy();
822
823 diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
824 index d539127..f92b02a 100644
825 --- a/drivers/cpufreq/loongson2_cpufreq.c
826 +++ b/drivers/cpufreq/loongson2_cpufreq.c
827 @@ -118,11 +118,6 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
828 clk_put(cpuclk);
829 return -EINVAL;
830 }
831 - ret = clk_set_rate(cpuclk, rate);
832 - if (ret) {
833 - clk_put(cpuclk);
834 - return ret;
835 - }
836
837 /* clock table init */
838 for (i = 2;
839 @@ -130,6 +125,12 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
840 i++)
841 loongson2_clockmod_table[i].frequency = (rate * i) / 8;
842
843 + ret = clk_set_rate(cpuclk, rate);
844 + if (ret) {
845 + clk_put(cpuclk);
846 + return ret;
847 + }
848 +
849 policy->cur = loongson2_cpufreq_get(policy->cpu);
850
851 cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
852 diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
853 index 09da339..d5902e2 100644
854 --- a/drivers/gpu/drm/ast/ast_ttm.c
855 +++ b/drivers/gpu/drm/ast/ast_ttm.c
856 @@ -348,6 +348,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
857
858 astbo->gem.driver_private = NULL;
859 astbo->bo.bdev = &ast->ttm.bdev;
860 + astbo->bo.bdev->dev_mapping = dev->dev_mapping;
861
862 ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
863
864 diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
865 index 2ed8cfc..c18faff 100644
866 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
867 +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
868 @@ -353,6 +353,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
869
870 cirrusbo->gem.driver_private = NULL;
871 cirrusbo->bo.bdev = &cirrus->ttm.bdev;
872 + cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;
873
874 cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
875
876 diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
877 index 8bcce78..f92da0a 100644
878 --- a/drivers/gpu/drm/drm_irq.c
879 +++ b/drivers/gpu/drm/drm_irq.c
880 @@ -708,7 +708,10 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
881 /* Subtract time delta from raw timestamp to get final
882 * vblank_time timestamp for end of vblank.
883 */
884 - etime = ktime_sub_ns(etime, delta_ns);
885 + if (delta_ns < 0)
886 + etime = ktime_add_ns(etime, -delta_ns);
887 + else
888 + etime = ktime_sub_ns(etime, delta_ns);
889 *vblank_time = ktime_to_timeval(etime);
890
891 DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
892 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
893 index f968590..17d9b0b 100644
894 --- a/drivers/gpu/drm/i915/i915_dma.c
895 +++ b/drivers/gpu/drm/i915/i915_dma.c
896 @@ -1514,6 +1514,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
897 spin_lock_init(&dev_priv->irq_lock);
898 spin_lock_init(&dev_priv->gpu_error.lock);
899 spin_lock_init(&dev_priv->rps.lock);
900 + spin_lock_init(&dev_priv->gt_lock);
901 mutex_init(&dev_priv->dpio_lock);
902 mutex_init(&dev_priv->rps.hw_lock);
903 mutex_init(&dev_priv->modeset_restore_lock);
904 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
905 index 2cfe9f6..94ad6bc 100644
906 --- a/drivers/gpu/drm/i915/intel_pm.c
907 +++ b/drivers/gpu/drm/i915/intel_pm.c
908 @@ -4507,8 +4507,6 @@ void intel_gt_init(struct drm_device *dev)
909 {
910 struct drm_i915_private *dev_priv = dev->dev_private;
911
912 - spin_lock_init(&dev_priv->gt_lock);
913 -
914 if (IS_VALLEYVIEW(dev)) {
915 dev_priv->gt.force_wake_get = vlv_force_wake_get;
916 dev_priv->gt.force_wake_put = vlv_force_wake_put;
917 diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
918 index 401c989..d2cb32f 100644
919 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
920 +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
921 @@ -347,6 +347,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
922
923 mgabo->gem.driver_private = NULL;
924 mgabo->bo.bdev = &mdev->ttm.bdev;
925 + mgabo->bo.bdev->dev_mapping = dev->dev_mapping;
926
927 mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
928
929 diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
930 index 0f89ce3..687b421 100644
931 --- a/drivers/gpu/drm/radeon/evergreen.c
932 +++ b/drivers/gpu/drm/radeon/evergreen.c
933 @@ -4681,6 +4681,8 @@ static int evergreen_startup(struct radeon_device *rdev)
934 /* enable pcie gen2 link */
935 evergreen_pcie_gen2_enable(rdev);
936
937 + evergreen_mc_program(rdev);
938 +
939 if (ASIC_IS_DCE5(rdev)) {
940 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
941 r = ni_init_microcode(rdev);
942 @@ -4708,7 +4710,6 @@ static int evergreen_startup(struct radeon_device *rdev)
943 if (r)
944 return r;
945
946 - evergreen_mc_program(rdev);
947 if (rdev->flags & RADEON_IS_AGP) {
948 evergreen_agp_enable(rdev);
949 } else {
950 @@ -4854,10 +4855,10 @@ int evergreen_resume(struct radeon_device *rdev)
951 int evergreen_suspend(struct radeon_device *rdev)
952 {
953 r600_audio_fini(rdev);
954 + r600_uvd_stop(rdev);
955 radeon_uvd_suspend(rdev);
956 r700_cp_stop(rdev);
957 r600_dma_stop(rdev);
958 - r600_uvd_rbc_stop(rdev);
959 evergreen_irq_suspend(rdev);
960 radeon_wb_disable(rdev);
961 evergreen_pcie_gart_disable(rdev);
962 @@ -4988,6 +4989,7 @@ void evergreen_fini(struct radeon_device *rdev)
963 radeon_ib_pool_fini(rdev);
964 radeon_irq_kms_fini(rdev);
965 evergreen_pcie_gart_fini(rdev);
966 + r600_uvd_stop(rdev);
967 radeon_uvd_fini(rdev);
968 r600_vram_scratch_fini(rdev);
969 radeon_gem_fini(rdev);
970 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
971 index 8458330..3bf43a1 100644
972 --- a/drivers/gpu/drm/radeon/ni.c
973 +++ b/drivers/gpu/drm/radeon/ni.c
974 @@ -1929,6 +1929,8 @@ static int cayman_startup(struct radeon_device *rdev)
975 /* enable pcie gen2 link */
976 evergreen_pcie_gen2_enable(rdev);
977
978 + evergreen_mc_program(rdev);
979 +
980 if (rdev->flags & RADEON_IS_IGP) {
981 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
982 r = ni_init_microcode(rdev);
983 @@ -1957,7 +1959,6 @@ static int cayman_startup(struct radeon_device *rdev)
984 if (r)
985 return r;
986
987 - evergreen_mc_program(rdev);
988 r = cayman_pcie_gart_enable(rdev);
989 if (r)
990 return r;
991 @@ -2133,7 +2134,7 @@ int cayman_suspend(struct radeon_device *rdev)
992 radeon_vm_manager_fini(rdev);
993 cayman_cp_enable(rdev, false);
994 cayman_dma_stop(rdev);
995 - r600_uvd_rbc_stop(rdev);
996 + r600_uvd_stop(rdev);
997 radeon_uvd_suspend(rdev);
998 evergreen_irq_suspend(rdev);
999 radeon_wb_disable(rdev);
1000 @@ -2265,6 +2266,7 @@ void cayman_fini(struct radeon_device *rdev)
1001 radeon_vm_manager_fini(rdev);
1002 radeon_ib_pool_fini(rdev);
1003 radeon_irq_kms_fini(rdev);
1004 + r600_uvd_stop(rdev);
1005 radeon_uvd_fini(rdev);
1006 cayman_pcie_gart_fini(rdev);
1007 r600_vram_scratch_fini(rdev);
1008 diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
1009 index b60004e..f19620b 100644
1010 --- a/drivers/gpu/drm/radeon/r600.c
1011 +++ b/drivers/gpu/drm/radeon/r600.c
1012 @@ -2675,12 +2675,29 @@ int r600_uvd_rbc_start(struct radeon_device *rdev)
1013 return 0;
1014 }
1015
1016 -void r600_uvd_rbc_stop(struct radeon_device *rdev)
1017 +void r600_uvd_stop(struct radeon_device *rdev)
1018 {
1019 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
1020
1021 /* force RBC into idle state */
1022 WREG32(UVD_RBC_RB_CNTL, 0x11010101);
1023 +
1024 + /* Stall UMC and register bus before resetting VCPU */
1025 + WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
1026 + WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
1027 + mdelay(1);
1028 +
1029 + /* put VCPU into reset */
1030 + WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
1031 + mdelay(5);
1032 +
1033 + /* disable VCPU clock */
1034 + WREG32(UVD_VCPU_CNTL, 0x0);
1035 +
1036 + /* Unstall UMC and register bus */
1037 + WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
1038 + WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
1039 +
1040 ring->ready = false;
1041 }
1042
1043 @@ -2700,6 +2717,11 @@ int r600_uvd_init(struct radeon_device *rdev)
1044 /* disable interupt */
1045 WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
1046
1047 + /* Stall UMC and register bus before resetting VCPU */
1048 + WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
1049 + WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
1050 + mdelay(1);
1051 +
1052 /* put LMI, VCPU, RBC etc... into reset */
1053 WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
1054 LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
1055 @@ -2729,10 +2751,6 @@ int r600_uvd_init(struct radeon_device *rdev)
1056 WREG32(UVD_MPC_SET_ALU, 0);
1057 WREG32(UVD_MPC_SET_MUX, 0x88);
1058
1059 - /* Stall UMC */
1060 - WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
1061 - WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
1062 -
1063 /* take all subblocks out of reset, except VCPU */
1064 WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
1065 mdelay(5);
1066 @@ -3206,6 +3224,8 @@ static int r600_startup(struct radeon_device *rdev)
1067 /* enable pcie gen2 link */
1068 r600_pcie_gen2_enable(rdev);
1069
1070 + r600_mc_program(rdev);
1071 +
1072 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1073 r = r600_init_microcode(rdev);
1074 if (r) {
1075 @@ -3218,7 +3238,6 @@ static int r600_startup(struct radeon_device *rdev)
1076 if (r)
1077 return r;
1078
1079 - r600_mc_program(rdev);
1080 if (rdev->flags & RADEON_IS_AGP) {
1081 r600_agp_enable(rdev);
1082 } else {
1083 diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
1084 index f48240b..b9b1139 100644
1085 --- a/drivers/gpu/drm/radeon/r600_hdmi.c
1086 +++ b/drivers/gpu/drm/radeon/r600_hdmi.c
1087 @@ -242,9 +242,15 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1088 /* according to the reg specs, this should DCE3.2 only, but in
1089 * practice it seems to cover DCE3.0 as well.
1090 */
1091 - WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
1092 - WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
1093 - WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
1094 + if (dig->dig_encoder == 0) {
1095 + WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
1096 + WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
1097 + WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
1098 + } else {
1099 + WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100);
1100 + WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
1101 + WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
1102 + }
1103 } else {
1104 /* according to the reg specs, this should be DCE2.0 and DCE3.0 */
1105 WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
1106 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
1107 index aad18e6..bdd9d56 100644
1108 --- a/drivers/gpu/drm/radeon/radeon.h
1109 +++ b/drivers/gpu/drm/radeon/radeon.h
1110 @@ -1146,7 +1146,6 @@ struct radeon_uvd {
1111 void *cpu_addr;
1112 uint64_t gpu_addr;
1113 void *saved_bo;
1114 - unsigned fw_size;
1115 atomic_t handles[RADEON_MAX_UVD_HANDLES];
1116 struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
1117 struct delayed_work idle_work;
1118 @@ -1686,6 +1685,7 @@ struct radeon_device {
1119 const struct firmware *rlc_fw; /* r6/700 RLC firmware */
1120 const struct firmware *mc_fw; /* NI MC firmware */
1121 const struct firmware *ce_fw; /* SI CE firmware */
1122 + const struct firmware *uvd_fw; /* UVD firmware */
1123 struct r600_blit r600_blit;
1124 struct r600_vram_scratch vram_scratch;
1125 int msi_enabled; /* msi enabled */
1126 diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
1127 index a72759e..34223fc 100644
1128 --- a/drivers/gpu/drm/radeon/radeon_asic.h
1129 +++ b/drivers/gpu/drm/radeon/radeon_asic.h
1130 @@ -399,7 +399,7 @@ uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
1131 /* uvd */
1132 int r600_uvd_init(struct radeon_device *rdev);
1133 int r600_uvd_rbc_start(struct radeon_device *rdev);
1134 -void r600_uvd_rbc_stop(struct radeon_device *rdev);
1135 +void r600_uvd_stop(struct radeon_device *rdev);
1136 int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
1137 void r600_uvd_fence_emit(struct radeon_device *rdev,
1138 struct radeon_fence *fence);
1139 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
1140 index 7ddb0ef..ddb8f8e 100644
1141 --- a/drivers/gpu/drm/radeon/radeon_fence.c
1142 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
1143 @@ -782,7 +782,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
1144
1145 } else {
1146 /* put fence directly behind firmware */
1147 - index = ALIGN(rdev->uvd.fw_size, 8);
1148 + index = ALIGN(rdev->uvd_fw->size, 8);
1149 rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
1150 rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
1151 }
1152 diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
1153 index 1b3a91b..97002a0 100644
1154 --- a/drivers/gpu/drm/radeon/radeon_uvd.c
1155 +++ b/drivers/gpu/drm/radeon/radeon_uvd.c
1156 @@ -55,7 +55,6 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work);
1157 int radeon_uvd_init(struct radeon_device *rdev)
1158 {
1159 struct platform_device *pdev;
1160 - const struct firmware *fw;
1161 unsigned long bo_size;
1162 const char *fw_name;
1163 int i, r;
1164 @@ -105,7 +104,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
1165 return -EINVAL;
1166 }
1167
1168 - r = request_firmware(&fw, fw_name, &pdev->dev);
1169 + r = request_firmware(&rdev->uvd_fw, fw_name, &pdev->dev);
1170 if (r) {
1171 dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
1172 fw_name);
1173 @@ -115,7 +114,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
1174
1175 platform_device_unregister(pdev);
1176
1177 - bo_size = RADEON_GPU_PAGE_ALIGN(fw->size + 8) +
1178 + bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
1179 RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
1180 r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
1181 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
1182 @@ -148,12 +147,6 @@ int radeon_uvd_init(struct radeon_device *rdev)
1183
1184 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
1185
1186 - rdev->uvd.fw_size = fw->size;
1187 - memset(rdev->uvd.cpu_addr, 0, bo_size);
1188 - memcpy(rdev->uvd.cpu_addr, fw->data, fw->size);
1189 -
1190 - release_firmware(fw);
1191 -
1192 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
1193 atomic_set(&rdev->uvd.handles[i], 0);
1194 rdev->uvd.filp[i] = NULL;
1195 @@ -177,33 +170,60 @@ void radeon_uvd_fini(struct radeon_device *rdev)
1196 }
1197
1198 radeon_bo_unref(&rdev->uvd.vcpu_bo);
1199 +
1200 + release_firmware(rdev->uvd_fw);
1201 }
1202
1203 int radeon_uvd_suspend(struct radeon_device *rdev)
1204 {
1205 unsigned size;
1206 + void *ptr;
1207 + int i;
1208
1209 if (rdev->uvd.vcpu_bo == NULL)
1210 return 0;
1211
1212 + for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
1213 + if (atomic_read(&rdev->uvd.handles[i]))
1214 + break;
1215 +
1216 + if (i == RADEON_MAX_UVD_HANDLES)
1217 + return 0;
1218 +
1219 size = radeon_bo_size(rdev->uvd.vcpu_bo);
1220 + size -= rdev->uvd_fw->size;
1221 +
1222 + ptr = rdev->uvd.cpu_addr;
1223 + ptr += rdev->uvd_fw->size;
1224 +
1225 rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
1226 - memcpy(rdev->uvd.saved_bo, rdev->uvd.cpu_addr, size);
1227 + memcpy(rdev->uvd.saved_bo, ptr, size);
1228
1229 return 0;
1230 }
1231
1232 int radeon_uvd_resume(struct radeon_device *rdev)
1233 {
1234 + unsigned size;
1235 + void *ptr;
1236 +
1237 if (rdev->uvd.vcpu_bo == NULL)
1238 return -EINVAL;
1239
1240 + memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
1241 +
1242 + size = radeon_bo_size(rdev->uvd.vcpu_bo);
1243 + size -= rdev->uvd_fw->size;
1244 +
1245 + ptr = rdev->uvd.cpu_addr;
1246 + ptr += rdev->uvd_fw->size;
1247 +
1248 if (rdev->uvd.saved_bo != NULL) {
1249 - unsigned size = radeon_bo_size(rdev->uvd.vcpu_bo);
1250 - memcpy(rdev->uvd.cpu_addr, rdev->uvd.saved_bo, size);
1251 + memcpy(ptr, rdev->uvd.saved_bo, size);
1252 kfree(rdev->uvd.saved_bo);
1253 rdev->uvd.saved_bo = NULL;
1254 - }
1255 + } else
1256 + memset(ptr, 0, size);
1257
1258 return 0;
1259 }
1260 @@ -218,8 +238,8 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
1261 {
1262 int i, r;
1263 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
1264 - if (rdev->uvd.filp[i] == filp) {
1265 - uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
1266 + uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
1267 + if (handle != 0 && rdev->uvd.filp[i] == filp) {
1268 struct radeon_fence *fence;
1269
1270 r = radeon_uvd_get_destroy_msg(rdev,
1271 diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
1272 index 30ea14e..bcc68ec 100644
1273 --- a/drivers/gpu/drm/radeon/rv770.c
1274 +++ b/drivers/gpu/drm/radeon/rv770.c
1275 @@ -813,7 +813,7 @@ int rv770_uvd_resume(struct radeon_device *rdev)
1276
1277 /* programm the VCPU memory controller bits 0-27 */
1278 addr = rdev->uvd.gpu_addr >> 3;
1279 - size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3;
1280 + size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
1281 WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
1282 WREG32(UVD_VCPU_CACHE_SIZE0, size);
1283
1284 @@ -1829,6 +1829,8 @@ static int rv770_startup(struct radeon_device *rdev)
1285 /* enable pcie gen2 link */
1286 rv770_pcie_gen2_enable(rdev);
1287
1288 + rv770_mc_program(rdev);
1289 +
1290 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1291 r = r600_init_microcode(rdev);
1292 if (r) {
1293 @@ -1841,7 +1843,6 @@ static int rv770_startup(struct radeon_device *rdev)
1294 if (r)
1295 return r;
1296
1297 - rv770_mc_program(rdev);
1298 if (rdev->flags & RADEON_IS_AGP) {
1299 rv770_agp_enable(rdev);
1300 } else {
1301 @@ -1983,6 +1984,7 @@ int rv770_resume(struct radeon_device *rdev)
1302 int rv770_suspend(struct radeon_device *rdev)
1303 {
1304 r600_audio_fini(rdev);
1305 + r600_uvd_stop(rdev);
1306 radeon_uvd_suspend(rdev);
1307 r700_cp_stop(rdev);
1308 r600_dma_stop(rdev);
1309 @@ -2098,6 +2100,7 @@ void rv770_fini(struct radeon_device *rdev)
1310 radeon_ib_pool_fini(rdev);
1311 radeon_irq_kms_fini(rdev);
1312 rv770_pcie_gart_fini(rdev);
1313 + r600_uvd_stop(rdev);
1314 radeon_uvd_fini(rdev);
1315 r600_vram_scratch_fini(rdev);
1316 radeon_gem_fini(rdev);
1317 diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
1318 index a1b0da6..1a96a16 100644
1319 --- a/drivers/gpu/drm/radeon/si.c
1320 +++ b/drivers/gpu/drm/radeon/si.c
1321 @@ -5270,6 +5270,8 @@ static int si_startup(struct radeon_device *rdev)
1322 struct radeon_ring *ring;
1323 int r;
1324
1325 + si_mc_program(rdev);
1326 +
1327 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
1328 !rdev->rlc_fw || !rdev->mc_fw) {
1329 r = si_init_microcode(rdev);
1330 @@ -5289,7 +5291,6 @@ static int si_startup(struct radeon_device *rdev)
1331 if (r)
1332 return r;
1333
1334 - si_mc_program(rdev);
1335 r = si_pcie_gart_enable(rdev);
1336 if (r)
1337 return r;
1338 @@ -5473,7 +5474,7 @@ int si_suspend(struct radeon_device *rdev)
1339 si_cp_enable(rdev, false);
1340 cayman_dma_stop(rdev);
1341 if (rdev->has_uvd) {
1342 - r600_uvd_rbc_stop(rdev);
1343 + r600_uvd_stop(rdev);
1344 radeon_uvd_suspend(rdev);
1345 }
1346 si_irq_suspend(rdev);
1347 @@ -5613,8 +5614,10 @@ void si_fini(struct radeon_device *rdev)
1348 radeon_vm_manager_fini(rdev);
1349 radeon_ib_pool_fini(rdev);
1350 radeon_irq_kms_fini(rdev);
1351 - if (rdev->has_uvd)
1352 + if (rdev->has_uvd) {
1353 + r600_uvd_stop(rdev);
1354 radeon_uvd_fini(rdev);
1355 + }
1356 si_pcie_gart_fini(rdev);
1357 r600_vram_scratch_fini(rdev);
1358 radeon_gem_fini(rdev);
1359 diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
1360 index b83bf4b..5863735 100644
1361 --- a/drivers/hwmon/adt7470.c
1362 +++ b/drivers/hwmon/adt7470.c
1363 @@ -215,7 +215,7 @@ static inline int adt7470_write_word_data(struct i2c_client *client, u8 reg,
1364 u16 value)
1365 {
1366 return i2c_smbus_write_byte_data(client, reg, value & 0xFF)
1367 - && i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
1368 + || i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
1369 }
1370
1371 static void adt7470_init_client(struct i2c_client *client)
1372 diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
1373 index 2039f23..6d8094d 100644
1374 --- a/drivers/i2c/busses/i2c-mxs.c
1375 +++ b/drivers/i2c/busses/i2c-mxs.c
1376 @@ -494,7 +494,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
1377 * based on this empirical measurement and a lot of previous frobbing.
1378 */
1379 i2c->cmd_err = 0;
1380 - if (msg->len < 8) {
1381 + if (0) { /* disable PIO mode until a proper fix is made */
1382 ret = mxs_i2c_pio_setup_xfer(adap, msg, flags);
1383 if (ret)
1384 mxs_i2c_reset(i2c);
1385 diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
1386 index 4851cc2..c4ff973 100644
1387 --- a/drivers/media/usb/em28xx/em28xx-i2c.c
1388 +++ b/drivers/media/usb/em28xx/em28xx-i2c.c
1389 @@ -726,7 +726,7 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
1390
1391 *eedata = data;
1392 *eedata_len = len;
1393 - dev_config = (void *)eedata;
1394 + dev_config = (void *)*eedata;
1395
1396 switch (le16_to_cpu(dev_config->chip_conf) >> 4 & 0x3) {
1397 case 0:
1398 diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
1399 index a60f6c1..50543f1 100644
1400 --- a/drivers/mtd/nand/Kconfig
1401 +++ b/drivers/mtd/nand/Kconfig
1402 @@ -95,7 +95,7 @@ config MTD_NAND_OMAP2
1403
1404 config MTD_NAND_OMAP_BCH
1405 depends on MTD_NAND && MTD_NAND_OMAP2 && ARCH_OMAP3
1406 - bool "Enable support for hardware BCH error correction"
1407 + tristate "Enable support for hardware BCH error correction"
1408 default n
1409 select BCH
1410 select BCH_CONST_PARAMS
1411 diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
1412 index 89178b8..9b60dc1 100644
1413 --- a/drivers/scsi/megaraid/megaraid_sas_base.c
1414 +++ b/drivers/scsi/megaraid/megaraid_sas_base.c
1415 @@ -3508,11 +3508,21 @@ static int megasas_init_fw(struct megasas_instance *instance)
1416 break;
1417 }
1418
1419 - /*
1420 - * We expect the FW state to be READY
1421 - */
1422 - if (megasas_transition_to_ready(instance, 0))
1423 - goto fail_ready_state;
1424 + if (megasas_transition_to_ready(instance, 0)) {
1425 + atomic_set(&instance->fw_reset_no_pci_access, 1);
1426 + instance->instancet->adp_reset
1427 + (instance, instance->reg_set);
1428 + atomic_set(&instance->fw_reset_no_pci_access, 0);
1429 + dev_info(&instance->pdev->dev,
1430 + "megasas: FW restarted successfully from %s!\n",
1431 + __func__);
1432 +
1433 + /*waitting for about 30 second before retry*/
1434 + ssleep(30);
1435 +
1436 + if (megasas_transition_to_ready(instance, 0))
1437 + goto fail_ready_state;
1438 + }
1439
1440 /* Check if MSI-X is supported while in ready state */
1441 msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
1442 diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
1443 index 1e3879d..0665f9c 100644
1444 --- a/drivers/scsi/nsp32.c
1445 +++ b/drivers/scsi/nsp32.c
1446 @@ -2899,7 +2899,7 @@ static void nsp32_do_bus_reset(nsp32_hw_data *data)
1447 * reset SCSI bus
1448 */
1449 nsp32_write1(base, SCSI_BUS_CONTROL, BUSCTL_RST);
1450 - udelay(RESET_HOLD_TIME);
1451 + mdelay(RESET_HOLD_TIME / 1000);
1452 nsp32_write1(base, SCSI_BUS_CONTROL, 0);
1453 for(i = 0; i < 5; i++) {
1454 intrdat = nsp32_read2(base, IRQ_STATUS); /* dummy read */
1455 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
1456 index 3b1ea34..eaa808e 100644
1457 --- a/drivers/scsi/scsi.c
1458 +++ b/drivers/scsi/scsi.c
1459 @@ -1031,6 +1031,9 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
1460 {
1461 int i, result;
1462
1463 + if (sdev->skip_vpd_pages)
1464 + goto fail;
1465 +
1466 /* Ask for all the pages supported by this device */
1467 result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
1468 if (result)
1469 diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
1470 index 2168258..74b88ef 100644
1471 --- a/drivers/scsi/virtio_scsi.c
1472 +++ b/drivers/scsi/virtio_scsi.c
1473 @@ -751,7 +751,7 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
1474
1475 vscsi->affinity_hint_set = true;
1476 } else {
1477 - for (i = 0; i < vscsi->num_queues - VIRTIO_SCSI_VQ_BASE; i++)
1478 + for (i = 0; i < vscsi->num_queues; i++)
1479 virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
1480
1481 vscsi->affinity_hint_set = false;
1482 diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
1483 index dcceed2..81972fa 100644
1484 --- a/drivers/staging/zcache/zcache-main.c
1485 +++ b/drivers/staging/zcache/zcache-main.c
1486 @@ -1811,10 +1811,12 @@ static int zcache_comp_init(void)
1487 #else
1488 if (*zcache_comp_name != '\0') {
1489 ret = crypto_has_comp(zcache_comp_name, 0, 0);
1490 - if (!ret)
1491 + if (!ret) {
1492 pr_info("zcache: %s not supported\n",
1493 zcache_comp_name);
1494 - goto out;
1495 + ret = 1;
1496 + goto out;
1497 + }
1498 }
1499 if (!ret)
1500 strcpy(zcache_comp_name, "lzo");
1501 diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
1502 index 1742ce5..a333d44 100644
1503 --- a/drivers/staging/zram/zram_drv.c
1504 +++ b/drivers/staging/zram/zram_drv.c
1505 @@ -432,7 +432,7 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio)
1506 end = start + (bio->bi_size >> SECTOR_SHIFT);
1507 bound = zram->disksize >> SECTOR_SHIFT;
1508 /* out of range range */
1509 - if (unlikely(start >= bound || end >= bound || start > end))
1510 + if (unlikely(start >= bound || end > bound || start > end))
1511 return 0;
1512
1513 /* I/O request is valid */
1514 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
1515 index b93fc88..da2905a 100644
1516 --- a/drivers/usb/core/hub.c
1517 +++ b/drivers/usb/core/hub.c
1518 @@ -4796,7 +4796,8 @@ static void hub_events(void)
1519 hub->ports[i - 1]->child;
1520
1521 dev_dbg(hub_dev, "warm reset port %d\n", i);
1522 - if (!udev) {
1523 + if (!udev || !(portstatus &
1524 + USB_PORT_STAT_CONNECTION)) {
1525 status = hub_port_reset(hub, i,
1526 NULL, HUB_BH_RESET_TIME,
1527 true);
1528 @@ -4806,8 +4807,8 @@ static void hub_events(void)
1529 usb_lock_device(udev);
1530 status = usb_reset_device(udev);
1531 usb_unlock_device(udev);
1532 + connect_change = 0;
1533 }
1534 - connect_change = 0;
1535 }
1536
1537 if (connect_change)
1538 diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
1539 index c276ac9..cf68596 100644
1540 --- a/fs/btrfs/tree-log.c
1541 +++ b/fs/btrfs/tree-log.c
1542 @@ -3728,8 +3728,9 @@ next_slot:
1543 }
1544
1545 log_extents:
1546 + btrfs_release_path(path);
1547 + btrfs_release_path(dst_path);
1548 if (fast_search) {
1549 - btrfs_release_path(dst_path);
1550 ret = btrfs_log_changed_extents(trans, root, inode, dst_path);
1551 if (ret) {
1552 err = ret;
1553 @@ -3746,8 +3747,6 @@ log_extents:
1554 }
1555
1556 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
1557 - btrfs_release_path(path);
1558 - btrfs_release_path(dst_path);
1559 ret = log_directory_changes(trans, root, inode, path, dst_path);
1560 if (ret) {
1561 err = ret;
1562 diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
1563 index f59d0d5..5c807b2 100644
1564 --- a/fs/cifs/cifsencrypt.c
1565 +++ b/fs/cifs/cifsencrypt.c
1566 @@ -389,7 +389,7 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
1567 if (blobptr + attrsize > blobend)
1568 break;
1569 if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
1570 - if (!attrsize)
1571 + if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN)
1572 break;
1573 if (!ses->domainName) {
1574 ses->domainName =
1575 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
1576 index 4f07f6f..ea3a0b3 100644
1577 --- a/fs/cifs/cifsglob.h
1578 +++ b/fs/cifs/cifsglob.h
1579 @@ -44,6 +44,7 @@
1580 #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1)
1581 #define MAX_SERVER_SIZE 15
1582 #define MAX_SHARE_SIZE 80
1583 +#define CIFS_MAX_DOMAINNAME_LEN 256 /* max domain name length */
1584 #define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */
1585 #define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */
1586
1587 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
1588 index e3bc39b..d6a5c5a 100644
1589 --- a/fs/cifs/connect.c
1590 +++ b/fs/cifs/connect.c
1591 @@ -1662,7 +1662,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1592 if (string == NULL)
1593 goto out_nomem;
1594
1595 - if (strnlen(string, 256) == 256) {
1596 + if (strnlen(string, CIFS_MAX_DOMAINNAME_LEN)
1597 + == CIFS_MAX_DOMAINNAME_LEN) {
1598 printk(KERN_WARNING "CIFS: domain name too"
1599 " long\n");
1600 goto cifs_parse_mount_err;
1601 @@ -2288,8 +2289,8 @@ cifs_put_smb_ses(struct cifs_ses *ses)
1602
1603 #ifdef CONFIG_KEYS
1604
1605 -/* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */
1606 -#define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1)
1607 +/* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */
1608 +#define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1)
1609
1610 /* Populate username and pw fields from keyring if possible */
1611 static int
1612 diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
1613 index 770d5a9..036279c 100644
1614 --- a/fs/cifs/readdir.c
1615 +++ b/fs/cifs/readdir.c
1616 @@ -111,6 +111,14 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
1617 return;
1618 }
1619
1620 + /*
1621 + * If we know that the inode will need to be revalidated immediately,
1622 + * then don't create a new dentry for it. We'll end up doing an on
1623 + * the wire call either way and this spares us an invalidation.
1624 + */
1625 + if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
1626 + return;
1627 +
1628 dentry = d_alloc(parent, name);
1629 if (!dentry)
1630 return;
1631 diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
1632 index f230571..8edc9eb 100644
1633 --- a/fs/cifs/sess.c
1634 +++ b/fs/cifs/sess.c
1635 @@ -198,7 +198,7 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
1636 bytes_ret = 0;
1637 } else
1638 bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName,
1639 - 256, nls_cp);
1640 + CIFS_MAX_DOMAINNAME_LEN, nls_cp);
1641 bcc_ptr += 2 * bytes_ret;
1642 bcc_ptr += 2; /* account for null terminator */
1643
1644 @@ -256,8 +256,8 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
1645
1646 /* copy domain */
1647 if (ses->domainName != NULL) {
1648 - strncpy(bcc_ptr, ses->domainName, 256);
1649 - bcc_ptr += strnlen(ses->domainName, 256);
1650 + strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
1651 + bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
1652 } /* else we will send a null domain name
1653 so the server will default to its own domain */
1654 *bcc_ptr = 0;
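
The cifs hunks above replace the scattered literal 256 with CIFS_MAX_DOMAINNAME_LEN and reject server-supplied NTLMSSP domain-name attributes that reach that limit, so the later strncpy/strnlen users always see a bounded string. A user-space sketch of the bounded handling, under the same limit, with hypothetical helper names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CIFS_MAX_DOMAINNAME_LEN 256   /* same bound the patch introduces */

/* sketch: accept a domain-name attribute only if it fits within the limit */
static char *dup_domain_name(const char *attr, size_t attrsize)
{
	char *name;

	if (attrsize == 0 || attrsize >= CIFS_MAX_DOMAINNAME_LEN)
		return NULL;                  /* mirrors the new reject path */

	name = malloc(attrsize + 1);
	if (!name)
		return NULL;
	memcpy(name, attr, attrsize);
	name[attrsize] = '\0';
	return name;
}

int main(void)
{
	char huge[300];
	char *ok;

	memset(huge, 'A', sizeof(huge));
	printf("300-byte domain accepted? %s\n",
	       dup_domain_name(huge, sizeof(huge)) ? "yes" : "no");

	ok = dup_domain_name("EXAMPLE", 7);
	printf("short domain accepted?    %s\n", ok ? "yes" : "no");
	free(ok);
	return 0;
}
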
1655 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
1656 index 4888cb3..c7c83ff 100644
1657 --- a/fs/debugfs/inode.c
1658 +++ b/fs/debugfs/inode.c
1659 @@ -533,8 +533,7 @@ EXPORT_SYMBOL_GPL(debugfs_remove);
1660 */
1661 void debugfs_remove_recursive(struct dentry *dentry)
1662 {
1663 - struct dentry *child;
1664 - struct dentry *parent;
1665 + struct dentry *child, *next, *parent;
1666
1667 if (IS_ERR_OR_NULL(dentry))
1668 return;
1669 @@ -544,61 +543,37 @@ void debugfs_remove_recursive(struct dentry *dentry)
1670 return;
1671
1672 parent = dentry;
1673 + down:
1674 mutex_lock(&parent->d_inode->i_mutex);
1675 + list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) {
1676 + if (!debugfs_positive(child))
1677 + continue;
1678
1679 - while (1) {
1680 - /*
1681 - * When all dentries under "parent" has been removed,
1682 - * walk up the tree until we reach our starting point.
1683 - */
1684 - if (list_empty(&parent->d_subdirs)) {
1685 - mutex_unlock(&parent->d_inode->i_mutex);
1686 - if (parent == dentry)
1687 - break;
1688 - parent = parent->d_parent;
1689 - mutex_lock(&parent->d_inode->i_mutex);
1690 - }
1691 - child = list_entry(parent->d_subdirs.next, struct dentry,
1692 - d_u.d_child);
1693 - next_sibling:
1694 -
1695 - /*
1696 - * If "child" isn't empty, walk down the tree and
1697 - * remove all its descendants first.
1698 - */
1699 + /* perhaps simple_empty(child) makes more sense */
1700 if (!list_empty(&child->d_subdirs)) {
1701 mutex_unlock(&parent->d_inode->i_mutex);
1702 parent = child;
1703 - mutex_lock(&parent->d_inode->i_mutex);
1704 - continue;
1705 + goto down;
1706 }
1707 - __debugfs_remove(child, parent);
1708 - if (parent->d_subdirs.next == &child->d_u.d_child) {
1709 - /*
1710 - * Try the next sibling.
1711 - */
1712 - if (child->d_u.d_child.next != &parent->d_subdirs) {
1713 - child = list_entry(child->d_u.d_child.next,
1714 - struct dentry,
1715 - d_u.d_child);
1716 - goto next_sibling;
1717 - }
1718 -
1719 - /*
1720 - * Avoid infinite loop if we fail to remove
1721 - * one dentry.
1722 - */
1723 - mutex_unlock(&parent->d_inode->i_mutex);
1724 - break;
1725 - }
1726 - simple_release_fs(&debugfs_mount, &debugfs_mount_count);
1727 + up:
1728 + if (!__debugfs_remove(child, parent))
1729 + simple_release_fs(&debugfs_mount, &debugfs_mount_count);
1730 }
1731
1732 - parent = dentry->d_parent;
1733 + mutex_unlock(&parent->d_inode->i_mutex);
1734 + child = parent;
1735 + parent = parent->d_parent;
1736 mutex_lock(&parent->d_inode->i_mutex);
1737 - __debugfs_remove(dentry, parent);
1738 +
1739 + if (child != dentry) {
1740 + next = list_entry(child->d_u.d_child.next, struct dentry,
1741 + d_u.d_child);
1742 + goto up;
1743 + }
1744 +
1745 + if (!__debugfs_remove(child, parent))
1746 + simple_release_fs(&debugfs_mount, &debugfs_mount_count);
1747 mutex_unlock(&parent->d_inode->i_mutex);
1748 - simple_release_fs(&debugfs_mount, &debugfs_mount_count);
1749 }
1750 EXPORT_SYMBOL_GPL(debugfs_remove_recursive);
1751
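
The debugfs rewrite walks the dentry tree iteratively with explicit down:/up: labels instead of repeatedly rescanning parent->d_subdirs, skips negative dentries, and no longer needs the old infinite-loop escape hatch. A rough user-space analogue of the same goto-driven traversal over a toy first-child/next-sibling tree (illustrative only; no locking, no reference counting):

#include <stdio.h>
#include <stdlib.h>

/* toy dentry: first-child / next-sibling tree with parent back-pointers */
struct tnode {
	const char *name;
	struct tnode *parent;
	struct tnode *child;    /* first child, NULL for a leaf */
	struct tnode *next;     /* next sibling */
};

static struct tnode *mknode(const char *name, struct tnode *parent)
{
	struct tnode *n = calloc(1, sizeof(*n));

	if (!n)
		exit(1);
	n->name = name;
	n->parent = parent;
	if (parent) {                 /* push onto parent's child list */
		n->next = parent->child;
		parent->child = n;
	}
	return n;
}

/* iterative removal in the spirit of the rewritten debugfs_remove_recursive() */
static void remove_tree(struct tnode *start)
{
	struct tnode *parent = start, *cur, *next;

down:
	cur = parent->child;
	while (cur) {
		next = cur->next;
		if (cur->child) {         /* non-empty directory: walk down first */
			parent = cur;
			goto down;
		}
up:
		printf("removing %s\n", cur->name);
		free(cur);
		cur = next;
	}

	/* every child of 'parent' is gone; step back towards 'start' */
	cur = parent;
	parent = parent->parent;
	if (cur != start) {
		next = cur->next;         /* resume the interrupted sibling walk */
		goto up;
	}
	printf("removing %s\n", cur->name);
	free(cur);
}

int main(void)
{
	struct tnode *root = mknode("root", NULL);
	struct tnode *a = mknode("a", root);

	mknode("a1", a);
	mknode("a2", a);
	mknode("b", root);
	remove_tree(root);        /* deletes b, a2, a1, a, root without recursion */
	return 0;
}
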
1752 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
1753 index fddf3d9..dc1e030 100644
1754 --- a/fs/ext4/extents.c
1755 +++ b/fs/ext4/extents.c
1756 @@ -4389,7 +4389,7 @@ void ext4_ext_truncate(handle_t *handle, struct inode *inode)
1757 retry:
1758 err = ext4_es_remove_extent(inode, last_block,
1759 EXT_MAX_BLOCKS - last_block);
1760 - if (err == ENOMEM) {
1761 + if (err == -ENOMEM) {
1762 cond_resched();
1763 congestion_wait(BLK_RW_ASYNC, HZ/50);
1764 goto retry;
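
Kernel helpers report failure as negative errno values, so the retry condition has to compare against -ENOMEM; the old "err == ENOMEM" test could never be true and the retry path never ran. A tiny demo of the sign convention:

#include <errno.h>
#include <stdio.h>

/* stand-in for a kernel-style helper that reports failure as -ENOMEM */
static int remove_extent(int simulate_oom)
{
	return simulate_oom ? -ENOMEM : 0;
}

int main(void)
{
	int err = remove_extent(1);

	printf("err == ENOMEM  -> %s (never matches: ENOMEM is positive)\n",
	       err == ENOMEM ? "retry" : "no retry");
	printf("err == -ENOMEM -> %s\n",
	       err == -ENOMEM ? "retry" : "no retry");
	return 0;
}
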
1765 diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
1766 index 00a818d..3da3bf1 100644
1767 --- a/fs/ext4/ialloc.c
1768 +++ b/fs/ext4/ialloc.c
1769 @@ -734,11 +734,8 @@ repeat_in_this_group:
1770 ino = ext4_find_next_zero_bit((unsigned long *)
1771 inode_bitmap_bh->b_data,
1772 EXT4_INODES_PER_GROUP(sb), ino);
1773 - if (ino >= EXT4_INODES_PER_GROUP(sb)) {
1774 - if (++group == ngroups)
1775 - group = 0;
1776 - continue;
1777 - }
1778 + if (ino >= EXT4_INODES_PER_GROUP(sb))
1779 + goto next_group;
1780 if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) {
1781 ext4_error(sb, "reserved inode found cleared - "
1782 "inode=%lu", ino + 1);
1783 @@ -768,6 +765,9 @@ repeat_in_this_group:
1784 goto got; /* we grabbed the inode! */
1785 if (ino < EXT4_INODES_PER_GROUP(sb))
1786 goto repeat_in_this_group;
1787 +next_group:
1788 + if (++group == ngroups)
1789 + group = 0;
1790 }
1791 err = -ENOSPC;
1792 goto out;
1793 diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
1794 index 9491ac0..c0427e2 100644
1795 --- a/fs/ext4/ioctl.c
1796 +++ b/fs/ext4/ioctl.c
1797 @@ -77,8 +77,10 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
1798 memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
1799 memswap(&ei1->i_flags, &ei2->i_flags, sizeof(ei1->i_flags));
1800 memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize));
1801 - memswap(&ei1->i_es_tree, &ei2->i_es_tree, sizeof(ei1->i_es_tree));
1802 - memswap(&ei1->i_es_lru_nr, &ei2->i_es_lru_nr, sizeof(ei1->i_es_lru_nr));
1803 + ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
1804 + ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
1805 + ext4_es_lru_del(inode1);
1806 + ext4_es_lru_del(inode2);
1807
1808 isize = i_size_read(inode1);
1809 i_size_write(inode1, i_size_read(inode2));
1810 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1811 index 6681c03..3f7c39e 100644
1812 --- a/fs/ext4/super.c
1813 +++ b/fs/ext4/super.c
1814 @@ -1341,7 +1341,7 @@ static const struct mount_opts {
1815 {Opt_delalloc, EXT4_MOUNT_DELALLOC,
1816 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
1817 {Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
1818 - MOPT_EXT4_ONLY | MOPT_CLEAR | MOPT_EXPLICIT},
1819 + MOPT_EXT4_ONLY | MOPT_CLEAR},
1820 {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
1821 MOPT_EXT4_ONLY | MOPT_SET},
1822 {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
1823 @@ -3445,7 +3445,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1824 }
1825 if (test_opt(sb, DIOREAD_NOLOCK)) {
1826 ext4_msg(sb, KERN_ERR, "can't mount with "
1827 - "both data=journal and delalloc");
1828 + "both data=journal and dioread_nolock");
1829 goto failed_mount;
1830 }
1831 if (test_opt(sb, DELALLOC))
1832 @@ -4646,6 +4646,21 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
1833 goto restore_opts;
1834 }
1835
1836 + if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
1837 + if (test_opt2(sb, EXPLICIT_DELALLOC)) {
1838 + ext4_msg(sb, KERN_ERR, "can't mount with "
1839 + "both data=journal and delalloc");
1840 + err = -EINVAL;
1841 + goto restore_opts;
1842 + }
1843 + if (test_opt(sb, DIOREAD_NOLOCK)) {
1844 + ext4_msg(sb, KERN_ERR, "can't mount with "
1845 + "both data=journal and dioread_nolock");
1846 + err = -EINVAL;
1847 + goto restore_opts;
1848 + }
1849 + }
1850 +
1851 if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
1852 ext4_abort(sb, "Abort forced by user");
1853
1854 @@ -5400,6 +5415,7 @@ static void __exit ext4_exit_fs(void)
1855 kset_unregister(ext4_kset);
1856 ext4_exit_system_zone();
1857 ext4_exit_pageio();
1858 + ext4_exit_es();
1859 }
1860
1861 MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
1862 diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
1863 index 01bfe76..41e491b 100644
1864 --- a/fs/lockd/clntlock.c
1865 +++ b/fs/lockd/clntlock.c
1866 @@ -64,12 +64,17 @@ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init)
1867 nlm_init->protocol, nlm_version,
1868 nlm_init->hostname, nlm_init->noresvport,
1869 nlm_init->net);
1870 - if (host == NULL) {
1871 - lockd_down(nlm_init->net);
1872 - return ERR_PTR(-ENOLCK);
1873 - }
1874 + if (host == NULL)
1875 + goto out_nohost;
1876 + if (host->h_rpcclnt == NULL && nlm_bind_host(host) == NULL)
1877 + goto out_nobind;
1878
1879 return host;
1880 +out_nobind:
1881 + nlmclnt_release_host(host);
1882 +out_nohost:
1883 + lockd_down(nlm_init->net);
1884 + return ERR_PTR(-ENOLCK);
1885 }
1886 EXPORT_SYMBOL_GPL(nlmclnt_init);
1887
1888 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
1889 index 9760ecb..acd3947 100644
1890 --- a/fs/lockd/clntproc.c
1891 +++ b/fs/lockd/clntproc.c
1892 @@ -125,14 +125,15 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
1893 {
1894 struct nlm_args *argp = &req->a_args;
1895 struct nlm_lock *lock = &argp->lock;
1896 + char *nodename = req->a_host->h_rpcclnt->cl_nodename;
1897
1898 nlmclnt_next_cookie(&argp->cookie);
1899 memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh));
1900 - lock->caller = utsname()->nodename;
1901 + lock->caller = nodename;
1902 lock->oh.data = req->a_owner;
1903 lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
1904 (unsigned int)fl->fl_u.nfs_fl.owner->pid,
1905 - utsname()->nodename);
1906 + nodename);
1907 lock->svid = fl->fl_u.nfs_fl.owner->pid;
1908 lock->fl.fl_start = fl->fl_start;
1909 lock->fl.fl_end = fl->fl_end;
1910 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
1911 index 33532f7..1d48974 100644
1912 --- a/fs/reiserfs/procfs.c
1913 +++ b/fs/reiserfs/procfs.c
1914 @@ -19,12 +19,13 @@
1915 /*
1916 * LOCKING:
1917 *
1918 - * We rely on new Alexander Viro's super-block locking.
1919 + * These guys are evicted from procfs as the very first step in ->kill_sb().
1920 *
1921 */
1922
1923 -static int show_version(struct seq_file *m, struct super_block *sb)
1924 +static int show_version(struct seq_file *m, void *unused)
1925 {
1926 + struct super_block *sb = m->private;
1927 char *format;
1928
1929 if (REISERFS_SB(sb)->s_properties & (1 << REISERFS_3_6)) {
1930 @@ -66,8 +67,9 @@ static int show_version(struct seq_file *m, struct super_block *sb)
1931 #define DJP( x ) le32_to_cpu( jp -> x )
1932 #define JF( x ) ( r -> s_journal -> x )
1933
1934 -static int show_super(struct seq_file *m, struct super_block *sb)
1935 +static int show_super(struct seq_file *m, void *unused)
1936 {
1937 + struct super_block *sb = m->private;
1938 struct reiserfs_sb_info *r = REISERFS_SB(sb);
1939
1940 seq_printf(m, "state: \t%s\n"
1941 @@ -128,8 +130,9 @@ static int show_super(struct seq_file *m, struct super_block *sb)
1942 return 0;
1943 }
1944
1945 -static int show_per_level(struct seq_file *m, struct super_block *sb)
1946 +static int show_per_level(struct seq_file *m, void *unused)
1947 {
1948 + struct super_block *sb = m->private;
1949 struct reiserfs_sb_info *r = REISERFS_SB(sb);
1950 int level;
1951
1952 @@ -186,8 +189,9 @@ static int show_per_level(struct seq_file *m, struct super_block *sb)
1953 return 0;
1954 }
1955
1956 -static int show_bitmap(struct seq_file *m, struct super_block *sb)
1957 +static int show_bitmap(struct seq_file *m, void *unused)
1958 {
1959 + struct super_block *sb = m->private;
1960 struct reiserfs_sb_info *r = REISERFS_SB(sb);
1961
1962 seq_printf(m, "free_block: %lu\n"
1963 @@ -218,8 +222,9 @@ static int show_bitmap(struct seq_file *m, struct super_block *sb)
1964 return 0;
1965 }
1966
1967 -static int show_on_disk_super(struct seq_file *m, struct super_block *sb)
1968 +static int show_on_disk_super(struct seq_file *m, void *unused)
1969 {
1970 + struct super_block *sb = m->private;
1971 struct reiserfs_sb_info *sb_info = REISERFS_SB(sb);
1972 struct reiserfs_super_block *rs = sb_info->s_rs;
1973 int hash_code = DFL(s_hash_function_code);
1974 @@ -261,8 +266,9 @@ static int show_on_disk_super(struct seq_file *m, struct super_block *sb)
1975 return 0;
1976 }
1977
1978 -static int show_oidmap(struct seq_file *m, struct super_block *sb)
1979 +static int show_oidmap(struct seq_file *m, void *unused)
1980 {
1981 + struct super_block *sb = m->private;
1982 struct reiserfs_sb_info *sb_info = REISERFS_SB(sb);
1983 struct reiserfs_super_block *rs = sb_info->s_rs;
1984 unsigned int mapsize = le16_to_cpu(rs->s_v1.s_oid_cursize);
1985 @@ -291,8 +297,9 @@ static int show_oidmap(struct seq_file *m, struct super_block *sb)
1986 return 0;
1987 }
1988
1989 -static int show_journal(struct seq_file *m, struct super_block *sb)
1990 +static int show_journal(struct seq_file *m, void *unused)
1991 {
1992 + struct super_block *sb = m->private;
1993 struct reiserfs_sb_info *r = REISERFS_SB(sb);
1994 struct reiserfs_super_block *rs = r->s_rs;
1995 struct journal_params *jp = &rs->s_v1.s_journal;
1996 @@ -383,92 +390,24 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
1997 return 0;
1998 }
1999
2000 -/* iterator */
2001 -static int test_sb(struct super_block *sb, void *data)
2002 -{
2003 - return data == sb;
2004 -}
2005 -
2006 -static int set_sb(struct super_block *sb, void *data)
2007 -{
2008 - return -ENOENT;
2009 -}
2010 -
2011 -struct reiserfs_seq_private {
2012 - struct super_block *sb;
2013 - int (*show) (struct seq_file *, struct super_block *);
2014 -};
2015 -
2016 -static void *r_start(struct seq_file *m, loff_t * pos)
2017 -{
2018 - struct reiserfs_seq_private *priv = m->private;
2019 - loff_t l = *pos;
2020 -
2021 - if (l)
2022 - return NULL;
2023 -
2024 - if (IS_ERR(sget(&reiserfs_fs_type, test_sb, set_sb, 0, priv->sb)))
2025 - return NULL;
2026 -
2027 - up_write(&priv->sb->s_umount);
2028 - return priv->sb;
2029 -}
2030 -
2031 -static void *r_next(struct seq_file *m, void *v, loff_t * pos)
2032 -{
2033 - ++*pos;
2034 - if (v)
2035 - deactivate_super(v);
2036 - return NULL;
2037 -}
2038 -
2039 -static void r_stop(struct seq_file *m, void *v)
2040 -{
2041 - if (v)
2042 - deactivate_super(v);
2043 -}
2044 -
2045 -static int r_show(struct seq_file *m, void *v)
2046 -{
2047 - struct reiserfs_seq_private *priv = m->private;
2048 - return priv->show(m, v);
2049 -}
2050 -
2051 -static const struct seq_operations r_ops = {
2052 - .start = r_start,
2053 - .next = r_next,
2054 - .stop = r_stop,
2055 - .show = r_show,
2056 -};
2057 -
2058 static int r_open(struct inode *inode, struct file *file)
2059 {
2060 - struct reiserfs_seq_private *priv;
2061 - int ret = seq_open_private(file, &r_ops,
2062 - sizeof(struct reiserfs_seq_private));
2063 -
2064 - if (!ret) {
2065 - struct seq_file *m = file->private_data;
2066 - priv = m->private;
2067 - priv->sb = proc_get_parent_data(inode);
2068 - priv->show = PDE_DATA(inode);
2069 - }
2070 - return ret;
2071 + return single_open(file, PDE_DATA(inode),
2072 + proc_get_parent_data(inode));
2073 }
2074
2075 static const struct file_operations r_file_operations = {
2076 .open = r_open,
2077 .read = seq_read,
2078 .llseek = seq_lseek,
2079 - .release = seq_release_private,
2080 - .owner = THIS_MODULE,
2081 + .release = single_release,
2082 };
2083
2084 static struct proc_dir_entry *proc_info_root = NULL;
2085 static const char proc_info_root_name[] = "fs/reiserfs";
2086
2087 static void add_file(struct super_block *sb, char *name,
2088 - int (*func) (struct seq_file *, struct super_block *))
2089 + int (*func) (struct seq_file *, void *))
2090 {
2091 proc_create_data(name, 0, REISERFS_SB(sb)->procdir,
2092 &r_file_operations, func);
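
The reiserfs procfs conversion drops the custom seq_operations iterator (which pinned the superblock via sget()) in favour of single_open(): the show callback now travels as the proc entry's data (PDE_DATA) and the superblock as the parent directory's data (proc_get_parent_data), which is safe because reiserfs_kill_sb() now removes the proc entries first. A hedged kernel-style sketch of the same single_open() pattern, not buildable outside a 3.10-era module, with illustrative names:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* one-shot show callback: m->private is whatever was handed to single_open() */
static int example_show(struct seq_file *m, void *unused)
{
	struct super_block *sb = m->private;

	seq_printf(m, "blocksize: %lu\n", sb->s_blocksize);
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* as in the patch: the show callback is the entry's data, the
	 * superblock is the parent directory's data */
	return single_open(file, PDE_DATA(inode), proc_get_parent_data(inode));
}

static const struct file_operations example_fops = {
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* registration sketch, assuming the parent directory was created with
 * proc_mkdir_data(name, 0, proc_root, sb):
 *
 *	proc_create_data("example", 0, parent_dir, &example_fops, example_show);
 */
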
2093 diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
2094 index f8a23c3..e2e202a 100644
2095 --- a/fs/reiserfs/super.c
2096 +++ b/fs/reiserfs/super.c
2097 @@ -499,6 +499,7 @@ int remove_save_link(struct inode *inode, int truncate)
2098 static void reiserfs_kill_sb(struct super_block *s)
2099 {
2100 if (REISERFS_SB(s)) {
2101 + reiserfs_proc_info_done(s);
2102 /*
2103 * Force any pending inode evictions to occur now. Any
2104 * inodes to be removed that have extended attributes
2105 @@ -554,8 +555,6 @@ static void reiserfs_put_super(struct super_block *s)
2106 REISERFS_SB(s)->reserved_blocks);
2107 }
2108
2109 - reiserfs_proc_info_done(s);
2110 -
2111 reiserfs_write_unlock(s);
2112 mutex_destroy(&REISERFS_SB(s)->lock);
2113 kfree(s->s_fs_info);
2114 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
2115 index 4372658..44cdc11 100644
2116 --- a/include/linux/ftrace_event.h
2117 +++ b/include/linux/ftrace_event.h
2118 @@ -78,6 +78,11 @@ struct trace_iterator {
2119 /* trace_seq for __print_flags() and __print_symbolic() etc. */
2120 struct trace_seq tmp_seq;
2121
2122 + cpumask_var_t started;
2123 +
2124 + /* it's true when current open file is snapshot */
2125 + bool snapshot;
2126 +
2127 /* The below is zeroed out in pipe_read */
2128 struct trace_seq seq;
2129 struct trace_entry *ent;
2130 @@ -90,10 +95,7 @@ struct trace_iterator {
2131 loff_t pos;
2132 long idx;
2133
2134 - cpumask_var_t started;
2135 -
2136 - /* it's true when current open file is snapshot */
2137 - bool snapshot;
2138 + /* All new field here will be zeroed out in pipe_read */
2139 };
2140
2141 enum trace_iter_flags {
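
tracing_read_pipe() zeroes the iterator from the seq member to the end of the struct (the memset/offsetof visible in the trace.c hunk below, which also adds cpumask_clear for the started mask), so any field placed at or after seq is wiped on every read; moving started and snapshot above seq keeps them stable across reads. A small demo of that offsetof-based partial clear with a stripped-down struct:

#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct iter {
	int snapshot;       /* must survive reads: placed before 'seq' */
	char seq[16];       /* everything from here on is per-read scratch */
	long pos;
};

int main(void)
{
	struct iter it = { .snapshot = 1, .pos = 42 };

	/* mirrors: memset(&iter->seq, 0,
	 *                 sizeof(*iter) - offsetof(struct trace_iterator, seq)); */
	memset(&it.seq, 0, sizeof(it) - offsetof(struct iter, seq));

	printf("snapshot=%d (preserved), pos=%ld (cleared)\n", it.snapshot, it.pos);
	return 0;
}
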
2142 diff --git a/include/linux/regmap.h b/include/linux/regmap.h
2143 index 02d84e2..f91bb41 100644
2144 --- a/include/linux/regmap.h
2145 +++ b/include/linux/regmap.h
2146 @@ -15,6 +15,7 @@
2147
2148 #include <linux/list.h>
2149 #include <linux/rbtree.h>
2150 +#include <linux/err.h>
2151
2152 struct module;
2153 struct device;
2154 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
2155 index 84ca436..9faf0f4 100644
2156 --- a/include/linux/sunrpc/sched.h
2157 +++ b/include/linux/sunrpc/sched.h
2158 @@ -130,6 +130,7 @@ struct rpc_task_setup {
2159 #define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */
2160 #define RPC_TASK_SENT 0x0800 /* message was sent */
2161 #define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */
2162 +#define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */
2163
2164 #define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
2165 #define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
2166 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2167 index f7bc3ce..06a5bce 100644
2168 --- a/kernel/trace/trace.c
2169 +++ b/kernel/trace/trace.c
2170 @@ -232,23 +232,43 @@ int filter_current_check_discard(struct ring_buffer *buffer,
2171 }
2172 EXPORT_SYMBOL_GPL(filter_current_check_discard);
2173
2174 -cycle_t ftrace_now(int cpu)
2175 +cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
2176 {
2177 u64 ts;
2178
2179 /* Early boot up does not have a buffer yet */
2180 - if (!global_trace.trace_buffer.buffer)
2181 + if (!buf->buffer)
2182 return trace_clock_local();
2183
2184 - ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
2185 - ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);
2186 + ts = ring_buffer_time_stamp(buf->buffer, cpu);
2187 + ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
2188
2189 return ts;
2190 }
2191
2192 +cycle_t ftrace_now(int cpu)
2193 +{
2194 + return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
2195 +}
2196 +
2197 +/**
2198 + * tracing_is_enabled - Show if global_trace has been disabled
2199 + *
2200 + * Shows if the global trace has been enabled or not. It uses the
2201 + * mirror flag "buffer_disabled" to be used in fast paths such as for
2202 + * the irqsoff tracer. But it may be inaccurate due to races. If you
2203 + * need to know the accurate state, use tracing_is_on() which is a little
2204 + * slower, but accurate.
2205 + */
2206 int tracing_is_enabled(void)
2207 {
2208 - return tracing_is_on();
2209 + /*
2210 + * For quick access (irqsoff uses this in fast path), just
2211 + * return the mirror variable of the state of the ring buffer.
2212 + * It's a little racy, but we don't really care.
2213 + */
2214 + smp_rmb();
2215 + return !global_trace.buffer_disabled;
2216 }
2217
2218 /*
2219 @@ -361,6 +381,23 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
2220 TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
2221 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
2222
2223 +void tracer_tracing_on(struct trace_array *tr)
2224 +{
2225 + if (tr->trace_buffer.buffer)
2226 + ring_buffer_record_on(tr->trace_buffer.buffer);
2227 + /*
2228 + * This flag is looked at when buffers haven't been allocated
2229 + * yet, or by some tracers (like irqsoff), that just want to
2230 + * know if the ring buffer has been disabled, but it can handle
2231 + * races of where it gets disabled but we still do a record.
2232 + * As the check is in the fast path of the tracers, it is more
2233 + * important to be fast than accurate.
2234 + */
2235 + tr->buffer_disabled = 0;
2236 + /* Make the flag seen by readers */
2237 + smp_wmb();
2238 +}
2239 +
2240 /**
2241 * tracing_on - enable tracing buffers
2242 *
2243 @@ -369,15 +406,7 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
2244 */
2245 void tracing_on(void)
2246 {
2247 - if (global_trace.trace_buffer.buffer)
2248 - ring_buffer_record_on(global_trace.trace_buffer.buffer);
2249 - /*
2250 - * This flag is only looked at when buffers haven't been
2251 - * allocated yet. We don't really care about the race
2252 - * between setting this flag and actually turning
2253 - * on the buffer.
2254 - */
2255 - global_trace.buffer_disabled = 0;
2256 + tracer_tracing_on(&global_trace);
2257 }
2258 EXPORT_SYMBOL_GPL(tracing_on);
2259
2260 @@ -571,6 +600,23 @@ void tracing_snapshot_alloc(void)
2261 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
2262 #endif /* CONFIG_TRACER_SNAPSHOT */
2263
2264 +void tracer_tracing_off(struct trace_array *tr)
2265 +{
2266 + if (tr->trace_buffer.buffer)
2267 + ring_buffer_record_off(tr->trace_buffer.buffer);
2268 + /*
2269 + * This flag is looked at when buffers haven't been allocated
2270 + * yet, or by some tracers (like irqsoff), that just want to
2271 + * know if the ring buffer has been disabled, but it can handle
2272 + * races of where it gets disabled but we still do a record.
2273 + * As the check is in the fast path of the tracers, it is more
2274 + * important to be fast than accurate.
2275 + */
2276 + tr->buffer_disabled = 1;
2277 + /* Make the flag seen by readers */
2278 + smp_wmb();
2279 +}
2280 +
2281 /**
2282 * tracing_off - turn off tracing buffers
2283 *
2284 @@ -581,26 +627,29 @@ EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
2285 */
2286 void tracing_off(void)
2287 {
2288 - if (global_trace.trace_buffer.buffer)
2289 - ring_buffer_record_off(global_trace.trace_buffer.buffer);
2290 - /*
2291 - * This flag is only looked at when buffers haven't been
2292 - * allocated yet. We don't really care about the race
2293 - * between setting this flag and actually turning
2294 - * on the buffer.
2295 - */
2296 - global_trace.buffer_disabled = 1;
2297 + tracer_tracing_off(&global_trace);
2298 }
2299 EXPORT_SYMBOL_GPL(tracing_off);
2300
2301 /**
2302 + * tracer_tracing_is_on - show real state of ring buffer enabled
2303 + * @tr : the trace array to know if ring buffer is enabled
2304 + *
2305 + * Shows real state of the ring buffer if it is enabled or not.
2306 + */
2307 +int tracer_tracing_is_on(struct trace_array *tr)
2308 +{
2309 + if (tr->trace_buffer.buffer)
2310 + return ring_buffer_record_is_on(tr->trace_buffer.buffer);
2311 + return !tr->buffer_disabled;
2312 +}
2313 +
2314 +/**
2315 * tracing_is_on - show state of ring buffers enabled
2316 */
2317 int tracing_is_on(void)
2318 {
2319 - if (global_trace.trace_buffer.buffer)
2320 - return ring_buffer_record_is_on(global_trace.trace_buffer.buffer);
2321 - return !global_trace.buffer_disabled;
2322 + return tracer_tracing_is_on(&global_trace);
2323 }
2324 EXPORT_SYMBOL_GPL(tracing_is_on);
2325
2326 @@ -1150,7 +1199,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
2327 /* Make sure all commits have finished */
2328 synchronize_sched();
2329
2330 - buf->time_start = ftrace_now(buf->cpu);
2331 + buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2332
2333 for_each_online_cpu(cpu)
2334 ring_buffer_reset_cpu(buffer, cpu);
2335 @@ -1158,11 +1207,6 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
2336 ring_buffer_record_enable(buffer);
2337 }
2338
2339 -void tracing_reset_current(int cpu)
2340 -{
2341 - tracing_reset(&global_trace.trace_buffer, cpu);
2342 -}
2343 -
2344 /* Must have trace_types_lock held */
2345 void tracing_reset_all_online_cpus(void)
2346 {
2347 @@ -4060,7 +4104,7 @@ static int tracing_wait_pipe(struct file *filp)
2348 *
2349 * iter->pos will be 0 if we haven't read anything.
2350 */
2351 - if (!tracing_is_enabled() && iter->pos)
2352 + if (!tracing_is_on() && iter->pos)
2353 break;
2354 }
2355
2356 @@ -4121,6 +4165,7 @@ waitagain:
2357 memset(&iter->seq, 0,
2358 sizeof(struct trace_iterator) -
2359 offsetof(struct trace_iterator, seq));
2360 + cpumask_clear(iter->started);
2361 iter->pos = -1;
2362
2363 trace_event_read_lock();
2364 @@ -4437,7 +4482,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
2365
2366 /* disable tracing ? */
2367 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
2368 - tracing_off();
2369 + tracer_tracing_off(tr);
2370 /* resize the ring buffer to 0 */
2371 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
2372
2373 @@ -4602,12 +4647,12 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
2374 * New clock may not be consistent with the previous clock.
2375 * Reset the buffer so that it doesn't have incomparable timestamps.
2376 */
2377 - tracing_reset_online_cpus(&global_trace.trace_buffer);
2378 + tracing_reset_online_cpus(&tr->trace_buffer);
2379
2380 #ifdef CONFIG_TRACER_MAX_TRACE
2381 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
2382 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
2383 - tracing_reset_online_cpus(&global_trace.max_buffer);
2384 + tracing_reset_online_cpus(&tr->max_buffer);
2385 #endif
2386
2387 mutex_unlock(&trace_types_lock);
2388 @@ -5771,15 +5816,10 @@ rb_simple_read(struct file *filp, char __user *ubuf,
2389 size_t cnt, loff_t *ppos)
2390 {
2391 struct trace_array *tr = filp->private_data;
2392 - struct ring_buffer *buffer = tr->trace_buffer.buffer;
2393 char buf[64];
2394 int r;
2395
2396 - if (buffer)
2397 - r = ring_buffer_record_is_on(buffer);
2398 - else
2399 - r = 0;
2400 -
2401 + r = tracer_tracing_is_on(tr);
2402 r = sprintf(buf, "%d\n", r);
2403
2404 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2405 @@ -5801,11 +5841,11 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
2406 if (buffer) {
2407 mutex_lock(&trace_types_lock);
2408 if (val) {
2409 - ring_buffer_record_on(buffer);
2410 + tracer_tracing_on(tr);
2411 if (tr->current_trace->start)
2412 tr->current_trace->start(tr);
2413 } else {
2414 - ring_buffer_record_off(buffer);
2415 + tracer_tracing_off(tr);
2416 if (tr->current_trace->stop)
2417 tr->current_trace->stop(tr);
2418 }
2419 diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
2420 index b19d065..2aefbee 100644
2421 --- a/kernel/trace/trace_irqsoff.c
2422 +++ b/kernel/trace/trace_irqsoff.c
2423 @@ -373,7 +373,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
2424 struct trace_array_cpu *data;
2425 unsigned long flags;
2426
2427 - if (likely(!tracer_enabled))
2428 + if (!tracer_enabled || !tracing_is_enabled())
2429 return;
2430
2431 cpu = raw_smp_processor_id();
2432 @@ -416,7 +416,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
2433 else
2434 return;
2435
2436 - if (!tracer_enabled)
2437 + if (!tracer_enabled || !tracing_is_enabled())
2438 return;
2439
2440 data = per_cpu_ptr(tr->trace_buffer.data, cpu);
2441 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
2442 index 5a750b9..426f8fc 100644
2443 --- a/net/sunrpc/clnt.c
2444 +++ b/net/sunrpc/clnt.c
2445 @@ -1644,6 +1644,10 @@ call_connect(struct rpc_task *task)
2446 task->tk_action = call_connect_status;
2447 if (task->tk_status < 0)
2448 return;
2449 + if (task->tk_flags & RPC_TASK_NOCONNECT) {
2450 + rpc_exit(task, -ENOTCONN);
2451 + return;
2452 + }
2453 xprt_connect(task);
2454 }
2455 }
2456 diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h
2457 index 74d948f..779742c 100644
2458 --- a/net/sunrpc/netns.h
2459 +++ b/net/sunrpc/netns.h
2460 @@ -23,6 +23,7 @@ struct sunrpc_net {
2461 struct rpc_clnt *rpcb_local_clnt4;
2462 spinlock_t rpcb_clnt_lock;
2463 unsigned int rpcb_users;
2464 + unsigned int rpcb_is_af_local : 1;
2465
2466 struct mutex gssp_lock;
2467 wait_queue_head_t gssp_wq;
2468 diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
2469 index 3df764d..1891a10 100644
2470 --- a/net/sunrpc/rpcb_clnt.c
2471 +++ b/net/sunrpc/rpcb_clnt.c
2472 @@ -204,13 +204,15 @@ void rpcb_put_local(struct net *net)
2473 }
2474
2475 static void rpcb_set_local(struct net *net, struct rpc_clnt *clnt,
2476 - struct rpc_clnt *clnt4)
2477 + struct rpc_clnt *clnt4,
2478 + bool is_af_local)
2479 {
2480 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
2481
2482 /* Protected by rpcb_create_local_mutex */
2483 sn->rpcb_local_clnt = clnt;
2484 sn->rpcb_local_clnt4 = clnt4;
2485 + sn->rpcb_is_af_local = is_af_local ? 1 : 0;
2486 smp_wmb();
2487 sn->rpcb_users = 1;
2488 dprintk("RPC: created new rpcb local clients (rpcb_local_clnt: "
2489 @@ -238,6 +240,14 @@ static int rpcb_create_local_unix(struct net *net)
2490 .program = &rpcb_program,
2491 .version = RPCBVERS_2,
2492 .authflavor = RPC_AUTH_NULL,
2493 + /*
2494 + * We turn off the idle timeout to prevent the kernel
2495 + * from automatically disconnecting the socket.
2496 + * Otherwise, we'd have to cache the mount namespace
2497 + * of the caller and somehow pass that to the socket
2498 + * reconnect code.
2499 + */
2500 + .flags = RPC_CLNT_CREATE_NO_IDLE_TIMEOUT,
2501 };
2502 struct rpc_clnt *clnt, *clnt4;
2503 int result = 0;
2504 @@ -263,7 +273,7 @@ static int rpcb_create_local_unix(struct net *net)
2505 clnt4 = NULL;
2506 }
2507
2508 - rpcb_set_local(net, clnt, clnt4);
2509 + rpcb_set_local(net, clnt, clnt4, true);
2510
2511 out:
2512 return result;
2513 @@ -315,7 +325,7 @@ static int rpcb_create_local_net(struct net *net)
2514 clnt4 = NULL;
2515 }
2516
2517 - rpcb_set_local(net, clnt, clnt4);
2518 + rpcb_set_local(net, clnt, clnt4, false);
2519
2520 out:
2521 return result;
2522 @@ -376,13 +386,16 @@ static struct rpc_clnt *rpcb_create(struct net *net, const char *hostname,
2523 return rpc_create(&args);
2524 }
2525
2526 -static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
2527 +static int rpcb_register_call(struct sunrpc_net *sn, struct rpc_clnt *clnt, struct rpc_message *msg, bool is_set)
2528 {
2529 - int result, error = 0;
2530 + int flags = RPC_TASK_NOCONNECT;
2531 + int error, result = 0;
2532
2533 + if (is_set || !sn->rpcb_is_af_local)
2534 + flags = RPC_TASK_SOFTCONN;
2535 msg->rpc_resp = &result;
2536
2537 - error = rpc_call_sync(clnt, msg, RPC_TASK_SOFTCONN);
2538 + error = rpc_call_sync(clnt, msg, flags);
2539 if (error < 0) {
2540 dprintk("RPC: failed to contact local rpcbind "
2541 "server (errno %d).\n", -error);
2542 @@ -439,16 +452,19 @@ int rpcb_register(struct net *net, u32 prog, u32 vers, int prot, unsigned short
2543 .rpc_argp = &map,
2544 };
2545 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
2546 + bool is_set = false;
2547
2548 dprintk("RPC: %sregistering (%u, %u, %d, %u) with local "
2549 "rpcbind\n", (port ? "" : "un"),
2550 prog, vers, prot, port);
2551
2552 msg.rpc_proc = &rpcb_procedures2[RPCBPROC_UNSET];
2553 - if (port)
2554 + if (port != 0) {
2555 msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET];
2556 + is_set = true;
2557 + }
2558
2559 - return rpcb_register_call(sn->rpcb_local_clnt, &msg);
2560 + return rpcb_register_call(sn, sn->rpcb_local_clnt, &msg, is_set);
2561 }
2562
2563 /*
2564 @@ -461,6 +477,7 @@ static int rpcb_register_inet4(struct sunrpc_net *sn,
2565 const struct sockaddr_in *sin = (const struct sockaddr_in *)sap;
2566 struct rpcbind_args *map = msg->rpc_argp;
2567 unsigned short port = ntohs(sin->sin_port);
2568 + bool is_set = false;
2569 int result;
2570
2571 map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
2572 @@ -471,10 +488,12 @@ static int rpcb_register_inet4(struct sunrpc_net *sn,
2573 map->r_addr, map->r_netid);
2574
2575 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
2576 - if (port)
2577 + if (port != 0) {
2578 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
2579 + is_set = true;
2580 + }
2581
2582 - result = rpcb_register_call(sn->rpcb_local_clnt4, msg);
2583 + result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
2584 kfree(map->r_addr);
2585 return result;
2586 }
2587 @@ -489,6 +508,7 @@ static int rpcb_register_inet6(struct sunrpc_net *sn,
2588 const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap;
2589 struct rpcbind_args *map = msg->rpc_argp;
2590 unsigned short port = ntohs(sin6->sin6_port);
2591 + bool is_set = false;
2592 int result;
2593
2594 map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
2595 @@ -499,10 +519,12 @@ static int rpcb_register_inet6(struct sunrpc_net *sn,
2596 map->r_addr, map->r_netid);
2597
2598 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
2599 - if (port)
2600 + if (port != 0) {
2601 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
2602 + is_set = true;
2603 + }
2604
2605 - result = rpcb_register_call(sn->rpcb_local_clnt4, msg);
2606 + result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
2607 kfree(map->r_addr);
2608 return result;
2609 }
2610 @@ -519,7 +541,7 @@ static int rpcb_unregister_all_protofamilies(struct sunrpc_net *sn,
2611 map->r_addr = "";
2612 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
2613
2614 - return rpcb_register_call(sn->rpcb_local_clnt4, msg);
2615 + return rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, false);
2616 }
2617
2618 /**
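
With the AF_LOCAL rpcbind client the idle timeout is disabled (see the comment added above), so an UNSET issued over a disconnected local socket must not trigger a reconnect; with the new RPC_TASK_NOCONNECT flag, call_connect() exits such a task with -ENOTCONN instead. SET requests and non-local transports keep the old soft-connect behaviour. A tiny truth-table demo of the flag selection, with the flag values copied from sched.h:

#include <stdio.h>

#define RPC_TASK_SOFTCONN  0x0400
#define RPC_TASK_NOCONNECT 0x2000   /* new: return ENOTCONN if not connected */

static int pick_flags(int is_set, int rpcb_is_af_local)
{
	/* mirrors rpcb_register_call(): only an UNSET over the AF_LOCAL
	 * rpcbind client is forbidden from triggering a (re)connect */
	if (is_set || !rpcb_is_af_local)
		return RPC_TASK_SOFTCONN;
	return RPC_TASK_NOCONNECT;
}

int main(void)
{
	int is_set, af_local;

	for (af_local = 0; af_local <= 1; af_local++)
		for (is_set = 0; is_set <= 1; is_set++)
			printf("af_local=%d is_set=%d -> %s\n", af_local, is_set,
			       pick_flags(is_set, af_local) == RPC_TASK_NOCONNECT ?
			       "RPC_TASK_NOCONNECT" : "RPC_TASK_SOFTCONN");
	return 0;
}
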
2619 diff --git a/sound/usb/6fire/comm.c b/sound/usb/6fire/comm.c
2620 index 9e6e3ff..23452ee 100644
2621 --- a/sound/usb/6fire/comm.c
2622 +++ b/sound/usb/6fire/comm.c
2623 @@ -110,19 +110,37 @@ static int usb6fire_comm_send_buffer(u8 *buffer, struct usb_device *dev)
2624 static int usb6fire_comm_write8(struct comm_runtime *rt, u8 request,
2625 u8 reg, u8 value)
2626 {
2627 - u8 buffer[13]; /* 13: maximum length of message */
2628 + u8 *buffer;
2629 + int ret;
2630 +
2631 + /* 13: maximum length of message */
2632 + buffer = kmalloc(13, GFP_KERNEL);
2633 + if (!buffer)
2634 + return -ENOMEM;
2635
2636 usb6fire_comm_init_buffer(buffer, 0x00, request, reg, value, 0x00);
2637 - return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
2638 + ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
2639 +
2640 + kfree(buffer);
2641 + return ret;
2642 }
2643
2644 static int usb6fire_comm_write16(struct comm_runtime *rt, u8 request,
2645 u8 reg, u8 vl, u8 vh)
2646 {
2647 - u8 buffer[13]; /* 13: maximum length of message */
2648 + u8 *buffer;
2649 + int ret;
2650 +
2651 + /* 13: maximum length of message */
2652 + buffer = kmalloc(13, GFP_KERNEL);
2653 + if (!buffer)
2654 + return -ENOMEM;
2655
2656 usb6fire_comm_init_buffer(buffer, 0x00, request, reg, vl, vh);
2657 - return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
2658 + ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
2659 +
2660 + kfree(buffer);
2661 + return ret;
2662 }
2663
2664 int usb6fire_comm_init(struct sfire_chip *chip)
2665 @@ -135,6 +153,12 @@ int usb6fire_comm_init(struct sfire_chip *chip)
2666 if (!rt)
2667 return -ENOMEM;
2668
2669 + rt->receiver_buffer = kzalloc(COMM_RECEIVER_BUFSIZE, GFP_KERNEL);
2670 + if (!rt->receiver_buffer) {
2671 + kfree(rt);
2672 + return -ENOMEM;
2673 + }
2674 +
2675 urb = &rt->receiver;
2676 rt->serial = 1;
2677 rt->chip = chip;
2678 @@ -153,6 +177,7 @@ int usb6fire_comm_init(struct sfire_chip *chip)
2679 urb->interval = 1;
2680 ret = usb_submit_urb(urb, GFP_KERNEL);
2681 if (ret < 0) {
2682 + kfree(rt->receiver_buffer);
2683 kfree(rt);
2684 snd_printk(KERN_ERR PREFIX "cannot create comm data receiver.");
2685 return ret;
2686 @@ -171,6 +196,9 @@ void usb6fire_comm_abort(struct sfire_chip *chip)
2687
2688 void usb6fire_comm_destroy(struct sfire_chip *chip)
2689 {
2690 - kfree(chip->comm);
2691 + struct comm_runtime *rt = chip->comm;
2692 +
2693 + kfree(rt->receiver_buffer);
2694 + kfree(rt);
2695 chip->comm = NULL;
2696 }
2697 diff --git a/sound/usb/6fire/comm.h b/sound/usb/6fire/comm.h
2698 index 6a0840b..780d5ed 100644
2699 --- a/sound/usb/6fire/comm.h
2700 +++ b/sound/usb/6fire/comm.h
2701 @@ -24,7 +24,7 @@ struct comm_runtime {
2702 struct sfire_chip *chip;
2703
2704 struct urb receiver;
2705 - u8 receiver_buffer[COMM_RECEIVER_BUFSIZE];
2706 + u8 *receiver_buffer;
2707
2708 u8 serial; /* urb serial */
2709
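
The 6fire changes replace on-stack command buffers and the embedded receiver array with kmalloc'd/kzalloc'd memory, since USB transfer buffers must come from DMA-able storage, and they free those buffers on the error and teardown paths. A hedged kernel-style sketch of the heap-buffer pattern; not standalone-runnable, and the endpoint number and message layout are purely illustrative:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/usb.h>

/* sketch: send a small vendor command using a heap (DMA-able) buffer
 * instead of an on-stack array */
static int send_vendor_cmd(struct usb_device *dev, u8 request, u8 reg, u8 value)
{
	u8 *buf;
	int ret;

	buf = kmalloc(13, GFP_KERNEL);     /* 13: maximum message length */
	if (!buf)
		return -ENOMEM;

	buf[0] = request;
	buf[1] = reg;
	buf[2] = value;

	ret = usb_interrupt_msg(dev, usb_sndintpipe(dev, 0x01), buf, 13,
				NULL, 100 /* ms */);

	kfree(buf);
	return ret;
}
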
2710 diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
2711 index 7a444b5..659950e 100644
2712 --- a/sound/usb/endpoint.c
2713 +++ b/sound/usb/endpoint.c
2714 @@ -591,17 +591,16 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep,
2715 ep->stride = frame_bits >> 3;
2716 ep->silence_value = pcm_format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0;
2717
2718 - /* calculate max. frequency */
2719 - if (ep->maxpacksize) {
2720 + /* assume max. frequency is 25% higher than nominal */
2721 + ep->freqmax = ep->freqn + (ep->freqn >> 2);
2722 + maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
2723 + >> (16 - ep->datainterval);
2724 + /* but wMaxPacketSize might reduce this */
2725 + if (ep->maxpacksize && ep->maxpacksize < maxsize) {
2726 /* whatever fits into a max. size packet */
2727 maxsize = ep->maxpacksize;
2728 ep->freqmax = (maxsize / (frame_bits >> 3))
2729 << (16 - ep->datainterval);
2730 - } else {
2731 - /* no max. packet size: just take 25% higher than nominal */
2732 - ep->freqmax = ep->freqn + (ep->freqn >> 2);
2733 - maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
2734 - >> (16 - ep->datainterval);
2735 }
2736
2737 if (ep->fill_max)
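
The reordered endpoint logic first assumes the stream may run 25% above its nominal rate and derives the packet-size estimate from that, then lets wMaxPacketSize only shrink the estimate, never inflate it; before this change a large descriptor value drove freqmax and the URB buffer sizing directly. A small worked example of the Q16.16 arithmetic with illustrative values:

#include <stdio.h>

int main(void)
{
	/* 48 kHz, S16_LE stereo, full-speed USB: one packet per 1 ms frame */
	unsigned int freqn = 48 << 16;        /* nominal frames per packet, Q16.16 */
	unsigned int frame_bytes = 4;         /* 16-bit stereo */
	unsigned int datainterval = 0;
	unsigned int wmaxpacket = 512;        /* descriptor value, larger than needed */
	unsigned int freqmax, maxsize;

	/* new order: start from a 25% headroom estimate ... */
	freqmax = freqn + (freqn >> 2);
	maxsize = ((freqmax + 0xffff) * frame_bytes) >> (16 - datainterval);
	printf("estimated maxsize = %u bytes (freqmax = %u.%02u frames/packet)\n",
	       maxsize, freqmax >> 16, ((freqmax & 0xffff) * 100) >> 16);

	/* ... and let wMaxPacketSize only shrink it, never inflate it */
	if (wmaxpacket && wmaxpacket < maxsize) {
		maxsize = wmaxpacket;
		freqmax = (maxsize / frame_bytes) << (16 - datainterval);
	}
	printf("final maxsize = %u bytes\n", maxsize);
	/* the old code would have taken maxsize = 512 straight from the
	 * descriptor and derived a 128 frames/packet ceiling from it */
	return 0;
}
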