Magellan Linux

Contents of /trunk/kernel-alx/patches-3.10/0175-3.10.76-all-fixes.patch



Revision 2663
Tue Jul 21 16:20:23 2015 UTC by niro
File size: 68487 bytes
-linux-3.10.76
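
Most of the per-architecture hunks below make the same change to each page-fault handler's VM_FAULT_ERROR path, teaching it to treat the new VM_FAULT_SIGSEGV return code as a bad-area fault. Condensed, the recurring pattern looks roughly like the sketch here (a simplified illustration only; the label names such as bad_area, map_err or out vary per architecture):

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;	/* hand off to the OOM path */
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;		/* new: deliver SIGSEGV to the task */
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;		/* deliver SIGBUS as before */
		BUG();
	}
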
1 diff --git a/Makefile b/Makefile
2 index 87909d8302ad..019a6a4b386d 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 10
8 -SUBLEVEL = 75
9 +SUBLEVEL = 76
10 EXTRAVERSION =
11 NAME = TOSSUG Baby Fish
12
13 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
14 index 98838a05ba6d..9d0ac091a52a 100644
15 --- a/arch/alpha/mm/fault.c
16 +++ b/arch/alpha/mm/fault.c
17 @@ -156,6 +156,8 @@ retry:
18 if (unlikely(fault & VM_FAULT_ERROR)) {
19 if (fault & VM_FAULT_OOM)
20 goto out_of_memory;
21 + else if (fault & VM_FAULT_SIGSEGV)
22 + goto bad_area;
23 else if (fault & VM_FAULT_SIGBUS)
24 goto do_sigbus;
25 BUG();
26 diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
27 index 50533b750a99..08f65bcf9130 100644
28 --- a/arch/arc/mm/fault.c
29 +++ b/arch/arc/mm/fault.c
30 @@ -160,6 +160,8 @@ good_area:
31 /* TBD: switch to pagefault_out_of_memory() */
32 if (fault & VM_FAULT_OOM)
33 goto out_of_memory;
34 + else if (fault & VM_FAULT_SIGSEGV)
35 + goto bad_area;
36 else if (fault & VM_FAULT_SIGBUS)
37 goto do_sigbus;
38
39 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
40 index 0eca93327195..d223a8b57c1e 100644
41 --- a/arch/avr32/mm/fault.c
42 +++ b/arch/avr32/mm/fault.c
43 @@ -142,6 +142,8 @@ good_area:
44 if (unlikely(fault & VM_FAULT_ERROR)) {
45 if (fault & VM_FAULT_OOM)
46 goto out_of_memory;
47 + else if (fault & VM_FAULT_SIGSEGV)
48 + goto bad_area;
49 else if (fault & VM_FAULT_SIGBUS)
50 goto do_sigbus;
51 BUG();
52 diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
53 index 1790f22e71a2..2686a7aa8ec8 100644
54 --- a/arch/cris/mm/fault.c
55 +++ b/arch/cris/mm/fault.c
56 @@ -176,6 +176,8 @@ retry:
57 if (unlikely(fault & VM_FAULT_ERROR)) {
58 if (fault & VM_FAULT_OOM)
59 goto out_of_memory;
60 + else if (fault & VM_FAULT_SIGSEGV)
61 + goto bad_area;
62 else if (fault & VM_FAULT_SIGBUS)
63 goto do_sigbus;
64 BUG();
65 diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
66 index 9a66372fc7c7..ec4917ddf678 100644
67 --- a/arch/frv/mm/fault.c
68 +++ b/arch/frv/mm/fault.c
69 @@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
70 if (unlikely(fault & VM_FAULT_ERROR)) {
71 if (fault & VM_FAULT_OOM)
72 goto out_of_memory;
73 + else if (fault & VM_FAULT_SIGSEGV)
74 + goto bad_area;
75 else if (fault & VM_FAULT_SIGBUS)
76 goto do_sigbus;
77 BUG();
78 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
79 index 7225dad87094..ba5ba7accd0d 100644
80 --- a/arch/ia64/mm/fault.c
81 +++ b/arch/ia64/mm/fault.c
82 @@ -172,6 +172,8 @@ retry:
83 */
84 if (fault & VM_FAULT_OOM) {
85 goto out_of_memory;
86 + } else if (fault & VM_FAULT_SIGSEGV) {
87 + goto bad_area;
88 } else if (fault & VM_FAULT_SIGBUS) {
89 signal = SIGBUS;
90 goto bad_area;
91 diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
92 index e9c6a8014bd6..e3d4d4890104 100644
93 --- a/arch/m32r/mm/fault.c
94 +++ b/arch/m32r/mm/fault.c
95 @@ -200,6 +200,8 @@ good_area:
96 if (unlikely(fault & VM_FAULT_ERROR)) {
97 if (fault & VM_FAULT_OOM)
98 goto out_of_memory;
99 + else if (fault & VM_FAULT_SIGSEGV)
100 + goto bad_area;
101 else if (fault & VM_FAULT_SIGBUS)
102 goto do_sigbus;
103 BUG();
104 diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
105 index eb1d61f68725..f0eef0491f77 100644
106 --- a/arch/m68k/mm/fault.c
107 +++ b/arch/m68k/mm/fault.c
108 @@ -153,6 +153,8 @@ good_area:
109 if (unlikely(fault & VM_FAULT_ERROR)) {
110 if (fault & VM_FAULT_OOM)
111 goto out_of_memory;
112 + else if (fault & VM_FAULT_SIGSEGV)
113 + goto map_err;
114 else if (fault & VM_FAULT_SIGBUS)
115 goto bus_err;
116 BUG();
117 diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
118 index 332680e5ebf2..2de5dc695a87 100644
119 --- a/arch/metag/mm/fault.c
120 +++ b/arch/metag/mm/fault.c
121 @@ -141,6 +141,8 @@ good_area:
122 if (unlikely(fault & VM_FAULT_ERROR)) {
123 if (fault & VM_FAULT_OOM)
124 goto out_of_memory;
125 + else if (fault & VM_FAULT_SIGSEGV)
126 + goto bad_area;
127 else if (fault & VM_FAULT_SIGBUS)
128 goto do_sigbus;
129 BUG();
130 diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
131 index fa4cf52aa7a6..d46a5ebb7570 100644
132 --- a/arch/microblaze/mm/fault.c
133 +++ b/arch/microblaze/mm/fault.c
134 @@ -224,6 +224,8 @@ good_area:
135 if (unlikely(fault & VM_FAULT_ERROR)) {
136 if (fault & VM_FAULT_OOM)
137 goto out_of_memory;
138 + else if (fault & VM_FAULT_SIGSEGV)
139 + goto bad_area;
140 else if (fault & VM_FAULT_SIGBUS)
141 goto do_sigbus;
142 BUG();
143 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
144 index 0214a43b9911..c40a8d1c43ba 100644
145 --- a/arch/mips/mm/fault.c
146 +++ b/arch/mips/mm/fault.c
147 @@ -157,6 +157,8 @@ good_area:
148 if (unlikely(fault & VM_FAULT_ERROR)) {
149 if (fault & VM_FAULT_OOM)
150 goto out_of_memory;
151 + else if (fault & VM_FAULT_SIGSEGV)
152 + goto bad_area;
153 else if (fault & VM_FAULT_SIGBUS)
154 goto do_sigbus;
155 BUG();
156 diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
157 index 3516cbdf1ee9..0c2cc5d39c8e 100644
158 --- a/arch/mn10300/mm/fault.c
159 +++ b/arch/mn10300/mm/fault.c
160 @@ -262,6 +262,8 @@ good_area:
161 if (unlikely(fault & VM_FAULT_ERROR)) {
162 if (fault & VM_FAULT_OOM)
163 goto out_of_memory;
164 + else if (fault & VM_FAULT_SIGSEGV)
165 + goto bad_area;
166 else if (fault & VM_FAULT_SIGBUS)
167 goto do_sigbus;
168 BUG();
169 diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
170 index 0703acf7d327..230ac20ae794 100644
171 --- a/arch/openrisc/mm/fault.c
172 +++ b/arch/openrisc/mm/fault.c
173 @@ -171,6 +171,8 @@ good_area:
174 if (unlikely(fault & VM_FAULT_ERROR)) {
175 if (fault & VM_FAULT_OOM)
176 goto out_of_memory;
177 + else if (fault & VM_FAULT_SIGSEGV)
178 + goto bad_area;
179 else if (fault & VM_FAULT_SIGBUS)
180 goto do_sigbus;
181 BUG();
182 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
183 index d10d27a720c0..c45130f56a93 100644
184 --- a/arch/parisc/mm/fault.c
185 +++ b/arch/parisc/mm/fault.c
186 @@ -220,6 +220,8 @@ good_area:
187 */
188 if (fault & VM_FAULT_OOM)
189 goto out_of_memory;
190 + else if (fault & VM_FAULT_SIGSEGV)
191 + goto bad_area;
192 else if (fault & VM_FAULT_SIGBUS)
193 goto bad_area;
194 BUG();
195 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
196 index d9196c9f93d9..d51a0c110eb4 100644
197 --- a/arch/powerpc/mm/fault.c
198 +++ b/arch/powerpc/mm/fault.c
199 @@ -425,6 +425,8 @@ good_area:
200 */
201 fault = handle_mm_fault(mm, vma, address, flags);
202 if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
203 + if (fault & VM_FAULT_SIGSEGV)
204 + goto bad_area;
205 rc = mm_fault_error(regs, address, fault);
206 if (rc >= MM_FAULT_RETURN)
207 goto bail;
208 diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/platforms/cell/spu_fault.c
209 index 641e7273d75a..62f3e4e48a0b 100644
210 --- a/arch/powerpc/platforms/cell/spu_fault.c
211 +++ b/arch/powerpc/platforms/cell/spu_fault.c
212 @@ -75,7 +75,7 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
213 if (*flt & VM_FAULT_OOM) {
214 ret = -ENOMEM;
215 goto out_unlock;
216 - } else if (*flt & VM_FAULT_SIGBUS) {
217 + } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
218 ret = -EFAULT;
219 goto out_unlock;
220 }
221 diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
222 index 35f77a42bedf..c5c5788e8a13 100644
223 --- a/arch/powerpc/platforms/cell/spufs/inode.c
224 +++ b/arch/powerpc/platforms/cell/spufs/inode.c
225 @@ -164,7 +164,7 @@ static void spufs_prune_dir(struct dentry *dir)
226 struct dentry *dentry, *tmp;
227
228 mutex_lock(&dir->d_inode->i_mutex);
229 - list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
230 + list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) {
231 spin_lock(&dentry->d_lock);
232 if (!(d_unhashed(dentry)) && dentry->d_inode) {
233 dget_dlock(dentry);
234 diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
235 index 416facec4a33..d214321db727 100644
236 --- a/arch/s390/mm/fault.c
237 +++ b/arch/s390/mm/fault.c
238 @@ -244,6 +244,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
239 do_no_context(regs);
240 else
241 pagefault_out_of_memory();
242 + } else if (fault & VM_FAULT_SIGSEGV) {
243 + /* Kernel mode? Handle exceptions or die */
244 + if (!user_mode(regs))
245 + do_no_context(regs);
246 + else
247 + do_sigsegv(regs, SEGV_MAPERR);
248 } else if (fault & VM_FAULT_SIGBUS) {
249 /* Kernel mode? Handle exceptions or die */
250 if (!user_mode(regs))
251 diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
252 index 52238983527d..6860beb2a280 100644
253 --- a/arch/score/mm/fault.c
254 +++ b/arch/score/mm/fault.c
255 @@ -114,6 +114,8 @@ good_area:
256 if (unlikely(fault & VM_FAULT_ERROR)) {
257 if (fault & VM_FAULT_OOM)
258 goto out_of_memory;
259 + else if (fault & VM_FAULT_SIGSEGV)
260 + goto bad_area;
261 else if (fault & VM_FAULT_SIGBUS)
262 goto do_sigbus;
263 BUG();
264 diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
265 index 541dc6101508..a58fec9b55e0 100644
266 --- a/arch/sh/mm/fault.c
267 +++ b/arch/sh/mm/fault.c
268 @@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
269 } else {
270 if (fault & VM_FAULT_SIGBUS)
271 do_sigbus(regs, error_code, address);
272 + else if (fault & VM_FAULT_SIGSEGV)
273 + bad_area(regs, error_code, address);
274 else
275 BUG();
276 }
277 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
278 index 59dbd4645725..163c78712110 100644
279 --- a/arch/sparc/mm/fault_32.c
280 +++ b/arch/sparc/mm/fault_32.c
281 @@ -252,6 +252,8 @@ good_area:
282 if (unlikely(fault & VM_FAULT_ERROR)) {
283 if (fault & VM_FAULT_OOM)
284 goto out_of_memory;
285 + else if (fault & VM_FAULT_SIGSEGV)
286 + goto bad_area;
287 else if (fault & VM_FAULT_SIGBUS)
288 goto do_sigbus;
289 BUG();
290 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
291 index 3841a081beb3..ac2db923e51a 100644
292 --- a/arch/sparc/mm/fault_64.c
293 +++ b/arch/sparc/mm/fault_64.c
294 @@ -443,6 +443,8 @@ good_area:
295 if (unlikely(fault & VM_FAULT_ERROR)) {
296 if (fault & VM_FAULT_OOM)
297 goto out_of_memory;
298 + else if (fault & VM_FAULT_SIGSEGV)
299 + goto bad_area;
300 else if (fault & VM_FAULT_SIGBUS)
301 goto do_sigbus;
302 BUG();
303 diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
304 index 3ff289f422e6..12b732f593bb 100644
305 --- a/arch/tile/mm/fault.c
306 +++ b/arch/tile/mm/fault.c
307 @@ -446,6 +446,8 @@ good_area:
308 if (unlikely(fault & VM_FAULT_ERROR)) {
309 if (fault & VM_FAULT_OOM)
310 goto out_of_memory;
311 + else if (fault & VM_FAULT_SIGSEGV)
312 + goto bad_area;
313 else if (fault & VM_FAULT_SIGBUS)
314 goto do_sigbus;
315 BUG();
316 diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
317 index 5c3aef74237f..06ab0ebe0a0f 100644
318 --- a/arch/um/kernel/trap.c
319 +++ b/arch/um/kernel/trap.c
320 @@ -80,6 +80,8 @@ good_area:
321 if (unlikely(fault & VM_FAULT_ERROR)) {
322 if (fault & VM_FAULT_OOM) {
323 goto out_of_memory;
324 + } else if (fault & VM_FAULT_SIGSEGV) {
325 + goto out;
326 } else if (fault & VM_FAULT_SIGBUS) {
327 err = -EACCES;
328 goto out;
329 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
330 index af88fa20dbe8..ddad189e596e 100644
331 --- a/arch/x86/kvm/emulate.c
332 +++ b/arch/x86/kvm/emulate.c
333 @@ -2450,7 +2450,7 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
334 * Not recognized on AMD in compat mode (but is recognized in legacy
335 * mode).
336 */
337 - if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
338 + if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
339 && !vendor_intel(ctxt))
340 return emulate_ud(ctxt);
341
342 @@ -2463,25 +2463,13 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
343 setup_syscalls_segments(ctxt, &cs, &ss);
344
345 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
346 - switch (ctxt->mode) {
347 - case X86EMUL_MODE_PROT32:
348 - if ((msr_data & 0xfffc) == 0x0)
349 - return emulate_gp(ctxt, 0);
350 - break;
351 - case X86EMUL_MODE_PROT64:
352 - if (msr_data == 0x0)
353 - return emulate_gp(ctxt, 0);
354 - break;
355 - default:
356 - break;
357 - }
358 + if ((msr_data & 0xfffc) == 0x0)
359 + return emulate_gp(ctxt, 0);
360
361 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
362 - cs_sel = (u16)msr_data;
363 - cs_sel &= ~SELECTOR_RPL_MASK;
364 + cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
365 ss_sel = cs_sel + 8;
366 - ss_sel &= ~SELECTOR_RPL_MASK;
367 - if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
368 + if (efer & EFER_LMA) {
369 cs.d = 0;
370 cs.l = 1;
371 }
372 @@ -2490,10 +2478,11 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
373 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
374
375 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
376 - ctxt->_eip = msr_data;
377 + ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
378
379 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
380 - *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
381 + *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
382 + (u32)msr_data;
383
384 return X86EMUL_CONTINUE;
385 }
386 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
387 index d8b1ff68dbb9..e4780b052531 100644
388 --- a/arch/x86/mm/fault.c
389 +++ b/arch/x86/mm/fault.c
390 @@ -812,11 +812,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
391 unsigned int fault)
392 {
393 struct task_struct *tsk = current;
394 - struct mm_struct *mm = tsk->mm;
395 int code = BUS_ADRERR;
396
397 - up_read(&mm->mmap_sem);
398 -
399 /* Kernel mode? Handle exceptions or die: */
400 if (!(error_code & PF_USER)) {
401 no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
402 @@ -847,7 +844,6 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
403 unsigned long address, unsigned int fault)
404 {
405 if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
406 - up_read(&current->mm->mmap_sem);
407 no_context(regs, error_code, address, 0, 0);
408 return;
409 }
410 @@ -855,14 +851,11 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
411 if (fault & VM_FAULT_OOM) {
412 /* Kernel mode? Handle exceptions or die: */
413 if (!(error_code & PF_USER)) {
414 - up_read(&current->mm->mmap_sem);
415 no_context(regs, error_code, address,
416 SIGSEGV, SEGV_MAPERR);
417 return;
418 }
419
420 - up_read(&current->mm->mmap_sem);
421 -
422 /*
423 * We ran out of memory, call the OOM killer, and return the
424 * userspace (which will retry the fault, or kill us if we got
425 @@ -873,6 +866,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
426 if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
427 VM_FAULT_HWPOISON_LARGE))
428 do_sigbus(regs, error_code, address, fault);
429 + else if (fault & VM_FAULT_SIGSEGV)
430 + bad_area_nosemaphore(regs, error_code, address);
431 else
432 BUG();
433 }
434 @@ -1193,6 +1188,7 @@ good_area:
435 return;
436
437 if (unlikely(fault & VM_FAULT_ERROR)) {
438 + up_read(&mm->mmap_sem);
439 mm_fault_error(regs, error_code, address, fault);
440 return;
441 }
442 diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
443 index 70fa7bc42b4a..38278337d85e 100644
444 --- a/arch/xtensa/mm/fault.c
445 +++ b/arch/xtensa/mm/fault.c
446 @@ -117,6 +117,8 @@ good_area:
447 if (unlikely(fault & VM_FAULT_ERROR)) {
448 if (fault & VM_FAULT_OOM)
449 goto out_of_memory;
450 + else if (fault & VM_FAULT_SIGSEGV)
451 + goto bad_area;
452 else if (fault & VM_FAULT_SIGBUS)
453 goto do_sigbus;
454 BUG();
455 diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
456 index dad8891ecbfa..9c2c4eca52e3 100644
457 --- a/drivers/bluetooth/ath3k.c
458 +++ b/drivers/bluetooth/ath3k.c
459 @@ -77,6 +77,8 @@ static struct usb_device_id ath3k_table[] = {
460 { USB_DEVICE(0x0CF3, 0x3004) },
461 { USB_DEVICE(0x0CF3, 0x3008) },
462 { USB_DEVICE(0x0CF3, 0x311D) },
463 + { USB_DEVICE(0x0CF3, 0x311E) },
464 + { USB_DEVICE(0x0CF3, 0x311F) },
465 { USB_DEVICE(0x0CF3, 0x817a) },
466 { USB_DEVICE(0x13d3, 0x3375) },
467 { USB_DEVICE(0x04CA, 0x3004) },
468 @@ -120,6 +122,8 @@ static struct usb_device_id ath3k_blist_tbl[] = {
469 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
470 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
471 { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
472 + { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 },
473 + { USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 },
474 { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
475 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
476 { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
477 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
478 index 61a8ec4e5f4d..92b985317770 100644
479 --- a/drivers/bluetooth/btusb.c
480 +++ b/drivers/bluetooth/btusb.c
481 @@ -49,6 +49,7 @@ static struct usb_driver btusb_driver;
482 #define BTUSB_WRONG_SCO_MTU 0x40
483 #define BTUSB_ATH3012 0x80
484 #define BTUSB_INTEL 0x100
485 +#define BTUSB_INTEL_BOOT 0x200
486
487 static struct usb_device_id btusb_table[] = {
488 /* Generic Bluetooth USB device */
489 @@ -113,6 +114,13 @@ static struct usb_device_id btusb_table[] = {
490 /*Broadcom devices with vendor specific id */
491 { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
492
493 + /* IMC Networks - Broadcom based */
494 + { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01) },
495 +
496 + /* Intel Bluetooth USB Bootloader (RAM module) */
497 + { USB_DEVICE(0x8087, 0x0a5a),
498 + .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
499 +
500 { } /* Terminating entry */
501 };
502
503 @@ -141,6 +149,8 @@ static struct usb_device_id blacklist_table[] = {
504 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
505 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
506 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
507 + { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
508 + { USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 },
509 { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
510 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
511 { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
512 @@ -1444,6 +1454,9 @@ static int btusb_probe(struct usb_interface *intf,
513 if (id->driver_info & BTUSB_INTEL)
514 hdev->setup = btusb_setup_intel;
515
516 + if (id->driver_info & BTUSB_INTEL_BOOT)
517 + set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
518 +
519 /* Interface numbers are hardcoded in the specification */
520 data->isoc = usb_ifnum_to_if(data->udev, 1);
521
522 diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
523 index e04462b60756..f505e4ca6d58 100644
524 --- a/drivers/edac/sb_edac.c
525 +++ b/drivers/edac/sb_edac.c
526 @@ -270,8 +270,9 @@ static const u32 correrrthrsld[] = {
527 * sbridge structs
528 */
529
530 -#define NUM_CHANNELS 4
531 -#define MAX_DIMMS 3 /* Max DIMMS per channel */
532 +#define NUM_CHANNELS 4
533 +#define MAX_DIMMS 3 /* Max DIMMS per channel */
534 +#define CHANNEL_UNSPECIFIED 0xf /* Intel IA32 SDM 15-14 */
535
536 struct sbridge_info {
537 u32 mcmtr;
538 @@ -1451,6 +1452,9 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
539
540 /* FIXME: need support for channel mask */
541
542 + if (channel == CHANNEL_UNSPECIFIED)
543 + channel = -1;
544 +
545 /* Call the helper to output message */
546 edac_mc_handle_error(tp_event, mci, core_err_cnt,
547 m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
548 diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
549 index 5d204492c603..161dcba13c47 100644
550 --- a/drivers/net/ethernet/broadcom/bnx2.c
551 +++ b/drivers/net/ethernet/broadcom/bnx2.c
552 @@ -2869,7 +2869,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
553 sw_cons = BNX2_NEXT_TX_BD(sw_cons);
554
555 tx_bytes += skb->len;
556 - dev_kfree_skb(skb);
557 + dev_kfree_skb_any(skb);
558 tx_pkt++;
559 if (tx_pkt == budget)
560 break;
561 @@ -6610,7 +6610,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
562
563 mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
564 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
565 - dev_kfree_skb(skb);
566 + dev_kfree_skb_any(skb);
567 return NETDEV_TX_OK;
568 }
569
570 @@ -6703,7 +6703,7 @@ dma_error:
571 PCI_DMA_TODEVICE);
572 }
573
574 - dev_kfree_skb(skb);
575 + dev_kfree_skb_any(skb);
576 return NETDEV_TX_OK;
577 }
578
579 diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
580 index 8c1eab1151b8..680d26d6d2c3 100644
581 --- a/drivers/net/ethernet/broadcom/tg3.c
582 +++ b/drivers/net/ethernet/broadcom/tg3.c
583 @@ -6437,7 +6437,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
584 pkts_compl++;
585 bytes_compl += skb->len;
586
587 - dev_kfree_skb(skb);
588 + dev_kfree_skb_any(skb);
589
590 if (unlikely(tx_bug)) {
591 tg3_tx_recover(tp);
592 @@ -6769,7 +6769,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
593 if (len > (tp->dev->mtu + ETH_HLEN) &&
594 skb->protocol != htons(ETH_P_8021Q) &&
595 skb->protocol != htons(ETH_P_8021AD)) {
596 - dev_kfree_skb(skb);
597 + dev_kfree_skb_any(skb);
598 goto drop_it_no_recycle;
599 }
600
601 @@ -7652,7 +7652,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
602 PCI_DMA_TODEVICE);
603 /* Make sure the mapping succeeded */
604 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
605 - dev_kfree_skb(new_skb);
606 + dev_kfree_skb_any(new_skb);
607 ret = -1;
608 } else {
609 u32 save_entry = *entry;
610 @@ -7667,13 +7667,13 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
611 new_skb->len, base_flags,
612 mss, vlan)) {
613 tg3_tx_skb_unmap(tnapi, save_entry, -1);
614 - dev_kfree_skb(new_skb);
615 + dev_kfree_skb_any(new_skb);
616 ret = -1;
617 }
618 }
619 }
620
621 - dev_kfree_skb(skb);
622 + dev_kfree_skb_any(skb);
623 *pskb = new_skb;
624 return ret;
625 }
626 @@ -7716,7 +7716,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
627 } while (segs);
628
629 tg3_tso_bug_end:
630 - dev_kfree_skb(skb);
631 + dev_kfree_skb_any(skb);
632
633 return NETDEV_TX_OK;
634 }
635 @@ -7954,7 +7954,7 @@ dma_error:
636 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
637 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
638 drop:
639 - dev_kfree_skb(skb);
640 + dev_kfree_skb_any(skb);
641 drop_nofree:
642 tp->tx_dropped++;
643 return NETDEV_TX_OK;
644 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
645 index d81a7dbfeef6..88e85cb88342 100644
646 --- a/drivers/net/ethernet/emulex/benet/be_main.c
647 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
648 @@ -1767,7 +1767,7 @@ static u16 be_tx_compl_process(struct be_adapter *adapter,
649 queue_tail_inc(txq);
650 } while (cur_index != last_index);
651
652 - kfree_skb(sent_skb);
653 + dev_kfree_skb_any(sent_skb);
654 return num_wrbs;
655 }
656
657 diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
658 index fce3e92f9d11..c5a9dcc01ca8 100644
659 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
660 +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
661 @@ -1527,12 +1527,12 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
662 int tso;
663
664 if (test_bit(__IXGB_DOWN, &adapter->flags)) {
665 - dev_kfree_skb(skb);
666 + dev_kfree_skb_any(skb);
667 return NETDEV_TX_OK;
668 }
669
670 if (skb->len <= 0) {
671 - dev_kfree_skb(skb);
672 + dev_kfree_skb_any(skb);
673 return NETDEV_TX_OK;
674 }
675
676 @@ -1549,7 +1549,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
677
678 tso = ixgb_tso(adapter, skb);
679 if (tso < 0) {
680 - dev_kfree_skb(skb);
681 + dev_kfree_skb_any(skb);
682 return NETDEV_TX_OK;
683 }
684
685 diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
686 index 064425d3178d..437d4cfd42cc 100644
687 --- a/drivers/net/ethernet/realtek/8139cp.c
688 +++ b/drivers/net/ethernet/realtek/8139cp.c
689 @@ -899,7 +899,7 @@ out_unlock:
690
691 return NETDEV_TX_OK;
692 out_dma_error:
693 - kfree_skb(skb);
694 + dev_kfree_skb_any(skb);
695 cp->dev->stats.tx_dropped++;
696 goto out_unlock;
697 }
698 diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
699 index 3ccedeb8aba0..942673fcb391 100644
700 --- a/drivers/net/ethernet/realtek/8139too.c
701 +++ b/drivers/net/ethernet/realtek/8139too.c
702 @@ -1715,9 +1715,9 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
703 if (len < ETH_ZLEN)
704 memset(tp->tx_buf[entry], 0, ETH_ZLEN);
705 skb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
706 - dev_kfree_skb(skb);
707 + dev_kfree_skb_any(skb);
708 } else {
709 - dev_kfree_skb(skb);
710 + dev_kfree_skb_any(skb);
711 dev->stats.tx_dropped++;
712 return NETDEV_TX_OK;
713 }
714 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
715 index e9b5d77a90db..2183c6189148 100644
716 --- a/drivers/net/ethernet/realtek/r8169.c
717 +++ b/drivers/net/ethernet/realtek/r8169.c
718 @@ -5768,7 +5768,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
719 tp->TxDescArray + entry);
720 if (skb) {
721 tp->dev->stats.tx_dropped++;
722 - dev_kfree_skb(skb);
723 + dev_kfree_skb_any(skb);
724 tx_skb->skb = NULL;
725 }
726 }
727 @@ -5993,7 +5993,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
728 err_dma_1:
729 rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
730 err_dma_0:
731 - dev_kfree_skb(skb);
732 + dev_kfree_skb_any(skb);
733 err_update_stats:
734 dev->stats.tx_dropped++;
735 return NETDEV_TX_OK;
736 @@ -6076,7 +6076,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
737 tp->tx_stats.packets++;
738 tp->tx_stats.bytes += tx_skb->skb->len;
739 u64_stats_update_end(&tp->tx_stats.syncp);
740 - dev_kfree_skb(tx_skb->skb);
741 + dev_kfree_skb_any(tx_skb->skb);
742 tx_skb->skb = NULL;
743 }
744 dirty_tx++;
745 diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
746 index 345b5ddcb1a0..86281fa5dcc3 100644
747 --- a/drivers/tty/serial/8250/8250_dw.c
748 +++ b/drivers/tty/serial/8250/8250_dw.c
749 @@ -98,7 +98,10 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value)
750 dw8250_force_idle(p);
751 writeb(value, p->membase + (UART_LCR << p->regshift));
752 }
753 - dev_err(p->dev, "Couldn't set LCR to %d\n", value);
754 + /*
755 + * FIXME: this deadlocks if port->lock is already held
756 + * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
757 + */
758 }
759 }
760
761 @@ -128,7 +131,10 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
762 dw8250_force_idle(p);
763 writel(value, p->membase + (UART_LCR << p->regshift));
764 }
765 - dev_err(p->dev, "Couldn't set LCR to %d\n", value);
766 + /*
767 + * FIXME: this deadlocks if port->lock is already held
768 + * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
769 + */
770 }
771 }
772
773 diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
774 index d9a43674cb94..9cca0ea4e479 100644
775 --- a/fs/affs/amigaffs.c
776 +++ b/fs/affs/amigaffs.c
777 @@ -126,7 +126,7 @@ affs_fix_dcache(struct inode *inode, u32 entry_ino)
778 {
779 struct dentry *dentry;
780 spin_lock(&inode->i_lock);
781 - hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
782 + hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
783 if (entry_ino == (u32)(long)dentry->d_fsdata) {
784 dentry->d_fsdata = (void *)inode->i_ino;
785 break;
786 diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
787 index 13ddec92341c..8ad277990eac 100644
788 --- a/fs/autofs4/expire.c
789 +++ b/fs/autofs4/expire.c
790 @@ -91,7 +91,7 @@ static struct dentry *get_next_positive_subdir(struct dentry *prev,
791 spin_lock(&root->d_lock);
792
793 if (prev)
794 - next = prev->d_u.d_child.next;
795 + next = prev->d_child.next;
796 else {
797 prev = dget_dlock(root);
798 next = prev->d_subdirs.next;
799 @@ -105,13 +105,13 @@ cont:
800 return NULL;
801 }
802
803 - q = list_entry(next, struct dentry, d_u.d_child);
804 + q = list_entry(next, struct dentry, d_child);
805
806 spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED);
807 /* Already gone or negative dentry (under construction) - try next */
808 if (q->d_count == 0 || !simple_positive(q)) {
809 spin_unlock(&q->d_lock);
810 - next = q->d_u.d_child.next;
811 + next = q->d_child.next;
812 goto cont;
813 }
814 dget_dlock(q);
815 @@ -161,13 +161,13 @@ again:
816 goto relock;
817 }
818 spin_unlock(&p->d_lock);
819 - next = p->d_u.d_child.next;
820 + next = p->d_child.next;
821 p = parent;
822 if (next != &parent->d_subdirs)
823 break;
824 }
825 }
826 - ret = list_entry(next, struct dentry, d_u.d_child);
827 + ret = list_entry(next, struct dentry, d_child);
828
829 spin_lock_nested(&ret->d_lock, DENTRY_D_LOCK_NESTED);
830 /* Negative dentry - try next */
831 @@ -447,7 +447,7 @@ found:
832 spin_lock(&sbi->lookup_lock);
833 spin_lock(&expired->d_parent->d_lock);
834 spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
835 - list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
836 + list_move(&expired->d_parent->d_subdirs, &expired->d_child);
837 spin_unlock(&expired->d_lock);
838 spin_unlock(&expired->d_parent->d_lock);
839 spin_unlock(&sbi->lookup_lock);
840 diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
841 index 085da86e07c2..79ab4cb3590a 100644
842 --- a/fs/autofs4/root.c
843 +++ b/fs/autofs4/root.c
844 @@ -655,7 +655,7 @@ static void autofs_clear_leaf_automount_flags(struct dentry *dentry)
845 /* only consider parents below dentrys in the root */
846 if (IS_ROOT(parent->d_parent))
847 return;
848 - d_child = &dentry->d_u.d_child;
849 + d_child = &dentry->d_child;
850 /* Set parent managed if it's becoming empty */
851 if (d_child->next == &parent->d_subdirs &&
852 d_child->prev == &parent->d_subdirs)
853 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
854 index f02d82b7933e..ccb43298e272 100644
855 --- a/fs/ceph/dir.c
856 +++ b/fs/ceph/dir.c
857 @@ -103,7 +103,7 @@ static unsigned fpos_off(loff_t p)
858 /*
859 * When possible, we try to satisfy a readdir by peeking at the
860 * dcache. We make this work by carefully ordering dentries on
861 - * d_u.d_child when we initially get results back from the MDS, and
862 + * d_child when we initially get results back from the MDS, and
863 * falling back to a "normal" sync readdir if any dentries in the dir
864 * are dropped.
865 *
866 @@ -139,11 +139,11 @@ static int __dcache_readdir(struct file *filp,
867 p = parent->d_subdirs.prev;
868 dout(" initial p %p/%p\n", p->prev, p->next);
869 } else {
870 - p = last->d_u.d_child.prev;
871 + p = last->d_child.prev;
872 }
873
874 more:
875 - dentry = list_entry(p, struct dentry, d_u.d_child);
876 + dentry = list_entry(p, struct dentry, d_child);
877 di = ceph_dentry(dentry);
878 while (1) {
879 dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
880 @@ -165,7 +165,7 @@ more:
881 !dentry->d_inode ? " null" : "");
882 spin_unlock(&dentry->d_lock);
883 p = p->prev;
884 - dentry = list_entry(p, struct dentry, d_u.d_child);
885 + dentry = list_entry(p, struct dentry, d_child);
886 di = ceph_dentry(dentry);
887 }
888
889 diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
890 index be0f7e20d62e..0cf23a7b88c2 100644
891 --- a/fs/ceph/inode.c
892 +++ b/fs/ceph/inode.c
893 @@ -867,9 +867,9 @@ static void ceph_set_dentry_offset(struct dentry *dn)
894
895 spin_lock(&dir->d_lock);
896 spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
897 - list_move(&dn->d_u.d_child, &dir->d_subdirs);
898 + list_move(&dn->d_child, &dir->d_subdirs);
899 dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
900 - dn->d_u.d_child.prev, dn->d_u.d_child.next);
901 + dn->d_child.prev, dn->d_child.next);
902 spin_unlock(&dn->d_lock);
903 spin_unlock(&dir->d_lock);
904 }
905 @@ -1296,7 +1296,7 @@ retry_lookup:
906 /* reorder parent's d_subdirs */
907 spin_lock(&parent->d_lock);
908 spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
909 - list_move(&dn->d_u.d_child, &parent->d_subdirs);
910 + list_move(&dn->d_child, &parent->d_subdirs);
911 spin_unlock(&dn->d_lock);
912 spin_unlock(&parent->d_lock);
913 }
914 diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
915 index 0dee93706c98..54304ccae7e7 100644
916 --- a/fs/cifs/inode.c
917 +++ b/fs/cifs/inode.c
918 @@ -832,7 +832,7 @@ inode_has_hashed_dentries(struct inode *inode)
919 struct dentry *dentry;
920
921 spin_lock(&inode->i_lock);
922 - hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
923 + hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
924 if (!d_unhashed(dentry) || IS_ROOT(dentry)) {
925 spin_unlock(&inode->i_lock);
926 return true;
927 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
928 index 1da168c61d35..9bc1147a6c5d 100644
929 --- a/fs/coda/cache.c
930 +++ b/fs/coda/cache.c
931 @@ -92,7 +92,7 @@ static void coda_flag_children(struct dentry *parent, int flag)
932 struct dentry *de;
933
934 spin_lock(&parent->d_lock);
935 - list_for_each_entry(de, &parent->d_subdirs, d_u.d_child) {
936 + list_for_each_entry(de, &parent->d_subdirs, d_child) {
937 /* don't know what to do with negative dentries */
938 if (de->d_inode )
939 coda_flag_inode(de->d_inode, flag);
940 diff --git a/fs/dcache.c b/fs/dcache.c
941 index 25c0a1b5f6c0..efa4602e064f 100644
942 --- a/fs/dcache.c
943 +++ b/fs/dcache.c
944 @@ -43,7 +43,7 @@
945 /*
946 * Usage:
947 * dcache->d_inode->i_lock protects:
948 - * - i_dentry, d_alias, d_inode of aliases
949 + * - i_dentry, d_u.d_alias, d_inode of aliases
950 * dcache_hash_bucket lock protects:
951 * - the dcache hash table
952 * s_anon bl list spinlock protects:
953 @@ -58,7 +58,7 @@
954 * - d_unhashed()
955 * - d_parent and d_subdirs
956 * - childrens' d_child and d_parent
957 - * - d_alias, d_inode
958 + * - d_u.d_alias, d_inode
959 *
960 * Ordering:
961 * dentry->d_inode->i_lock
962 @@ -215,7 +215,6 @@ static void __d_free(struct rcu_head *head)
963 {
964 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
965
966 - WARN_ON(!hlist_unhashed(&dentry->d_alias));
967 if (dname_external(dentry))
968 kfree(dentry->d_name.name);
969 kmem_cache_free(dentry_cache, dentry);
970 @@ -226,6 +225,7 @@ static void __d_free(struct rcu_head *head)
971 */
972 static void d_free(struct dentry *dentry)
973 {
974 + WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
975 BUG_ON(dentry->d_count);
976 this_cpu_dec(nr_dentry);
977 if (dentry->d_op && dentry->d_op->d_release)
978 @@ -264,7 +264,7 @@ static void dentry_iput(struct dentry * dentry)
979 struct inode *inode = dentry->d_inode;
980 if (inode) {
981 dentry->d_inode = NULL;
982 - hlist_del_init(&dentry->d_alias);
983 + hlist_del_init(&dentry->d_u.d_alias);
984 spin_unlock(&dentry->d_lock);
985 spin_unlock(&inode->i_lock);
986 if (!inode->i_nlink)
987 @@ -288,7 +288,7 @@ static void dentry_unlink_inode(struct dentry * dentry)
988 {
989 struct inode *inode = dentry->d_inode;
990 dentry->d_inode = NULL;
991 - hlist_del_init(&dentry->d_alias);
992 + hlist_del_init(&dentry->d_u.d_alias);
993 dentry_rcuwalk_barrier(dentry);
994 spin_unlock(&dentry->d_lock);
995 spin_unlock(&inode->i_lock);
996 @@ -364,9 +364,9 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
997 __releases(parent->d_lock)
998 __releases(dentry->d_inode->i_lock)
999 {
1000 - list_del(&dentry->d_u.d_child);
1001 + __list_del_entry(&dentry->d_child);
1002 /*
1003 - * Inform try_to_ascend() that we are no longer attached to the
1004 + * Inform ascending readers that we are no longer attached to the
1005 * dentry tree
1006 */
1007 dentry->d_flags |= DCACHE_DENTRY_KILLED;
1008 @@ -660,7 +660,7 @@ static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
1009
1010 again:
1011 discon_alias = NULL;
1012 - hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
1013 + hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
1014 spin_lock(&alias->d_lock);
1015 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
1016 if (IS_ROOT(alias) &&
1017 @@ -713,7 +713,7 @@ void d_prune_aliases(struct inode *inode)
1018 struct dentry *dentry;
1019 restart:
1020 spin_lock(&inode->i_lock);
1021 - hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
1022 + hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
1023 spin_lock(&dentry->d_lock);
1024 if (!dentry->d_count) {
1025 __dget_dlock(dentry);
1026 @@ -893,7 +893,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
1027 /* descend to the first leaf in the current subtree */
1028 while (!list_empty(&dentry->d_subdirs))
1029 dentry = list_entry(dentry->d_subdirs.next,
1030 - struct dentry, d_u.d_child);
1031 + struct dentry, d_child);
1032
1033 /* consume the dentries from this leaf up through its parents
1034 * until we find one with children or run out altogether */
1035 @@ -927,17 +927,17 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
1036
1037 if (IS_ROOT(dentry)) {
1038 parent = NULL;
1039 - list_del(&dentry->d_u.d_child);
1040 + list_del(&dentry->d_child);
1041 } else {
1042 parent = dentry->d_parent;
1043 parent->d_count--;
1044 - list_del(&dentry->d_u.d_child);
1045 + list_del(&dentry->d_child);
1046 }
1047
1048 inode = dentry->d_inode;
1049 if (inode) {
1050 dentry->d_inode = NULL;
1051 - hlist_del_init(&dentry->d_alias);
1052 + hlist_del_init(&dentry->d_u.d_alias);
1053 if (dentry->d_op && dentry->d_op->d_iput)
1054 dentry->d_op->d_iput(dentry, inode);
1055 else
1056 @@ -955,7 +955,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
1057 } while (list_empty(&dentry->d_subdirs));
1058
1059 dentry = list_entry(dentry->d_subdirs.next,
1060 - struct dentry, d_u.d_child);
1061 + struct dentry, d_child);
1062 }
1063 }
1064
1065 @@ -988,35 +988,6 @@ void shrink_dcache_for_umount(struct super_block *sb)
1066 }
1067
1068 /*
1069 - * This tries to ascend one level of parenthood, but
1070 - * we can race with renaming, so we need to re-check
1071 - * the parenthood after dropping the lock and check
1072 - * that the sequence number still matches.
1073 - */
1074 -static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq)
1075 -{
1076 - struct dentry *new = old->d_parent;
1077 -
1078 - rcu_read_lock();
1079 - spin_unlock(&old->d_lock);
1080 - spin_lock(&new->d_lock);
1081 -
1082 - /*
1083 - * might go back up the wrong parent if we have had a rename
1084 - * or deletion
1085 - */
1086 - if (new != old->d_parent ||
1087 - (old->d_flags & DCACHE_DENTRY_KILLED) ||
1088 - (!locked && read_seqretry(&rename_lock, seq))) {
1089 - spin_unlock(&new->d_lock);
1090 - new = NULL;
1091 - }
1092 - rcu_read_unlock();
1093 - return new;
1094 -}
1095 -
1096 -
1097 -/*
1098 * Search for at least 1 mount point in the dentry's subdirs.
1099 * We descend to the next level whenever the d_subdirs
1100 * list is non-empty and continue searching.
1101 @@ -1048,7 +1019,7 @@ repeat:
1102 resume:
1103 while (next != &this_parent->d_subdirs) {
1104 struct list_head *tmp = next;
1105 - struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1106 + struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
1107 next = tmp->next;
1108
1109 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1110 @@ -1070,30 +1041,48 @@ resume:
1111 /*
1112 * All done at this level ... ascend and resume the search.
1113 */
1114 + rcu_read_lock();
1115 +ascend:
1116 if (this_parent != parent) {
1117 struct dentry *child = this_parent;
1118 - this_parent = try_to_ascend(this_parent, locked, seq);
1119 - if (!this_parent)
1120 + this_parent = child->d_parent;
1121 +
1122 + spin_unlock(&child->d_lock);
1123 + spin_lock(&this_parent->d_lock);
1124 +
1125 + /* might go back up the wrong parent if we have had a rename. */
1126 + if (!locked && read_seqretry(&rename_lock, seq))
1127 goto rename_retry;
1128 - next = child->d_u.d_child.next;
1129 + next = child->d_child.next;
1130 + while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
1131 + if (next == &this_parent->d_subdirs)
1132 + goto ascend;
1133 + child = list_entry(next, struct dentry, d_child);
1134 + next = next->next;
1135 + }
1136 + rcu_read_unlock();
1137 goto resume;
1138 }
1139 - spin_unlock(&this_parent->d_lock);
1140 if (!locked && read_seqretry(&rename_lock, seq))
1141 goto rename_retry;
1142 + spin_unlock(&this_parent->d_lock);
1143 + rcu_read_unlock();
1144 if (locked)
1145 write_sequnlock(&rename_lock);
1146 return 0; /* No mount points found in tree */
1147 positive:
1148 if (!locked && read_seqretry(&rename_lock, seq))
1149 - goto rename_retry;
1150 + goto rename_retry_unlocked;
1151 if (locked)
1152 write_sequnlock(&rename_lock);
1153 return 1;
1154
1155 rename_retry:
1156 + spin_unlock(&this_parent->d_lock);
1157 + rcu_read_unlock();
1158 if (locked)
1159 goto again;
1160 +rename_retry_unlocked:
1161 locked = 1;
1162 write_seqlock(&rename_lock);
1163 goto again;
1164 @@ -1131,7 +1120,7 @@ repeat:
1165 resume:
1166 while (next != &this_parent->d_subdirs) {
1167 struct list_head *tmp = next;
1168 - struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1169 + struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
1170 next = tmp->next;
1171
1172 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1173 @@ -1158,6 +1147,7 @@ resume:
1174 */
1175 if (found && need_resched()) {
1176 spin_unlock(&dentry->d_lock);
1177 + rcu_read_lock();
1178 goto out;
1179 }
1180
1181 @@ -1177,23 +1167,40 @@ resume:
1182 /*
1183 * All done at this level ... ascend and resume the search.
1184 */
1185 + rcu_read_lock();
1186 +ascend:
1187 if (this_parent != parent) {
1188 struct dentry *child = this_parent;
1189 - this_parent = try_to_ascend(this_parent, locked, seq);
1190 - if (!this_parent)
1191 + this_parent = child->d_parent;
1192 +
1193 + spin_unlock(&child->d_lock);
1194 + spin_lock(&this_parent->d_lock);
1195 +
1196 + /* might go back up the wrong parent if we have had a rename. */
1197 + if (!locked && read_seqretry(&rename_lock, seq))
1198 goto rename_retry;
1199 - next = child->d_u.d_child.next;
1200 + next = child->d_child.next;
1201 + while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
1202 + if (next == &this_parent->d_subdirs)
1203 + goto ascend;
1204 + child = list_entry(next, struct dentry, d_child);
1205 + next = next->next;
1206 + }
1207 + rcu_read_unlock();
1208 goto resume;
1209 }
1210 out:
1211 - spin_unlock(&this_parent->d_lock);
1212 if (!locked && read_seqretry(&rename_lock, seq))
1213 goto rename_retry;
1214 + spin_unlock(&this_parent->d_lock);
1215 + rcu_read_unlock();
1216 if (locked)
1217 write_sequnlock(&rename_lock);
1218 return found;
1219
1220 rename_retry:
1221 + spin_unlock(&this_parent->d_lock);
1222 + rcu_read_unlock();
1223 if (found)
1224 return found;
1225 if (locked)
1226 @@ -1278,8 +1285,8 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1227 INIT_HLIST_BL_NODE(&dentry->d_hash);
1228 INIT_LIST_HEAD(&dentry->d_lru);
1229 INIT_LIST_HEAD(&dentry->d_subdirs);
1230 - INIT_HLIST_NODE(&dentry->d_alias);
1231 - INIT_LIST_HEAD(&dentry->d_u.d_child);
1232 + INIT_HLIST_NODE(&dentry->d_u.d_alias);
1233 + INIT_LIST_HEAD(&dentry->d_child);
1234 d_set_d_op(dentry, dentry->d_sb->s_d_op);
1235
1236 this_cpu_inc(nr_dentry);
1237 @@ -1309,7 +1316,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1238 */
1239 __dget_dlock(parent);
1240 dentry->d_parent = parent;
1241 - list_add(&dentry->d_u.d_child, &parent->d_subdirs);
1242 + list_add(&dentry->d_child, &parent->d_subdirs);
1243 spin_unlock(&parent->d_lock);
1244
1245 return dentry;
1246 @@ -1369,7 +1376,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1247 if (inode) {
1248 if (unlikely(IS_AUTOMOUNT(inode)))
1249 dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
1250 - hlist_add_head(&dentry->d_alias, &inode->i_dentry);
1251 + hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1252 }
1253 dentry->d_inode = inode;
1254 dentry_rcuwalk_barrier(dentry);
1255 @@ -1394,7 +1401,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1256
1257 void d_instantiate(struct dentry *entry, struct inode * inode)
1258 {
1259 - BUG_ON(!hlist_unhashed(&entry->d_alias));
1260 + BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1261 if (inode)
1262 spin_lock(&inode->i_lock);
1263 __d_instantiate(entry, inode);
1264 @@ -1433,7 +1440,7 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry,
1265 return NULL;
1266 }
1267
1268 - hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
1269 + hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
1270 /*
1271 * Don't need alias->d_lock here, because aliases with
1272 * d_parent == entry->d_parent are not subject to name or
1273 @@ -1459,7 +1466,7 @@ struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
1274 {
1275 struct dentry *result;
1276
1277 - BUG_ON(!hlist_unhashed(&entry->d_alias));
1278 + BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1279
1280 if (inode)
1281 spin_lock(&inode->i_lock);
1282 @@ -1502,7 +1509,7 @@ static struct dentry * __d_find_any_alias(struct inode *inode)
1283
1284 if (hlist_empty(&inode->i_dentry))
1285 return NULL;
1286 - alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
1287 + alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
1288 __dget(alias);
1289 return alias;
1290 }
1291 @@ -1576,7 +1583,7 @@ struct dentry *d_obtain_alias(struct inode *inode)
1292 spin_lock(&tmp->d_lock);
1293 tmp->d_inode = inode;
1294 tmp->d_flags |= DCACHE_DISCONNECTED;
1295 - hlist_add_head(&tmp->d_alias, &inode->i_dentry);
1296 + hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
1297 hlist_bl_lock(&tmp->d_sb->s_anon);
1298 hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1299 hlist_bl_unlock(&tmp->d_sb->s_anon);
1300 @@ -2019,7 +2026,7 @@ int d_validate(struct dentry *dentry, struct dentry *dparent)
1301 struct dentry *child;
1302
1303 spin_lock(&dparent->d_lock);
1304 - list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
1305 + list_for_each_entry(child, &dparent->d_subdirs, d_child) {
1306 if (dentry == child) {
1307 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1308 __dget_dlock(dentry);
1309 @@ -2266,8 +2273,8 @@ static void __d_move(struct dentry * dentry, struct dentry * target)
1310 /* Unhash the target: dput() will then get rid of it */
1311 __d_drop(target);
1312
1313 - list_del(&dentry->d_u.d_child);
1314 - list_del(&target->d_u.d_child);
1315 + list_del(&dentry->d_child);
1316 + list_del(&target->d_child);
1317
1318 /* Switch the names.. */
1319 switch_names(dentry, target);
1320 @@ -2277,15 +2284,15 @@ static void __d_move(struct dentry * dentry, struct dentry * target)
1321 if (IS_ROOT(dentry)) {
1322 dentry->d_parent = target->d_parent;
1323 target->d_parent = target;
1324 - INIT_LIST_HEAD(&target->d_u.d_child);
1325 + INIT_LIST_HEAD(&target->d_child);
1326 } else {
1327 swap(dentry->d_parent, target->d_parent);
1328
1329 /* And add them back to the (new) parent lists */
1330 - list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
1331 + list_add(&target->d_child, &target->d_parent->d_subdirs);
1332 }
1333
1334 - list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
1335 + list_add(&dentry->d_child, &dentry->d_parent->d_subdirs);
1336
1337 write_seqcount_end(&target->d_seq);
1338 write_seqcount_end(&dentry->d_seq);
1339 @@ -2392,9 +2399,9 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
1340 swap(dentry->d_name.hash, anon->d_name.hash);
1341
1342 dentry->d_parent = dentry;
1343 - list_del_init(&dentry->d_u.d_child);
1344 + list_del_init(&dentry->d_child);
1345 anon->d_parent = dparent;
1346 - list_move(&anon->d_u.d_child, &dparent->d_subdirs);
1347 + list_move(&anon->d_child, &dparent->d_subdirs);
1348
1349 write_seqcount_end(&dentry->d_seq);
1350 write_seqcount_end(&anon->d_seq);
1351 @@ -2933,7 +2940,7 @@ repeat:
1352 resume:
1353 while (next != &this_parent->d_subdirs) {
1354 struct list_head *tmp = next;
1355 - struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1356 + struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
1357 next = tmp->next;
1358
1359 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1360 @@ -2954,26 +2961,43 @@ resume:
1361 }
1362 spin_unlock(&dentry->d_lock);
1363 }
1364 + rcu_read_lock();
1365 +ascend:
1366 if (this_parent != root) {
1367 struct dentry *child = this_parent;
1368 if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
1369 this_parent->d_flags |= DCACHE_GENOCIDE;
1370 this_parent->d_count--;
1371 }
1372 - this_parent = try_to_ascend(this_parent, locked, seq);
1373 - if (!this_parent)
1374 + this_parent = child->d_parent;
1375 +
1376 + spin_unlock(&child->d_lock);
1377 + spin_lock(&this_parent->d_lock);
1378 +
1379 + /* might go back up the wrong parent if we have had a rename. */
1380 + if (!locked && read_seqretry(&rename_lock, seq))
1381 goto rename_retry;
1382 - next = child->d_u.d_child.next;
1383 + next = child->d_child.next;
1384 + while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
1385 + if (next == &this_parent->d_subdirs)
1386 + goto ascend;
1387 + child = list_entry(next, struct dentry, d_child);
1388 + next = next->next;
1389 + }
1390 + rcu_read_unlock();
1391 goto resume;
1392 }
1393 - spin_unlock(&this_parent->d_lock);
1394 if (!locked && read_seqretry(&rename_lock, seq))
1395 goto rename_retry;
1396 + spin_unlock(&this_parent->d_lock);
1397 + rcu_read_unlock();
1398 if (locked)
1399 write_sequnlock(&rename_lock);
1400 return;
1401
1402 rename_retry:
1403 + spin_unlock(&this_parent->d_lock);
1404 + rcu_read_unlock();
1405 if (locked)
1406 goto again;
1407 locked = 1;
1408 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
1409 index 7269ec329c01..26d7fff8d78e 100644
1410 --- a/fs/debugfs/inode.c
1411 +++ b/fs/debugfs/inode.c
1412 @@ -545,7 +545,7 @@ void debugfs_remove_recursive(struct dentry *dentry)
1413 parent = dentry;
1414 down:
1415 mutex_lock(&parent->d_inode->i_mutex);
1416 - list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) {
1417 + list_for_each_entry_safe(child, next, &parent->d_subdirs, d_child) {
1418 if (!debugfs_positive(child))
1419 continue;
1420
1421 @@ -566,8 +566,8 @@ void debugfs_remove_recursive(struct dentry *dentry)
1422 mutex_lock(&parent->d_inode->i_mutex);
1423
1424 if (child != dentry) {
1425 - next = list_entry(child->d_u.d_child.next, struct dentry,
1426 - d_u.d_child);
1427 + next = list_entry(child->d_child.next, struct dentry,
1428 + d_child);
1429 goto up;
1430 }
1431
1432 diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
1433 index 262fc9940982..b4eec4c9a790 100644
1434 --- a/fs/exportfs/expfs.c
1435 +++ b/fs/exportfs/expfs.c
1436 @@ -50,7 +50,7 @@ find_acceptable_alias(struct dentry *result,
1437
1438 inode = result->d_inode;
1439 spin_lock(&inode->i_lock);
1440 - hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
1441 + hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
1442 dget(dentry);
1443 spin_unlock(&inode->i_lock);
1444 if (toput)
1445 diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
1446 index c450fdb3d78d..5d876b1c9ea4 100644
1447 --- a/fs/jfs/jfs_dtree.c
1448 +++ b/fs/jfs/jfs_dtree.c
1449 @@ -3103,7 +3103,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
1450 * self "."
1451 */
1452 filp->f_pos = 1;
1453 - if (filldir(dirent, ".", 1, 0, ip->i_ino,
1454 + if (filldir(dirent, ".", 1, 1, ip->i_ino,
1455 DT_DIR))
1456 return 0;
1457 }
1458 @@ -3111,7 +3111,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
1459 * parent ".."
1460 */
1461 filp->f_pos = 2;
1462 - if (filldir(dirent, "..", 2, 1, PARENT(ip), DT_DIR))
1463 + if (filldir(dirent, "..", 2, 2, PARENT(ip), DT_DIR))
1464 return 0;
1465
1466 /*
1467 diff --git a/fs/libfs.c b/fs/libfs.c
1468 index 916da8c4158b..1299bd5e07b7 100644
1469 --- a/fs/libfs.c
1470 +++ b/fs/libfs.c
1471 @@ -104,18 +104,18 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
1472
1473 spin_lock(&dentry->d_lock);
1474 /* d_lock not required for cursor */
1475 - list_del(&cursor->d_u.d_child);
1476 + list_del(&cursor->d_child);
1477 p = dentry->d_subdirs.next;
1478 while (n && p != &dentry->d_subdirs) {
1479 struct dentry *next;
1480 - next = list_entry(p, struct dentry, d_u.d_child);
1481 + next = list_entry(p, struct dentry, d_child);
1482 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
1483 if (simple_positive(next))
1484 n--;
1485 spin_unlock(&next->d_lock);
1486 p = p->next;
1487 }
1488 - list_add_tail(&cursor->d_u.d_child, p);
1489 + list_add_tail(&cursor->d_child, p);
1490 spin_unlock(&dentry->d_lock);
1491 }
1492 }
1493 @@ -139,7 +139,7 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
1494 {
1495 struct dentry *dentry = filp->f_path.dentry;
1496 struct dentry *cursor = filp->private_data;
1497 - struct list_head *p, *q = &cursor->d_u.d_child;
1498 + struct list_head *p, *q = &cursor->d_child;
1499 ino_t ino;
1500 int i = filp->f_pos;
1501
1502 @@ -165,7 +165,7 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
1503
1504 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
1505 struct dentry *next;
1506 - next = list_entry(p, struct dentry, d_u.d_child);
1507 + next = list_entry(p, struct dentry, d_child);
1508 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
1509 if (!simple_positive(next)) {
1510 spin_unlock(&next->d_lock);
1511 @@ -289,7 +289,7 @@ int simple_empty(struct dentry *dentry)
1512 int ret = 0;
1513
1514 spin_lock(&dentry->d_lock);
1515 - list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) {
1516 + list_for_each_entry(child, &dentry->d_subdirs, d_child) {
1517 spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
1518 if (simple_positive(child)) {
1519 spin_unlock(&child->d_lock);
1520 diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
1521 index 6792ce11f2bf..c578ba9949e6 100644
1522 --- a/fs/ncpfs/dir.c
1523 +++ b/fs/ncpfs/dir.c
1524 @@ -391,7 +391,7 @@ ncp_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos)
1525 spin_lock(&parent->d_lock);
1526 next = parent->d_subdirs.next;
1527 while (next != &parent->d_subdirs) {
1528 - dent = list_entry(next, struct dentry, d_u.d_child);
1529 + dent = list_entry(next, struct dentry, d_child);
1530 if ((unsigned long)dent->d_fsdata == fpos) {
1531 if (dent->d_inode)
1532 dget(dent);
1533 diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
1534 index 32c06587351a..6d5e7c56c79d 100644
1535 --- a/fs/ncpfs/ncplib_kernel.h
1536 +++ b/fs/ncpfs/ncplib_kernel.h
1537 @@ -194,7 +194,7 @@ ncp_renew_dentries(struct dentry *parent)
1538 spin_lock(&parent->d_lock);
1539 next = parent->d_subdirs.next;
1540 while (next != &parent->d_subdirs) {
1541 - dentry = list_entry(next, struct dentry, d_u.d_child);
1542 + dentry = list_entry(next, struct dentry, d_child);
1543
1544 if (dentry->d_fsdata == NULL)
1545 ncp_age_dentry(server, dentry);
1546 @@ -216,7 +216,7 @@ ncp_invalidate_dircache_entries(struct dentry *parent)
1547 spin_lock(&parent->d_lock);
1548 next = parent->d_subdirs.next;
1549 while (next != &parent->d_subdirs) {
1550 - dentry = list_entry(next, struct dentry, d_u.d_child);
1551 + dentry = list_entry(next, struct dentry, d_child);
1552 dentry->d_fsdata = NULL;
1553 ncp_age_dentry(server, dentry);
1554 next = next->next;
1555 diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
1556 index 44efaa8c5f78..0fe3ced6438c 100644
1557 --- a/fs/nfs/getroot.c
1558 +++ b/fs/nfs/getroot.c
1559 @@ -58,7 +58,7 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i
1560 */
1561 spin_lock(&sb->s_root->d_inode->i_lock);
1562 spin_lock(&sb->s_root->d_lock);
1563 - hlist_del_init(&sb->s_root->d_alias);
1564 + hlist_del_init(&sb->s_root->d_u.d_alias);
1565 spin_unlock(&sb->s_root->d_lock);
1566 spin_unlock(&sb->s_root->d_inode->i_lock);
1567 }
1568 diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
1569 index 4bb21d67d9b1..a3153e2d0f1f 100644
1570 --- a/fs/notify/fsnotify.c
1571 +++ b/fs/notify/fsnotify.c
1572 @@ -63,14 +63,14 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
1573 spin_lock(&inode->i_lock);
1574 /* run all of the dentries associated with this inode. Since this is a
1575 * directory, there damn well better only be one item on this list */
1576 - hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
1577 + hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
1578 struct dentry *child;
1579
1580 /* run all of the children of the original inode and fix their
1581 * d_flags to indicate parental interest (their parent is the
1582 * original inode) */
1583 spin_lock(&alias->d_lock);
1584 - list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
1585 + list_for_each_entry(child, &alias->d_subdirs, d_child) {
1586 if (!child->d_inode)
1587 continue;
1588
1589 diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
1590 index ef999729e274..ce37013b4a59 100644
1591 --- a/fs/ocfs2/dcache.c
1592 +++ b/fs/ocfs2/dcache.c
1593 @@ -172,7 +172,7 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode,
1594 struct dentry *dentry;
1595
1596 spin_lock(&inode->i_lock);
1597 - hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
1598 + hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
1599 spin_lock(&dentry->d_lock);
1600 if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
1601 trace_ocfs2_find_local_alias(dentry->d_name.len,
1602 diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
1603 index 8cd6474e248f..d0e8c0b1767f 100644
1604 --- a/fs/ocfs2/file.c
1605 +++ b/fs/ocfs2/file.c
1606 @@ -2459,12 +2459,14 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
1607 struct address_space *mapping = out->f_mapping;
1608 struct inode *inode = mapping->host;
1609 struct splice_desc sd = {
1610 - .total_len = len,
1611 .flags = flags,
1612 - .pos = *ppos,
1613 .u.file = out,
1614 };
1615 -
1616 + ret = generic_write_checks(out, ppos, &len, 0);
1617 + if(ret)
1618 + return ret;
1619 + sd.total_len = len;
1620 + sd.pos = *ppos;
1621
1622 trace_ocfs2_file_splice_write(inode, out, out->f_path.dentry,
1623 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1624 diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
1625 index 157e474ab303..635a1425d370 100644
1626 --- a/fs/reiserfs/reiserfs.h
1627 +++ b/fs/reiserfs/reiserfs.h
1628 @@ -1954,8 +1954,6 @@ struct treepath var = {.path_length = ILLEGAL_PATH_ELEMENT_OFFSET, .reada = 0,}
1629 #define MAX_US_INT 0xffff
1630
1631 // reiserfs version 2 has max offset 60 bits. Version 1 - 32 bit offset
1632 -#define U32_MAX (~(__u32)0)
1633 -
1634 static inline loff_t max_reiserfs_offset(struct inode *inode)
1635 {
1636 if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5)
1637 diff --git a/fs/splice.c b/fs/splice.c
1638 index 4b5a5fac3383..f183f1342c01 100644
1639 --- a/fs/splice.c
1640 +++ b/fs/splice.c
1641 @@ -1012,13 +1012,17 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
1642 struct address_space *mapping = out->f_mapping;
1643 struct inode *inode = mapping->host;
1644 struct splice_desc sd = {
1645 - .total_len = len,
1646 .flags = flags,
1647 - .pos = *ppos,
1648 .u.file = out,
1649 };
1650 ssize_t ret;
1651
1652 + ret = generic_write_checks(out, ppos, &len, S_ISBLK(inode->i_mode));
1653 + if (ret)
1654 + return ret;
1655 + sd.total_len = len;
1656 + sd.pos = *ppos;
1657 +
1658 pipe_lock(pipe);
1659
1660 splice_from_pipe_begin(&sd);
1661 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
1662 index 17bccd3a4b03..dd6d9b89d338 100644
1663 --- a/include/asm-generic/pgtable.h
1664 +++ b/include/asm-generic/pgtable.h
1665 @@ -550,11 +550,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
1666 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1667 barrier();
1668 #endif
1669 - if (pmd_none(pmdval))
1670 + if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
1671 return 1;
1672 if (unlikely(pmd_bad(pmdval))) {
1673 - if (!pmd_trans_huge(pmdval))
1674 - pmd_clear_bad(pmd);
1675 + pmd_clear_bad(pmd);
1676 return 1;
1677 }
1678 return 0;
1679 diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
1680 index 0442c3d800f0..a6ef9cc267ec 100644
1681 --- a/include/linux/ceph/decode.h
1682 +++ b/include/linux/ceph/decode.h
1683 @@ -8,23 +8,6 @@
1684
1685 #include <linux/ceph/types.h>
1686
1687 -/* This seemed to be the easiest place to define these */
1688 -
1689 -#define U8_MAX ((u8)(~0U))
1690 -#define U16_MAX ((u16)(~0U))
1691 -#define U32_MAX ((u32)(~0U))
1692 -#define U64_MAX ((u64)(~0ULL))
1693 -
1694 -#define S8_MAX ((s8)(U8_MAX >> 1))
1695 -#define S16_MAX ((s16)(U16_MAX >> 1))
1696 -#define S32_MAX ((s32)(U32_MAX >> 1))
1697 -#define S64_MAX ((s64)(U64_MAX >> 1LL))
1698 -
1699 -#define S8_MIN ((s8)(-S8_MAX - 1))
1700 -#define S16_MIN ((s16)(-S16_MAX - 1))
1701 -#define S32_MIN ((s32)(-S32_MAX - 1))
1702 -#define S64_MIN ((s64)(-S64_MAX - 1LL))
1703 -
1704 /*
1705 * in all cases,
1706 * void **p pointer to position pointer
1707 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
1708 index 9be5ac960fd8..c1999d1fe6f8 100644
1709 --- a/include/linux/dcache.h
1710 +++ b/include/linux/dcache.h
1711 @@ -120,15 +120,15 @@ struct dentry {
1712 void *d_fsdata; /* fs-specific data */
1713
1714 struct list_head d_lru; /* LRU list */
1715 + struct list_head d_child; /* child of parent list */
1716 + struct list_head d_subdirs; /* our children */
1717 /*
1718 - * d_child and d_rcu can share memory
1719 + * d_alias and d_rcu can share memory
1720 */
1721 union {
1722 - struct list_head d_child; /* child of parent list */
1723 + struct hlist_node d_alias; /* inode alias list */
1724 struct rcu_head d_rcu;
1725 } d_u;
1726 - struct list_head d_subdirs; /* our children */
1727 - struct hlist_node d_alias; /* inode alias list */
1728 };
1729
1730 /*
1731 diff --git a/include/linux/mm.h b/include/linux/mm.h
1732 index c4085192c2b6..53b0d70120a1 100644
1733 --- a/include/linux/mm.h
1734 +++ b/include/linux/mm.h
1735 @@ -891,6 +891,7 @@ static inline int page_mapped(struct page *page)
1736 #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
1737 #define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */
1738 #define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */
1739 +#define VM_FAULT_SIGSEGV 0x0040
1740
1741 #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
1742 #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
1743 @@ -898,8 +899,8 @@ static inline int page_mapped(struct page *page)
1744
1745 #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
1746
1747 -#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
1748 - VM_FAULT_HWPOISON_LARGE)
1749 +#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
1750 + VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)
1751
1752 /* Encode hstate index for a hwpoisoned large page */
1753 #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
1754 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
1755 index d0def7fc2848..ef130605ac43 100644
1756 --- a/kernel/cgroup.c
1757 +++ b/kernel/cgroup.c
1758 @@ -984,7 +984,7 @@ static void cgroup_d_remove_dir(struct dentry *dentry)
1759 parent = dentry->d_parent;
1760 spin_lock(&parent->d_lock);
1761 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1762 - list_del_init(&dentry->d_u.d_child);
1763 + list_del_init(&dentry->d_child);
1764 spin_unlock(&dentry->d_lock);
1765 spin_unlock(&parent->d_lock);
1766 remove_dir(dentry);
1767 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1768 index 8d7e8098e768..640e4c44b170 100644
1769 --- a/kernel/trace/trace.c
1770 +++ b/kernel/trace/trace.c
1771 @@ -6063,7 +6063,7 @@ static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t m
1772 int ret;
1773
1774 /* Paranoid: Make sure the parent is the "instances" directory */
1775 - parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
1776 + parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
1777 if (WARN_ON_ONCE(parent != trace_instance_dir))
1778 return -ENOENT;
1779
1780 @@ -6090,7 +6090,7 @@ static int instance_rmdir(struct inode *inode, struct dentry *dentry)
1781 int ret;
1782
1783 /* Paranoid: Make sure the parent is the "instances" directory */
1784 - parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
1785 + parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
1786 if (WARN_ON_ONCE(parent != trace_instance_dir))
1787 return -ENOENT;
1788
1789 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
1790 index 001b349af939..5a898f15bfc6 100644
1791 --- a/kernel/trace/trace_events.c
1792 +++ b/kernel/trace/trace_events.c
1793 @@ -425,7 +425,7 @@ static void remove_event_file_dir(struct ftrace_event_file *file)
1794
1795 if (dir) {
1796 spin_lock(&dir->d_lock); /* probably unneeded */
1797 - list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
1798 + list_for_each_entry(child, &dir->d_subdirs, d_child) {
1799 if (child->d_inode) /* probably unneeded */
1800 child->d_inode->i_private = NULL;
1801 }
1802 diff --git a/mm/ksm.c b/mm/ksm.c
1803 index 784d1e4bc385..7bf748f30aab 100644
1804 --- a/mm/ksm.c
1805 +++ b/mm/ksm.c
1806 @@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
1807 else
1808 ret = VM_FAULT_WRITE;
1809 put_page(page);
1810 - } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
1811 + } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
1812 /*
1813 * We must loop because handle_mm_fault() may back out if there's
1814 * any difficulty e.g. if pte accessed bit gets updated concurrently.
1815 diff --git a/mm/memory.c b/mm/memory.c
1816 index 04232bb173f0..e6b1da3a8924 100644
1817 --- a/mm/memory.c
1818 +++ b/mm/memory.c
1819 @@ -1844,7 +1844,8 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1820 else
1821 return -EFAULT;
1822 }
1823 - if (ret & VM_FAULT_SIGBUS)
1824 + if (ret & (VM_FAULT_SIGBUS |
1825 + VM_FAULT_SIGSEGV))
1826 return i ? i : -EFAULT;
1827 BUG();
1828 }
1829 @@ -1954,7 +1955,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
1830 return -ENOMEM;
1831 if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
1832 return -EHWPOISON;
1833 - if (ret & VM_FAULT_SIGBUS)
1834 + if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
1835 return -EFAULT;
1836 BUG();
1837 }
1838 @@ -3231,7 +3232,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
1839
1840 /* Check if we need to add a guard page to the stack */
1841 if (check_stack_guard_page(vma, address) < 0)
1842 - return VM_FAULT_SIGBUS;
1843 + return VM_FAULT_SIGSEGV;
1844
1845 /* Use the zero-page for reads */
1846 if (!(flags & FAULT_FLAG_WRITE)) {
1847 diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
1848 index 834857f3c871..86183c4e4fd5 100644
1849 --- a/net/ipv4/tcp_illinois.c
1850 +++ b/net/ipv4/tcp_illinois.c
1851 @@ -23,7 +23,6 @@
1852 #define ALPHA_MIN ((3*ALPHA_SCALE)/10) /* ~0.3 */
1853 #define ALPHA_MAX (10*ALPHA_SCALE) /* 10.0 */
1854 #define ALPHA_BASE ALPHA_SCALE /* 1.0 */
1855 -#define U32_MAX ((u32)~0U)
1856 #define RTT_MAX (U32_MAX / ALPHA_MAX) /* 3.3 secs */
1857
1858 #define BETA_SHIFT 6
1859 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
1860 index ea7f52f3062d..a8be45e4d34f 100644
1861 --- a/net/ipv4/tcp_input.c
1862 +++ b/net/ipv4/tcp_input.c
1863 @@ -3076,10 +3076,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
1864 if (seq_rtt < 0) {
1865 seq_rtt = ca_seq_rtt;
1866 }
1867 - if (!(sacked & TCPCB_SACKED_ACKED))
1868 + if (!(sacked & TCPCB_SACKED_ACKED)) {
1869 reord = min(pkts_acked, reord);
1870 - if (!after(scb->end_seq, tp->high_seq))
1871 - flag |= FLAG_ORIG_SACK_ACKED;
1872 + if (!after(scb->end_seq, tp->high_seq))
1873 + flag |= FLAG_ORIG_SACK_ACKED;
1874 + }
1875 }
1876
1877 if (sacked & TCPCB_SACKED_ACKED)
1878 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
1879 index cce35e5a7ee6..7c3eec386a4b 100644
1880 --- a/net/ipv4/tcp_ipv4.c
1881 +++ b/net/ipv4/tcp_ipv4.c
1882 @@ -1901,7 +1901,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
1883 skb->sk = sk;
1884 skb->destructor = sock_edemux;
1885 if (sk->sk_state != TCP_TIME_WAIT) {
1886 - struct dst_entry *dst = sk->sk_rx_dst;
1887 + struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);
1888
1889 if (dst)
1890 dst = dst_check(dst, 0);
1891 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
1892 index 92b5e1f7d3b0..7681a1bbd97f 100644
1893 --- a/net/ipv4/tcp_output.c
1894 +++ b/net/ipv4/tcp_output.c
1895 @@ -2772,6 +2772,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
1896 }
1897 #endif
1898
1899 + /* Do not fool tcpdump (if any), clean our debris */
1900 + skb->tstamp.tv64 = 0;
1901 return skb;
1902 }
1903 EXPORT_SYMBOL(tcp_make_synack);
1904 diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
1905 index 060a0449acaa..05f361338c2e 100644
1906 --- a/net/ipv6/ndisc.c
1907 +++ b/net/ipv6/ndisc.c
1908 @@ -1193,7 +1193,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1909 if (rt)
1910 rt6_set_expires(rt, jiffies + (HZ * lifetime));
1911 if (ra_msg->icmph.icmp6_hop_limit) {
1912 - in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
1913 + /* Only set hop_limit on the interface if it is higher than
1914 + * the current hop_limit.
1915 + */
1916 + if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
1917 + in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
1918 + } else {
1919 + ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
1920 + }
1921 if (rt)
1922 dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
1923 ra_msg->icmph.icmp6_hop_limit);
1924 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
1925 index 1a87659a6139..4659b8ab55d9 100644
1926 --- a/net/ipv6/tcp_ipv6.c
1927 +++ b/net/ipv6/tcp_ipv6.c
1928 @@ -1616,7 +1616,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
1929 skb->sk = sk;
1930 skb->destructor = sock_edemux;
1931 if (sk->sk_state != TCP_TIME_WAIT) {
1932 - struct dst_entry *dst = sk->sk_rx_dst;
1933 + struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);
1934
1935 if (dst)
1936 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1937 diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
1938 index d25f29377648..957c1db66652 100644
1939 --- a/net/netfilter/nf_conntrack_proto_generic.c
1940 +++ b/net/netfilter/nf_conntrack_proto_generic.c
1941 @@ -14,6 +14,30 @@
1942
1943 static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
1944
1945 +static bool nf_generic_should_process(u8 proto)
1946 +{
1947 + switch (proto) {
1948 +#ifdef CONFIG_NF_CT_PROTO_SCTP_MODULE
1949 + case IPPROTO_SCTP:
1950 + return false;
1951 +#endif
1952 +#ifdef CONFIG_NF_CT_PROTO_DCCP_MODULE
1953 + case IPPROTO_DCCP:
1954 + return false;
1955 +#endif
1956 +#ifdef CONFIG_NF_CT_PROTO_GRE_MODULE
1957 + case IPPROTO_GRE:
1958 + return false;
1959 +#endif
1960 +#ifdef CONFIG_NF_CT_PROTO_UDPLITE_MODULE
1961 + case IPPROTO_UDPLITE:
1962 + return false;
1963 +#endif
1964 + default:
1965 + return true;
1966 + }
1967 +}
1968 +
1969 static inline struct nf_generic_net *generic_pernet(struct net *net)
1970 {
1971 return &net->ct.nf_ct_proto.generic;
1972 @@ -67,7 +91,7 @@ static int generic_packet(struct nf_conn *ct,
1973 static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
1974 unsigned int dataoff, unsigned int *timeouts)
1975 {
1976 - return true;
1977 + return nf_generic_should_process(nf_ct_protonum(ct));
1978 }
1979
1980 #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
1981 diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
1982 index fd3f0180e08f..6af1c42a9cf3 100644
1983 --- a/scripts/kconfig/menu.c
1984 +++ b/scripts/kconfig/menu.c
1985 @@ -525,7 +525,7 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
1986 {
1987 int i, j;
1988 struct menu *submenu[8], *menu, *location = NULL;
1989 - struct jump_key *jump;
1990 + struct jump_key *jump = NULL;
1991
1992 str_printf(r, _("Prompt: %s\n"), _(prop->text));
1993 menu = prop->menu->parent;
1994 @@ -563,7 +563,7 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
1995 str_printf(r, _(" Location:\n"));
1996 for (j = 4; --i >= 0; j += 2) {
1997 menu = submenu[i];
1998 - if (head && location && menu == location)
1999 + if (jump && menu == location)
2000 jump->offset = r->len - 1;
2001 str_printf(r, "%*c-> %s", j, ' ',
2002 _(menu_get_prompt(menu)));
2003 diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
2004 index 464be51025f6..a96bed4db3e8 100644
2005 --- a/security/selinux/selinuxfs.c
2006 +++ b/security/selinux/selinuxfs.c
2007 @@ -1190,7 +1190,7 @@ static void sel_remove_entries(struct dentry *de)
2008 spin_lock(&de->d_lock);
2009 node = de->d_subdirs.next;
2010 while (node != &de->d_subdirs) {
2011 - struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
2012 + struct dentry *d = list_entry(node, struct dentry, d_child);
2013
2014 spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
2015 list_del_init(node);
2016 @@ -1664,12 +1664,12 @@ static void sel_remove_classes(void)
2017
2018 list_for_each(class_node, &class_dir->d_subdirs) {
2019 struct dentry *class_subdir = list_entry(class_node,
2020 - struct dentry, d_u.d_child);
2021 + struct dentry, d_child);
2022 struct list_head *class_subdir_node;
2023
2024 list_for_each(class_subdir_node, &class_subdir->d_subdirs) {
2025 struct dentry *d = list_entry(class_subdir_node,
2026 - struct dentry, d_u.d_child);
2027 + struct dentry, d_child);
2028
2029 if (d->d_inode)
2030 if (d->d_inode->i_mode & S_IFDIR)