Contents of /trunk/kernel-lts/patches-3.10/0127-3.10.28-all-fixes.patch
Parent Directory | Revision Log
Revision 2396 -
(show annotations)
(download)
Mon Feb 3 12:42:26 2014 UTC (10 years, 7 months ago) by niro
File size: 24151 byte(s)
-linux-3.10.28
1 | diff --git a/Makefile b/Makefile |
2 | index 09675a57059c..addf1b007fe3 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 3 |
7 | PATCHLEVEL = 10 |
8 | -SUBLEVEL = 27 |
9 | +SUBLEVEL = 28 |
10 | EXTRAVERSION = |
11 | NAME = TOSSUG Baby Fish |
12 | |
13 | diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c |
14 | index 4fb074c446bf..163b160c69e9 100644 |
15 | --- a/arch/arm/kernel/machine_kexec.c |
16 | +++ b/arch/arm/kernel/machine_kexec.c |
17 | @@ -73,6 +73,7 @@ void machine_crash_nonpanic_core(void *unused) |
18 | crash_save_cpu(®s, smp_processor_id()); |
19 | flush_cache_all(); |
20 | |
21 | + set_cpu_online(smp_processor_id(), false); |
22 | atomic_dec(&waiting_for_crash_ipi); |
23 | while (1) |
24 | cpu_relax(); |
25 | diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c |
26 | index e7df2dd43a40..5ed19e88874b 100644 |
27 | --- a/arch/arm/mach-highbank/highbank.c |
28 | +++ b/arch/arm/mach-highbank/highbank.c |
29 | @@ -68,6 +68,7 @@ void highbank_set_cpu_jump(int cpu, void *jump_addr) |
30 | #ifdef CONFIG_CACHE_L2X0 |
31 | static void highbank_l2x0_disable(void) |
32 | { |
33 | + outer_flush_all(); |
34 | /* Disable PL310 L2 Cache controller */ |
35 | highbank_smc1(0x102, 0x0); |
36 | } |
37 | diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c |
38 | index 13b27ffaf45e..ab99ab8fce8a 100644 |
39 | --- a/arch/arm/mach-omap2/omap4-common.c |
40 | +++ b/arch/arm/mach-omap2/omap4-common.c |
41 | @@ -162,6 +162,7 @@ void __iomem *omap4_get_l2cache_base(void) |
42 | |
43 | static void omap4_l2x0_disable(void) |
44 | { |
45 | + outer_flush_all(); |
46 | /* Disable PL310 L2 Cache controller */ |
47 | omap_smc1(0x102, 0x0); |
48 | } |
49 | diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c |
50 | index 5f0581e713c2..b46601ada813 100644 |
51 | --- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c |
52 | +++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c |
53 | @@ -10,6 +10,7 @@ |
54 | #include <linux/module.h> |
55 | #include <linux/pci.h> |
56 | #include <linux/ptrace.h> |
57 | +#include <linux/syscore_ops.h> |
58 | |
59 | #include <asm/apic.h> |
60 | |
61 | @@ -816,6 +817,18 @@ out: |
62 | return ret; |
63 | } |
64 | |
65 | +static void ibs_eilvt_setup(void) |
66 | +{ |
67 | + /* |
68 | + * Force LVT offset assignment for family 10h: The offsets are |
69 | + * not assigned by the BIOS for this family, so the OS is |
70 | + * responsible for doing it. If the OS assignment fails, fall |
71 | + * back to BIOS settings and try to setup this. |
72 | + */ |
73 | + if (boot_cpu_data.x86 == 0x10) |
74 | + force_ibs_eilvt_setup(); |
75 | +} |
76 | + |
77 | static inline int get_ibs_lvt_offset(void) |
78 | { |
79 | u64 val; |
80 | @@ -851,6 +864,36 @@ static void clear_APIC_ibs(void *dummy) |
81 | setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); |
82 | } |
83 | |
84 | +#ifdef CONFIG_PM |
85 | + |
86 | +static int perf_ibs_suspend(void) |
87 | +{ |
88 | + clear_APIC_ibs(NULL); |
89 | + return 0; |
90 | +} |
91 | + |
92 | +static void perf_ibs_resume(void) |
93 | +{ |
94 | + ibs_eilvt_setup(); |
95 | + setup_APIC_ibs(NULL); |
96 | +} |
97 | + |
98 | +static struct syscore_ops perf_ibs_syscore_ops = { |
99 | + .resume = perf_ibs_resume, |
100 | + .suspend = perf_ibs_suspend, |
101 | +}; |
102 | + |
103 | +static void perf_ibs_pm_init(void) |
104 | +{ |
105 | + register_syscore_ops(&perf_ibs_syscore_ops); |
106 | +} |
107 | + |
108 | +#else |
109 | + |
110 | +static inline void perf_ibs_pm_init(void) { } |
111 | + |
112 | +#endif |
113 | + |
114 | static int __cpuinit |
115 | perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) |
116 | { |
117 | @@ -877,18 +920,12 @@ static __init int amd_ibs_init(void) |
118 | if (!caps) |
119 | return -ENODEV; /* ibs not supported by the cpu */ |
120 | |
121 | - /* |
122 | - * Force LVT offset assignment for family 10h: The offsets are |
123 | - * not assigned by the BIOS for this family, so the OS is |
124 | - * responsible for doing it. If the OS assignment fails, fall |
125 | - * back to BIOS settings and try to setup this. |
126 | - */ |
127 | - if (boot_cpu_data.x86 == 0x10) |
128 | - force_ibs_eilvt_setup(); |
129 | + ibs_eilvt_setup(); |
130 | |
131 | if (!ibs_eilvt_valid()) |
132 | goto out; |
133 | |
134 | + perf_ibs_pm_init(); |
135 | get_online_cpus(); |
136 | ibs_caps = caps; |
137 | /* make ibs_caps visible to other cpus: */ |
138 | diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S |
139 | index 8f3e2dec1df3..94e52cf064b0 100644 |
140 | --- a/arch/x86/kernel/entry_32.S |
141 | +++ b/arch/x86/kernel/entry_32.S |
142 | @@ -1075,7 +1075,7 @@ ENTRY(ftrace_caller) |
143 | pushl $0 /* Pass NULL as regs pointer */ |
144 | movl 4*4(%esp), %eax |
145 | movl 0x4(%ebp), %edx |
146 | - leal function_trace_op, %ecx |
147 | + movl function_trace_op, %ecx |
148 | subl $MCOUNT_INSN_SIZE, %eax |
149 | |
150 | .globl ftrace_call |
151 | @@ -1133,7 +1133,7 @@ ENTRY(ftrace_regs_caller) |
152 | movl 12*4(%esp), %eax /* Load ip (1st parameter) */ |
153 | subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ |
154 | movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */ |
155 | - leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ |
156 | + movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ |
157 | pushl %esp /* Save pt_regs as 4th parameter */ |
158 | |
159 | GLOBAL(ftrace_regs_call) |
160 | diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S |
161 | index 727208941030..7ac938a4bfab 100644 |
162 | --- a/arch/x86/kernel/entry_64.S |
163 | +++ b/arch/x86/kernel/entry_64.S |
164 | @@ -88,7 +88,7 @@ END(function_hook) |
165 | MCOUNT_SAVE_FRAME \skip |
166 | |
167 | /* Load the ftrace_ops into the 3rd parameter */ |
168 | - leaq function_trace_op, %rdx |
169 | + movq function_trace_op(%rip), %rdx |
170 | |
171 | /* Load ip into the first parameter */ |
172 | movq RIP(%rsp), %rdi |
173 | diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c |
174 | index 16e674af4d57..7ce38344e484 100644 |
175 | --- a/drivers/gpu/drm/i915/intel_ddi.c |
176 | +++ b/drivers/gpu/drm/i915/intel_ddi.c |
177 | @@ -1193,12 +1193,18 @@ void intel_ddi_setup_hw_pll_state(struct drm_device *dev) |
178 | enum pipe pipe; |
179 | struct intel_crtc *intel_crtc; |
180 | |
181 | + dev_priv->ddi_plls.spll_refcount = 0; |
182 | + dev_priv->ddi_plls.wrpll1_refcount = 0; |
183 | + dev_priv->ddi_plls.wrpll2_refcount = 0; |
184 | + |
185 | for_each_pipe(pipe) { |
186 | intel_crtc = |
187 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
188 | |
189 | - if (!intel_crtc->active) |
190 | + if (!intel_crtc->active) { |
191 | + intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE; |
192 | continue; |
193 | + } |
194 | |
195 | intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv, |
196 | pipe); |
197 | diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c |
198 | index 15358add4f70..54ae96f7bec6 100644 |
199 | --- a/drivers/gpu/drm/i915/intel_display.c |
200 | +++ b/drivers/gpu/drm/i915/intel_display.c |
201 | @@ -9456,9 +9456,9 @@ void intel_modeset_gem_init(struct drm_device *dev) |
202 | |
203 | intel_setup_overlay(dev); |
204 | |
205 | - drm_modeset_lock_all(dev); |
206 | + mutex_lock(&dev->mode_config.mutex); |
207 | intel_modeset_setup_hw_state(dev, false); |
208 | - drm_modeset_unlock_all(dev); |
209 | + mutex_unlock(&dev->mode_config.mutex); |
210 | } |
211 | |
212 | void intel_modeset_cleanup(struct drm_device *dev) |
213 | diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c |
214 | index 658ce3a8717f..0cf25101b3c3 100644 |
215 | --- a/drivers/hwmon/coretemp.c |
216 | +++ b/drivers/hwmon/coretemp.c |
217 | @@ -52,7 +52,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); |
218 | |
219 | #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ |
220 | #define NUM_REAL_CORES 32 /* Number of Real cores per cpu */ |
221 | -#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ |
222 | +#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */ |
223 | #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ |
224 | #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) |
225 | #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) |
226 | diff --git a/drivers/md/md.c b/drivers/md/md.c |
227 | index 2290b95009de..a2dda416c9cb 100644 |
228 | --- a/drivers/md/md.c |
229 | +++ b/drivers/md/md.c |
230 | @@ -1118,6 +1118,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) |
231 | rdev->raid_disk = -1; |
232 | clear_bit(Faulty, &rdev->flags); |
233 | clear_bit(In_sync, &rdev->flags); |
234 | + clear_bit(Bitmap_sync, &rdev->flags); |
235 | clear_bit(WriteMostly, &rdev->flags); |
236 | |
237 | if (mddev->raid_disks == 0) { |
238 | @@ -1196,6 +1197,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) |
239 | */ |
240 | if (ev1 < mddev->bitmap->events_cleared) |
241 | return 0; |
242 | + if (ev1 < mddev->events) |
243 | + set_bit(Bitmap_sync, &rdev->flags); |
244 | } else { |
245 | if (ev1 < mddev->events) |
246 | /* just a hot-add of a new device, leave raid_disk at -1 */ |
247 | @@ -1604,6 +1607,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) |
248 | rdev->raid_disk = -1; |
249 | clear_bit(Faulty, &rdev->flags); |
250 | clear_bit(In_sync, &rdev->flags); |
251 | + clear_bit(Bitmap_sync, &rdev->flags); |
252 | clear_bit(WriteMostly, &rdev->flags); |
253 | |
254 | if (mddev->raid_disks == 0) { |
255 | @@ -1686,6 +1690,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) |
256 | */ |
257 | if (ev1 < mddev->bitmap->events_cleared) |
258 | return 0; |
259 | + if (ev1 < mddev->events) |
260 | + set_bit(Bitmap_sync, &rdev->flags); |
261 | } else { |
262 | if (ev1 < mddev->events) |
263 | /* just a hot-add of a new device, leave raid_disk at -1 */ |
264 | @@ -2829,6 +2835,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len) |
265 | else |
266 | rdev->saved_raid_disk = -1; |
267 | clear_bit(In_sync, &rdev->flags); |
268 | + clear_bit(Bitmap_sync, &rdev->flags); |
269 | err = rdev->mddev->pers-> |
270 | hot_add_disk(rdev->mddev, rdev); |
271 | if (err) { |
272 | @@ -5761,6 +5768,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info) |
273 | info->raid_disk < mddev->raid_disks) { |
274 | rdev->raid_disk = info->raid_disk; |
275 | set_bit(In_sync, &rdev->flags); |
276 | + clear_bit(Bitmap_sync, &rdev->flags); |
277 | } else |
278 | rdev->raid_disk = -1; |
279 | } else |
280 | @@ -7694,7 +7702,8 @@ static int remove_and_add_spares(struct mddev *mddev, |
281 | if (test_bit(Faulty, &rdev->flags)) |
282 | continue; |
283 | if (mddev->ro && |
284 | - rdev->saved_raid_disk < 0) |
285 | + ! (rdev->saved_raid_disk >= 0 && |
286 | + !test_bit(Bitmap_sync, &rdev->flags))) |
287 | continue; |
288 | |
289 | rdev->recovery_offset = 0; |
290 | @@ -7775,9 +7784,12 @@ void md_check_recovery(struct mddev *mddev) |
291 | * As we only add devices that are already in-sync, |
292 | * we can activate the spares immediately. |
293 | */ |
294 | - clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
295 | remove_and_add_spares(mddev, NULL); |
296 | - mddev->pers->spare_active(mddev); |
297 | + /* There is no thread, but we need to call |
298 | + * ->spare_active and clear saved_raid_disk |
299 | + */ |
300 | + md_reap_sync_thread(mddev); |
301 | + clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
302 | goto unlock; |
303 | } |
304 | |
305 | diff --git a/drivers/md/md.h b/drivers/md/md.h |
306 | index 653f992b687a..ebe748e57416 100644 |
307 | --- a/drivers/md/md.h |
308 | +++ b/drivers/md/md.h |
309 | @@ -129,6 +129,9 @@ struct md_rdev { |
310 | enum flag_bits { |
311 | Faulty, /* device is known to have a fault */ |
312 | In_sync, /* device is in_sync with rest of array */ |
313 | + Bitmap_sync, /* ..actually, not quite In_sync. Need a |
314 | + * bitmap-based recovery to get fully in sync |
315 | + */ |
316 | Unmerged, /* device is being added to array and should |
317 | * be considerred for bvec_merge_fn but not |
318 | * yet for actual IO |
319 | diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c |
320 | index 0add86821755..d2f8cd332b4a 100644 |
321 | --- a/drivers/md/raid10.c |
322 | +++ b/drivers/md/raid10.c |
323 | @@ -1321,7 +1321,7 @@ read_again: |
324 | /* Could not read all from this device, so we will |
325 | * need another r10_bio. |
326 | */ |
327 | - sectors_handled = (r10_bio->sectors + max_sectors |
328 | + sectors_handled = (r10_bio->sector + max_sectors |
329 | - bio->bi_sector); |
330 | r10_bio->sectors = max_sectors; |
331 | spin_lock_irq(&conf->device_lock); |
332 | @@ -1329,7 +1329,7 @@ read_again: |
333 | bio->bi_phys_segments = 2; |
334 | else |
335 | bio->bi_phys_segments++; |
336 | - spin_unlock(&conf->device_lock); |
337 | + spin_unlock_irq(&conf->device_lock); |
338 | /* Cannot call generic_make_request directly |
339 | * as that will be queued in __generic_make_request |
340 | * and subsequent mempool_alloc might block |
341 | @@ -3198,10 +3198,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, |
342 | if (j == conf->copies) { |
343 | /* Cannot recover, so abort the recovery or |
344 | * record a bad block */ |
345 | - put_buf(r10_bio); |
346 | - if (rb2) |
347 | - atomic_dec(&rb2->remaining); |
348 | - r10_bio = rb2; |
349 | if (any_working) { |
350 | /* problem is that there are bad blocks |
351 | * on other device(s) |
352 | @@ -3233,6 +3229,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, |
353 | mirror->recovery_disabled |
354 | = mddev->recovery_disabled; |
355 | } |
356 | + put_buf(r10_bio); |
357 | + if (rb2) |
358 | + atomic_dec(&rb2->remaining); |
359 | + r10_bio = rb2; |
360 | break; |
361 | } |
362 | } |
363 | diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c |
364 | index 4bed5454b8dc..51422999fd08 100644 |
365 | --- a/drivers/md/raid5.c |
366 | +++ b/drivers/md/raid5.c |
367 | @@ -3391,7 +3391,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) |
368 | */ |
369 | set_bit(R5_Insync, &dev->flags); |
370 | |
371 | - if (rdev && test_bit(R5_WriteError, &dev->flags)) { |
372 | + if (test_bit(R5_WriteError, &dev->flags)) { |
373 | /* This flag does not apply to '.replacement' |
374 | * only to .rdev, so make sure to check that*/ |
375 | struct md_rdev *rdev2 = rcu_dereference( |
376 | @@ -3404,7 +3404,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) |
377 | } else |
378 | clear_bit(R5_WriteError, &dev->flags); |
379 | } |
380 | - if (rdev && test_bit(R5_MadeGood, &dev->flags)) { |
381 | + if (test_bit(R5_MadeGood, &dev->flags)) { |
382 | /* This flag does not apply to '.replacement' |
383 | * only to .rdev, so make sure to check that*/ |
384 | struct md_rdev *rdev2 = rcu_dereference( |
385 | diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c |
386 | index 3d4878facc26..d919a9863e01 100644 |
387 | --- a/drivers/staging/comedi/drivers/addi_apci_1032.c |
388 | +++ b/drivers/staging/comedi/drivers/addi_apci_1032.c |
389 | @@ -332,8 +332,8 @@ static int apci1032_auto_attach(struct comedi_device *dev, |
390 | s = &dev->subdevices[1]; |
391 | if (dev->irq) { |
392 | dev->read_subdev = s; |
393 | - s->type = COMEDI_SUBD_DI | SDF_CMD_READ; |
394 | - s->subdev_flags = SDF_READABLE; |
395 | + s->type = COMEDI_SUBD_DI; |
396 | + s->subdev_flags = SDF_READABLE | SDF_CMD_READ; |
397 | s->n_chan = 1; |
398 | s->maxdata = 1; |
399 | s->range_table = &range_digital; |
400 | diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c |
401 | index 6247fdcedcbf..71043a1c8500 100644 |
402 | --- a/drivers/staging/comedi/drivers/adl_pci9111.c |
403 | +++ b/drivers/staging/comedi/drivers/adl_pci9111.c |
404 | @@ -873,7 +873,7 @@ static int pci9111_auto_attach(struct comedi_device *dev, |
405 | pci9111_reset(dev); |
406 | |
407 | if (pcidev->irq > 0) { |
408 | - ret = request_irq(dev->irq, pci9111_interrupt, |
409 | + ret = request_irq(pcidev->irq, pci9111_interrupt, |
410 | IRQF_SHARED, dev->board_name, dev); |
411 | if (ret) |
412 | return ret; |
413 | diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c |
414 | index e2774f9ecd59..7a55fe70434a 100644 |
415 | --- a/drivers/tty/serial/amba-pl011.c |
416 | +++ b/drivers/tty/serial/amba-pl011.c |
417 | @@ -1543,6 +1543,8 @@ static int pl011_startup(struct uart_port *port) |
418 | /* |
419 | * Provoke TX FIFO interrupt into asserting. |
420 | */ |
421 | + spin_lock_irq(&uap->port.lock); |
422 | + |
423 | cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE; |
424 | writew(cr, uap->port.membase + UART011_CR); |
425 | writew(0, uap->port.membase + UART011_FBRD); |
426 | @@ -1567,6 +1569,8 @@ static int pl011_startup(struct uart_port *port) |
427 | cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE; |
428 | writew(cr, uap->port.membase + UART011_CR); |
429 | |
430 | + spin_unlock_irq(&uap->port.lock); |
431 | + |
432 | /* |
433 | * initialise the old status of the modem signals |
434 | */ |
435 | @@ -1636,11 +1640,13 @@ static void pl011_shutdown(struct uart_port *port) |
436 | * it during startup(). |
437 | */ |
438 | uap->autorts = false; |
439 | + spin_lock_irq(&uap->port.lock); |
440 | cr = readw(uap->port.membase + UART011_CR); |
441 | uap->old_cr = cr; |
442 | cr &= UART011_CR_RTS | UART011_CR_DTR; |
443 | cr |= UART01x_CR_UARTEN | UART011_CR_TXE; |
444 | writew(cr, uap->port.membase + UART011_CR); |
445 | + spin_unlock_irq(&uap->port.lock); |
446 | |
447 | /* |
448 | * disable break condition and fifos |
449 | diff --git a/fs/dcache.c b/fs/dcache.c |
450 | index da89cdfb21ab..9a59653d3449 100644 |
451 | --- a/fs/dcache.c |
452 | +++ b/fs/dcache.c |
453 | @@ -2686,8 +2686,13 @@ char *d_path(const struct path *path, char *buf, int buflen) |
454 | * thus don't need to be hashed. They also don't need a name until a |
455 | * user wants to identify the object in /proc/pid/fd/. The little hack |
456 | * below allows us to generate a name for these objects on demand: |
457 | + * |
458 | + * Some pseudo inodes are mountable. When they are mounted |
459 | + * path->dentry == path->mnt->mnt_root. In that case don't call d_dname |
460 | + * and instead have d_path return the mounted path. |
461 | */ |
462 | - if (path->dentry->d_op && path->dentry->d_op->d_dname) |
463 | + if (path->dentry->d_op && path->dentry->d_op->d_dname && |
464 | + (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root)) |
465 | return path->dentry->d_op->d_dname(path->dentry, buf, buflen); |
466 | |
467 | get_fs_root(current->fs, &root); |
468 | diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c |
469 | index 3be57189efd5..e3ab1e4dc442 100644 |
470 | --- a/fs/fs-writeback.c |
471 | +++ b/fs/fs-writeback.c |
472 | @@ -505,13 +505,16 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb, |
473 | } |
474 | WARN_ON(inode->i_state & I_SYNC); |
475 | /* |
476 | - * Skip inode if it is clean. We don't want to mess with writeback |
477 | - * lists in this function since flusher thread may be doing for example |
478 | - * sync in parallel and if we move the inode, it could get skipped. So |
479 | - * here we make sure inode is on some writeback list and leave it there |
480 | - * unless we have completely cleaned the inode. |
481 | + * Skip inode if it is clean and we have no outstanding writeback in |
482 | + * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this |
483 | + * function since flusher thread may be doing for example sync in |
484 | + * parallel and if we move the inode, it could get skipped. So here we |
485 | + * make sure inode is on some writeback list and leave it there unless |
486 | + * we have completely cleaned the inode. |
487 | */ |
488 | - if (!(inode->i_state & I_DIRTY)) |
489 | + if (!(inode->i_state & I_DIRTY) && |
490 | + (wbc->sync_mode != WB_SYNC_ALL || |
491 | + !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))) |
492 | goto out; |
493 | inode->i_state |= I_SYNC; |
494 | spin_unlock(&inode->i_lock); |
495 | diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c |
496 | index 62b484e4a9e4..bc5dac400125 100644 |
497 | --- a/fs/gfs2/inode.c |
498 | +++ b/fs/gfs2/inode.c |
499 | @@ -1536,10 +1536,22 @@ static int setattr_chown(struct inode *inode, struct iattr *attr) |
500 | if (!(attr->ia_valid & ATTR_GID) || gid_eq(ogid, ngid)) |
501 | ogid = ngid = NO_GID_QUOTA_CHANGE; |
502 | |
503 | - error = gfs2_quota_lock(ip, nuid, ngid); |
504 | + error = get_write_access(inode); |
505 | if (error) |
506 | return error; |
507 | |
508 | + error = gfs2_rs_alloc(ip); |
509 | + if (error) |
510 | + goto out; |
511 | + |
512 | + error = gfs2_rindex_update(sdp); |
513 | + if (error) |
514 | + goto out; |
515 | + |
516 | + error = gfs2_quota_lock(ip, nuid, ngid); |
517 | + if (error) |
518 | + goto out; |
519 | + |
520 | if (!uid_eq(ouid, NO_UID_QUOTA_CHANGE) || |
521 | !gid_eq(ogid, NO_GID_QUOTA_CHANGE)) { |
522 | error = gfs2_quota_check(ip, nuid, ngid); |
523 | @@ -1566,6 +1578,8 @@ out_end_trans: |
524 | gfs2_trans_end(sdp); |
525 | out_gunlock_q: |
526 | gfs2_quota_unlock(ip); |
527 | +out: |
528 | + put_write_access(inode); |
529 | return error; |
530 | } |
531 | |
532 | diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c |
533 | index cbd66188a28b..958a5b57ed4a 100644 |
534 | --- a/fs/nilfs2/segment.c |
535 | +++ b/fs/nilfs2/segment.c |
536 | @@ -1440,17 +1440,19 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci, |
537 | |
538 | nilfs_clear_logs(&sci->sc_segbufs); |
539 | |
540 | - err = nilfs_segctor_extend_segments(sci, nilfs, nadd); |
541 | - if (unlikely(err)) |
542 | - return err; |
543 | - |
544 | if (sci->sc_stage.flags & NILFS_CF_SUFREED) { |
545 | err = nilfs_sufile_cancel_freev(nilfs->ns_sufile, |
546 | sci->sc_freesegs, |
547 | sci->sc_nfreesegs, |
548 | NULL); |
549 | WARN_ON(err); /* do not happen */ |
550 | + sci->sc_stage.flags &= ~NILFS_CF_SUFREED; |
551 | } |
552 | + |
553 | + err = nilfs_segctor_extend_segments(sci, nilfs, nadd); |
554 | + if (unlikely(err)) |
555 | + return err; |
556 | + |
557 | nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA); |
558 | sci->sc_stage = prev_stage; |
559 | } |
560 | diff --git a/include/linux/mm.h b/include/linux/mm.h |
561 | index e0c8528a41a4..3bf21c3502d0 100644 |
562 | --- a/include/linux/mm.h |
563 | +++ b/include/linux/mm.h |
564 | @@ -766,11 +766,14 @@ static __always_inline void *lowmem_page_address(const struct page *page) |
565 | #endif |
566 | |
567 | #if defined(WANT_PAGE_VIRTUAL) |
568 | -#define page_address(page) ((page)->virtual) |
569 | -#define set_page_address(page, address) \ |
570 | - do { \ |
571 | - (page)->virtual = (address); \ |
572 | - } while(0) |
573 | +static inline void *page_address(const struct page *page) |
574 | +{ |
575 | + return page->virtual; |
576 | +} |
577 | +static inline void set_page_address(struct page *page, void *address) |
578 | +{ |
579 | + page->virtual = address; |
580 | +} |
581 | #define page_address_init() do { } while(0) |
582 | #endif |
583 | |
584 | diff --git a/mm/huge_memory.c b/mm/huge_memory.c |
585 | index 6bd22902d289..eb00e81601a5 100644 |
586 | --- a/mm/huge_memory.c |
587 | +++ b/mm/huge_memory.c |
588 | @@ -1166,7 +1166,7 @@ alloc: |
589 | |
590 | if (unlikely(!new_page)) { |
591 | count_vm_event(THP_FAULT_FALLBACK); |
592 | - if (is_huge_zero_pmd(orig_pmd)) { |
593 | + if (!page) { |
594 | ret = do_huge_pmd_wp_zero_page_fallback(mm, vma, |
595 | address, pmd, orig_pmd, haddr); |
596 | } else { |
597 | @@ -1190,7 +1190,7 @@ alloc: |
598 | goto out; |
599 | } |
600 | |
601 | - if (is_huge_zero_pmd(orig_pmd)) |
602 | + if (!page) |
603 | clear_huge_page(new_page, haddr, HPAGE_PMD_NR); |
604 | else |
605 | copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); |
606 | @@ -1215,7 +1215,7 @@ alloc: |
607 | page_add_new_anon_rmap(new_page, vma, haddr); |
608 | set_pmd_at(mm, haddr, pmd, entry); |
609 | update_mmu_cache_pmd(vma, address, pmd); |
610 | - if (is_huge_zero_pmd(orig_pmd)) { |
611 | + if (!page) { |
612 | add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); |
613 | put_huge_zero_page(); |
614 | } else { |
615 | diff --git a/mm/memory-failure.c b/mm/memory-failure.c |
616 | index 6a7f9cab4ddb..7e3601ce51c6 100644 |
617 | --- a/mm/memory-failure.c |
618 | +++ b/mm/memory-failure.c |
619 | @@ -1499,10 +1499,16 @@ static int soft_offline_huge_page(struct page *page, int flags) |
620 | pr_info("soft offline: %#lx: migration failed %d, type %lx\n", |
621 | pfn, ret, page->flags); |
622 | } else { |
623 | - set_page_hwpoison_huge_page(hpage); |
624 | - dequeue_hwpoisoned_huge_page(hpage); |
625 | - atomic_long_add(1 << compound_trans_order(hpage), |
626 | - &num_poisoned_pages); |
627 | + /* overcommit hugetlb page will be freed to buddy */ |
628 | + if (PageHuge(page)) { |
629 | + set_page_hwpoison_huge_page(hpage); |
630 | + dequeue_hwpoisoned_huge_page(hpage); |
631 | + atomic_long_add(1 << compound_order(hpage), |
632 | + &num_poisoned_pages); |
633 | + } else { |
634 | + SetPageHWPoison(page); |
635 | + atomic_long_inc(&num_poisoned_pages); |
636 | + } |
637 | } |
638 | /* keep elevated page count for bad page */ |
639 | return ret; |
640 | diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c |
641 | index 57f14185cf18..a7096e130c04 100644 |
642 | --- a/security/selinux/hooks.c |
643 | +++ b/security/selinux/hooks.c |
644 | @@ -219,6 +219,14 @@ static int inode_alloc_security(struct inode *inode) |
645 | return 0; |
646 | } |
647 | |
648 | +static void inode_free_rcu(struct rcu_head *head) |
649 | +{ |
650 | + struct inode_security_struct *isec; |
651 | + |
652 | + isec = container_of(head, struct inode_security_struct, rcu); |
653 | + kmem_cache_free(sel_inode_cache, isec); |
654 | +} |
655 | + |
656 | static void inode_free_security(struct inode *inode) |
657 | { |
658 | struct inode_security_struct *isec = inode->i_security; |
659 | @@ -229,8 +237,16 @@ static void inode_free_security(struct inode *inode) |
660 | list_del_init(&isec->list); |
661 | spin_unlock(&sbsec->isec_lock); |
662 | |
663 | - inode->i_security = NULL; |
664 | - kmem_cache_free(sel_inode_cache, isec); |
665 | + /* |
666 | + * The inode may still be referenced in a path walk and |
667 | + * a call to selinux_inode_permission() can be made |
668 | + * after inode_free_security() is called. Ideally, the VFS |
669 | + * wouldn't do this, but fixing that is a much harder |
670 | + * job. For now, simply free the i_security via RCU, and |
671 | + * leave the current inode->i_security pointer intact. |
672 | + * The inode will be freed after the RCU grace period too. |
673 | + */ |
674 | + call_rcu(&isec->rcu, inode_free_rcu); |
675 | } |
676 | |
677 | static int file_alloc_security(struct file *file) |
678 | diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h |
679 | index aa47bcabb5f6..6fd9dd256a62 100644 |
680 | --- a/security/selinux/include/objsec.h |
681 | +++ b/security/selinux/include/objsec.h |
682 | @@ -38,7 +38,10 @@ struct task_security_struct { |
683 | |
684 | struct inode_security_struct { |
685 | struct inode *inode; /* back pointer to inode object */ |
686 | - struct list_head list; /* list of inode_security_struct */ |
687 | + union { |
688 | + struct list_head list; /* list of inode_security_struct */ |
689 | + struct rcu_head rcu; /* for freeing the inode_security_struct */ |
690 | + }; |
691 | u32 task_sid; /* SID of creating task */ |
692 | u32 sid; /* SID of this object */ |
693 | u16 sclass; /* security class of this object */ |
694 | diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c |
695 | index eacec859f299..b4741b027a8f 100644 |
696 | --- a/tools/perf/util/scripting-engines/trace-event-perl.c |
697 | +++ b/tools/perf/util/scripting-engines/trace-event-perl.c |
698 | @@ -282,7 +282,7 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused, |
699 | |
700 | event = find_cache_event(evsel); |
701 | if (!event) |
702 | - die("ug! no event found for type %" PRIu64, evsel->attr.config); |
703 | + die("ug! no event found for type %" PRIu64, (u64)evsel->attr.config); |
704 | |
705 | pid = raw_field_value(event, "common_pid", data); |
706 |