Contents of /trunk/kernel-alx/patches-3.12/0108-3.12.9-all-fixes.patch
Revision 2423
Tue Mar 25 12:29:50 2014 UTC (10 years, 7 months ago) by niro
File size: 25676 byte(s)
-added 3.12 branch
1 | diff --git a/Makefile b/Makefile |
2 | index 5d0ec13bb77d..4ee77eaa7b1f 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 3 |
7 | PATCHLEVEL = 12 |
8 | -SUBLEVEL = 8 |
9 | +SUBLEVEL = 9 |
10 | EXTRAVERSION = |
11 | NAME = One Giant Leap for Frogkind |
12 | |
13 | diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c |
14 | index f35906b3d8c9..41960fb0daff 100644 |
15 | --- a/arch/arm/kernel/devtree.c |
16 | +++ b/arch/arm/kernel/devtree.c |
17 | @@ -171,7 +171,7 @@ void __init arm_dt_init_cpu_maps(void) |
18 | |
19 | bool arch_match_cpu_phys_id(int cpu, u64 phys_id) |
20 | { |
21 | - return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu); |
22 | + return phys_id == cpu_logical_map(cpu); |
23 | } |
24 | |
25 | /** |
26 | diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c |
27 | index 8e44973b0139..2f176a495c32 100644 |
28 | --- a/arch/arm/mach-highbank/highbank.c |
29 | +++ b/arch/arm/mach-highbank/highbank.c |
30 | @@ -66,6 +66,7 @@ void highbank_set_cpu_jump(int cpu, void *jump_addr) |
31 | |
32 | static void highbank_l2x0_disable(void) |
33 | { |
34 | + outer_flush_all(); |
35 | /* Disable PL310 L2 Cache controller */ |
36 | highbank_smc1(0x102, 0x0); |
37 | } |
38 | diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c |
39 | index 57911430324e..3f44b162fcab 100644 |
40 | --- a/arch/arm/mach-omap2/omap4-common.c |
41 | +++ b/arch/arm/mach-omap2/omap4-common.c |
42 | @@ -163,6 +163,7 @@ void __iomem *omap4_get_l2cache_base(void) |
43 | |
44 | static void omap4_l2x0_disable(void) |
45 | { |
46 | + outer_flush_all(); |
47 | /* Disable PL310 L2 Cache controller */ |
48 | omap_smc1(0x102, 0x0); |
49 | } |
50 | diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c |
51 | index e09f0bfb7b8f..4b8e4d3cd6ea 100644 |
52 | --- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c |
53 | +++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c |
54 | @@ -10,6 +10,7 @@ |
55 | #include <linux/module.h> |
56 | #include <linux/pci.h> |
57 | #include <linux/ptrace.h> |
58 | +#include <linux/syscore_ops.h> |
59 | |
60 | #include <asm/apic.h> |
61 | |
62 | @@ -816,6 +817,18 @@ out: |
63 | return ret; |
64 | } |
65 | |
66 | +static void ibs_eilvt_setup(void) |
67 | +{ |
68 | + /* |
69 | + * Force LVT offset assignment for family 10h: The offsets are |
70 | + * not assigned by the BIOS for this family, so the OS is |
71 | + * responsible for doing it. If the OS assignment fails, fall |
72 | + * back to BIOS settings and try to setup this. |
73 | + */ |
74 | + if (boot_cpu_data.x86 == 0x10) |
75 | + force_ibs_eilvt_setup(); |
76 | +} |
77 | + |
78 | static inline int get_ibs_lvt_offset(void) |
79 | { |
80 | u64 val; |
81 | @@ -851,6 +864,36 @@ static void clear_APIC_ibs(void *dummy) |
82 | setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); |
83 | } |
84 | |
85 | +#ifdef CONFIG_PM |
86 | + |
87 | +static int perf_ibs_suspend(void) |
88 | +{ |
89 | + clear_APIC_ibs(NULL); |
90 | + return 0; |
91 | +} |
92 | + |
93 | +static void perf_ibs_resume(void) |
94 | +{ |
95 | + ibs_eilvt_setup(); |
96 | + setup_APIC_ibs(NULL); |
97 | +} |
98 | + |
99 | +static struct syscore_ops perf_ibs_syscore_ops = { |
100 | + .resume = perf_ibs_resume, |
101 | + .suspend = perf_ibs_suspend, |
102 | +}; |
103 | + |
104 | +static void perf_ibs_pm_init(void) |
105 | +{ |
106 | + register_syscore_ops(&perf_ibs_syscore_ops); |
107 | +} |
108 | + |
109 | +#else |
110 | + |
111 | +static inline void perf_ibs_pm_init(void) { } |
112 | + |
113 | +#endif |
114 | + |
115 | static int |
116 | perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) |
117 | { |
118 | @@ -877,18 +920,12 @@ static __init int amd_ibs_init(void) |
119 | if (!caps) |
120 | return -ENODEV; /* ibs not supported by the cpu */ |
121 | |
122 | - /* |
123 | - * Force LVT offset assignment for family 10h: The offsets are |
124 | - * not assigned by the BIOS for this family, so the OS is |
125 | - * responsible for doing it. If the OS assignment fails, fall |
126 | - * back to BIOS settings and try to setup this. |
127 | - */ |
128 | - if (boot_cpu_data.x86 == 0x10) |
129 | - force_ibs_eilvt_setup(); |
130 | + ibs_eilvt_setup(); |
131 | |
132 | if (!ibs_eilvt_valid()) |
133 | goto out; |
134 | |
135 | + perf_ibs_pm_init(); |
136 | get_online_cpus(); |
137 | ibs_caps = caps; |
138 | /* make ibs_caps visible to other cpus: */ |
139 | diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S |
140 | index f0dcb0ceb6a2..15a569a47b4d 100644 |
141 | --- a/arch/x86/kernel/entry_32.S |
142 | +++ b/arch/x86/kernel/entry_32.S |
143 | @@ -1085,7 +1085,7 @@ ENTRY(ftrace_caller) |
144 | pushl $0 /* Pass NULL as regs pointer */ |
145 | movl 4*4(%esp), %eax |
146 | movl 0x4(%ebp), %edx |
147 | - leal function_trace_op, %ecx |
148 | + movl function_trace_op, %ecx |
149 | subl $MCOUNT_INSN_SIZE, %eax |
150 | |
151 | .globl ftrace_call |
152 | @@ -1143,7 +1143,7 @@ ENTRY(ftrace_regs_caller) |
153 | movl 12*4(%esp), %eax /* Load ip (1st parameter) */ |
154 | subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ |
155 | movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */ |
156 | - leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ |
157 | + movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ |
158 | pushl %esp /* Save pt_regs as 4th parameter */ |
159 | |
160 | GLOBAL(ftrace_regs_call) |
161 | diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S |
162 | index b077f4cc225a..9ce256739175 100644 |
163 | --- a/arch/x86/kernel/entry_64.S |
164 | +++ b/arch/x86/kernel/entry_64.S |
165 | @@ -88,7 +88,7 @@ END(function_hook) |
166 | MCOUNT_SAVE_FRAME \skip |
167 | |
168 | /* Load the ftrace_ops into the 3rd parameter */ |
169 | - leaq function_trace_op, %rdx |
170 | + movq function_trace_op(%rip), %rdx |
171 | |
172 | /* Load ip into the first parameter */ |
173 | movq RIP(%rsp), %rdi |
174 | diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c |
175 | index ab19263baf39..fb78bb9ad8f6 100644 |
176 | --- a/drivers/acpi/acpi_lpss.c |
177 | +++ b/drivers/acpi/acpi_lpss.c |
178 | @@ -156,7 +156,6 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { |
179 | { "80860F14", (unsigned long)&byt_sdio_dev_desc }, |
180 | { "80860F41", (unsigned long)&byt_i2c_dev_desc }, |
181 | { "INT33B2", }, |
182 | - { "INT33FC", }, |
183 | |
184 | { } |
185 | }; |
186 | diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c |
187 | index a069b5e2a2d2..920cd19edc69 100644 |
188 | --- a/drivers/gpu/drm/i915/intel_ddi.c |
189 | +++ b/drivers/gpu/drm/i915/intel_ddi.c |
190 | @@ -961,12 +961,18 @@ void intel_ddi_setup_hw_pll_state(struct drm_device *dev) |
191 | enum pipe pipe; |
192 | struct intel_crtc *intel_crtc; |
193 | |
194 | + dev_priv->ddi_plls.spll_refcount = 0; |
195 | + dev_priv->ddi_plls.wrpll1_refcount = 0; |
196 | + dev_priv->ddi_plls.wrpll2_refcount = 0; |
197 | + |
198 | for_each_pipe(pipe) { |
199 | intel_crtc = |
200 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
201 | |
202 | - if (!intel_crtc->active) |
203 | + if (!intel_crtc->active) { |
204 | + intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE; |
205 | continue; |
206 | + } |
207 | |
208 | intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv, |
209 | pipe); |
210 | diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c |
211 | index aad6f7bfc589..dd2d542e4651 100644 |
212 | --- a/drivers/gpu/drm/i915/intel_display.c |
213 | +++ b/drivers/gpu/drm/i915/intel_display.c |
214 | @@ -10592,9 +10592,9 @@ void intel_modeset_gem_init(struct drm_device *dev) |
215 | |
216 | intel_setup_overlay(dev); |
217 | |
218 | - drm_modeset_lock_all(dev); |
219 | + mutex_lock(&dev->mode_config.mutex); |
220 | intel_modeset_setup_hw_state(dev, false); |
221 | - drm_modeset_unlock_all(dev); |
222 | + mutex_unlock(&dev->mode_config.mutex); |
223 | } |
224 | |
225 | void intel_modeset_cleanup(struct drm_device *dev) |
226 | diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c |
227 | index 78be66176840..942509892895 100644 |
228 | --- a/drivers/hwmon/coretemp.c |
229 | +++ b/drivers/hwmon/coretemp.c |
230 | @@ -52,7 +52,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); |
231 | |
232 | #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ |
233 | #define NUM_REAL_CORES 32 /* Number of Real cores per cpu */ |
234 | -#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ |
235 | +#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */ |
236 | #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ |
237 | #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) |
238 | #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) |
239 | diff --git a/drivers/md/md.c b/drivers/md/md.c |
240 | index ba46d9749a0b..015bc455cf1c 100644 |
241 | --- a/drivers/md/md.c |
242 | +++ b/drivers/md/md.c |
243 | @@ -1119,6 +1119,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) |
244 | rdev->raid_disk = -1; |
245 | clear_bit(Faulty, &rdev->flags); |
246 | clear_bit(In_sync, &rdev->flags); |
247 | + clear_bit(Bitmap_sync, &rdev->flags); |
248 | clear_bit(WriteMostly, &rdev->flags); |
249 | |
250 | if (mddev->raid_disks == 0) { |
251 | @@ -1197,6 +1198,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) |
252 | */ |
253 | if (ev1 < mddev->bitmap->events_cleared) |
254 | return 0; |
255 | + if (ev1 < mddev->events) |
256 | + set_bit(Bitmap_sync, &rdev->flags); |
257 | } else { |
258 | if (ev1 < mddev->events) |
259 | /* just a hot-add of a new device, leave raid_disk at -1 */ |
260 | @@ -1605,6 +1608,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) |
261 | rdev->raid_disk = -1; |
262 | clear_bit(Faulty, &rdev->flags); |
263 | clear_bit(In_sync, &rdev->flags); |
264 | + clear_bit(Bitmap_sync, &rdev->flags); |
265 | clear_bit(WriteMostly, &rdev->flags); |
266 | |
267 | if (mddev->raid_disks == 0) { |
268 | @@ -1687,6 +1691,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) |
269 | */ |
270 | if (ev1 < mddev->bitmap->events_cleared) |
271 | return 0; |
272 | + if (ev1 < mddev->events) |
273 | + set_bit(Bitmap_sync, &rdev->flags); |
274 | } else { |
275 | if (ev1 < mddev->events) |
276 | /* just a hot-add of a new device, leave raid_disk at -1 */ |
277 | @@ -2830,6 +2836,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len) |
278 | else |
279 | rdev->saved_raid_disk = -1; |
280 | clear_bit(In_sync, &rdev->flags); |
281 | + clear_bit(Bitmap_sync, &rdev->flags); |
282 | err = rdev->mddev->pers-> |
283 | hot_add_disk(rdev->mddev, rdev); |
284 | if (err) { |
285 | @@ -5773,6 +5780,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info) |
286 | info->raid_disk < mddev->raid_disks) { |
287 | rdev->raid_disk = info->raid_disk; |
288 | set_bit(In_sync, &rdev->flags); |
289 | + clear_bit(Bitmap_sync, &rdev->flags); |
290 | } else |
291 | rdev->raid_disk = -1; |
292 | } else |
293 | @@ -7731,7 +7739,8 @@ static int remove_and_add_spares(struct mddev *mddev, |
294 | if (test_bit(Faulty, &rdev->flags)) |
295 | continue; |
296 | if (mddev->ro && |
297 | - rdev->saved_raid_disk < 0) |
298 | + ! (rdev->saved_raid_disk >= 0 && |
299 | + !test_bit(Bitmap_sync, &rdev->flags))) |
300 | continue; |
301 | |
302 | rdev->recovery_offset = 0; |
303 | @@ -7812,9 +7821,12 @@ void md_check_recovery(struct mddev *mddev) |
304 | * As we only add devices that are already in-sync, |
305 | * we can activate the spares immediately. |
306 | */ |
307 | - clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
308 | remove_and_add_spares(mddev, NULL); |
309 | - mddev->pers->spare_active(mddev); |
310 | + /* There is no thread, but we need to call |
311 | + * ->spare_active and clear saved_raid_disk |
312 | + */ |
313 | + md_reap_sync_thread(mddev); |
314 | + clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
315 | goto unlock; |
316 | } |
317 | |
318 | diff --git a/drivers/md/md.h b/drivers/md/md.h |
319 | index 608050c43f17..636756450a19 100644 |
320 | --- a/drivers/md/md.h |
321 | +++ b/drivers/md/md.h |
322 | @@ -129,6 +129,9 @@ struct md_rdev { |
323 | enum flag_bits { |
324 | Faulty, /* device is known to have a fault */ |
325 | In_sync, /* device is in_sync with rest of array */ |
326 | + Bitmap_sync, /* ..actually, not quite In_sync. Need a |
327 | + * bitmap-based recovery to get fully in sync |
328 | + */ |
329 | Unmerged, /* device is being added to array and should |
330 | * be considerred for bvec_merge_fn but not |
331 | * yet for actual IO |
332 | diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c |
333 | index 73dc8a377522..308575d23550 100644 |
334 | --- a/drivers/md/raid10.c |
335 | +++ b/drivers/md/raid10.c |
336 | @@ -1319,7 +1319,7 @@ read_again: |
337 | /* Could not read all from this device, so we will |
338 | * need another r10_bio. |
339 | */ |
340 | - sectors_handled = (r10_bio->sectors + max_sectors |
341 | + sectors_handled = (r10_bio->sector + max_sectors |
342 | - bio->bi_sector); |
343 | r10_bio->sectors = max_sectors; |
344 | spin_lock_irq(&conf->device_lock); |
345 | @@ -1327,7 +1327,7 @@ read_again: |
346 | bio->bi_phys_segments = 2; |
347 | else |
348 | bio->bi_phys_segments++; |
349 | - spin_unlock(&conf->device_lock); |
350 | + spin_unlock_irq(&conf->device_lock); |
351 | /* Cannot call generic_make_request directly |
352 | * as that will be queued in __generic_make_request |
353 | * and subsequent mempool_alloc might block |
354 | @@ -3220,10 +3220,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, |
355 | if (j == conf->copies) { |
356 | /* Cannot recover, so abort the recovery or |
357 | * record a bad block */ |
358 | - put_buf(r10_bio); |
359 | - if (rb2) |
360 | - atomic_dec(&rb2->remaining); |
361 | - r10_bio = rb2; |
362 | if (any_working) { |
363 | /* problem is that there are bad blocks |
364 | * on other device(s) |
365 | @@ -3255,6 +3251,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, |
366 | mirror->recovery_disabled |
367 | = mddev->recovery_disabled; |
368 | } |
369 | + put_buf(r10_bio); |
370 | + if (rb2) |
371 | + atomic_dec(&rb2->remaining); |
372 | + r10_bio = rb2; |
373 | break; |
374 | } |
375 | } |
376 | diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c |
377 | index 8a0665d04567..93174c6ab37c 100644 |
378 | --- a/drivers/md/raid5.c |
379 | +++ b/drivers/md/raid5.c |
380 | @@ -3502,7 +3502,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) |
381 | */ |
382 | set_bit(R5_Insync, &dev->flags); |
383 | |
384 | - if (rdev && test_bit(R5_WriteError, &dev->flags)) { |
385 | + if (test_bit(R5_WriteError, &dev->flags)) { |
386 | /* This flag does not apply to '.replacement' |
387 | * only to .rdev, so make sure to check that*/ |
388 | struct md_rdev *rdev2 = rcu_dereference( |
389 | @@ -3515,7 +3515,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) |
390 | } else |
391 | clear_bit(R5_WriteError, &dev->flags); |
392 | } |
393 | - if (rdev && test_bit(R5_MadeGood, &dev->flags)) { |
394 | + if (test_bit(R5_MadeGood, &dev->flags)) { |
395 | /* This flag does not apply to '.replacement' |
396 | * only to .rdev, so make sure to check that*/ |
397 | struct md_rdev *rdev2 = rcu_dereference( |
398 | diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c |
399 | index 114f5ef4b73a..2832576d8b12 100644 |
400 | --- a/drivers/pinctrl/pinctrl-baytrail.c |
401 | +++ b/drivers/pinctrl/pinctrl-baytrail.c |
402 | @@ -512,7 +512,6 @@ static const struct dev_pm_ops byt_gpio_pm_ops = { |
403 | |
404 | static const struct acpi_device_id byt_gpio_acpi_match[] = { |
405 | { "INT33B2", 0 }, |
406 | - { "INT33FC", 0 }, |
407 | { } |
408 | }; |
409 | MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match); |
410 | diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c |
411 | index 34ab0679e992..b95a8b3395ae 100644 |
412 | --- a/drivers/staging/comedi/drivers/addi_apci_1032.c |
413 | +++ b/drivers/staging/comedi/drivers/addi_apci_1032.c |
414 | @@ -325,8 +325,8 @@ static int apci1032_auto_attach(struct comedi_device *dev, |
415 | s = &dev->subdevices[1]; |
416 | if (dev->irq) { |
417 | dev->read_subdev = s; |
418 | - s->type = COMEDI_SUBD_DI | SDF_CMD_READ; |
419 | - s->subdev_flags = SDF_READABLE; |
420 | + s->type = COMEDI_SUBD_DI; |
421 | + s->subdev_flags = SDF_READABLE | SDF_CMD_READ; |
422 | s->n_chan = 1; |
423 | s->maxdata = 1; |
424 | s->range_table = &range_digital; |
425 | diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c |
426 | index 78cea193504f..c9702bfa88be 100644 |
427 | --- a/drivers/staging/comedi/drivers/adl_pci9111.c |
428 | +++ b/drivers/staging/comedi/drivers/adl_pci9111.c |
429 | @@ -869,7 +869,7 @@ static int pci9111_auto_attach(struct comedi_device *dev, |
430 | pci9111_reset(dev); |
431 | |
432 | if (pcidev->irq > 0) { |
433 | - ret = request_irq(dev->irq, pci9111_interrupt, |
434 | + ret = request_irq(pcidev->irq, pci9111_interrupt, |
435 | IRQF_SHARED, dev->board_name, dev); |
436 | if (ret) |
437 | return ret; |
438 | diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c |
439 | index aaa22867e656..1440d0b4a7bc 100644 |
440 | --- a/drivers/tty/serial/amba-pl011.c |
441 | +++ b/drivers/tty/serial/amba-pl011.c |
442 | @@ -1537,6 +1537,8 @@ static int pl011_startup(struct uart_port *port) |
443 | /* |
444 | * Provoke TX FIFO interrupt into asserting. |
445 | */ |
446 | + spin_lock_irq(&uap->port.lock); |
447 | + |
448 | cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE; |
449 | writew(cr, uap->port.membase + UART011_CR); |
450 | writew(0, uap->port.membase + UART011_FBRD); |
451 | @@ -1561,6 +1563,8 @@ static int pl011_startup(struct uart_port *port) |
452 | cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE; |
453 | writew(cr, uap->port.membase + UART011_CR); |
454 | |
455 | + spin_unlock_irq(&uap->port.lock); |
456 | + |
457 | /* |
458 | * initialise the old status of the modem signals |
459 | */ |
460 | @@ -1629,11 +1633,13 @@ static void pl011_shutdown(struct uart_port *port) |
461 | * it during startup(). |
462 | */ |
463 | uap->autorts = false; |
464 | + spin_lock_irq(&uap->port.lock); |
465 | cr = readw(uap->port.membase + UART011_CR); |
466 | uap->old_cr = cr; |
467 | cr &= UART011_CR_RTS | UART011_CR_DTR; |
468 | cr |= UART01x_CR_UARTEN | UART011_CR_TXE; |
469 | writew(cr, uap->port.membase + UART011_CR); |
470 | + spin_unlock_irq(&uap->port.lock); |
471 | |
472 | /* |
473 | * disable break condition and fifos |
474 | diff --git a/fs/dcache.c b/fs/dcache.c |
475 | index 89f96719a29b..f27c1d12a1fa 100644 |
476 | --- a/fs/dcache.c |
477 | +++ b/fs/dcache.c |
478 | @@ -3064,8 +3064,13 @@ char *d_path(const struct path *path, char *buf, int buflen) |
479 | * thus don't need to be hashed. They also don't need a name until a |
480 | * user wants to identify the object in /proc/pid/fd/. The little hack |
481 | * below allows us to generate a name for these objects on demand: |
482 | + * |
483 | + * Some pseudo inodes are mountable. When they are mounted |
484 | + * path->dentry == path->mnt->mnt_root. In that case don't call d_dname |
485 | + * and instead have d_path return the mounted path. |
486 | */ |
487 | - if (path->dentry->d_op && path->dentry->d_op->d_dname) |
488 | + if (path->dentry->d_op && path->dentry->d_op->d_dname && |
489 | + (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root)) |
490 | return path->dentry->d_op->d_dname(path->dentry, buf, buflen); |
491 | |
492 | rcu_read_lock(); |
493 | diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c |
494 | index 9f4935b8f208..3595180b62ac 100644 |
495 | --- a/fs/fs-writeback.c |
496 | +++ b/fs/fs-writeback.c |
497 | @@ -510,13 +510,16 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb, |
498 | } |
499 | WARN_ON(inode->i_state & I_SYNC); |
500 | /* |
501 | - * Skip inode if it is clean. We don't want to mess with writeback |
502 | - * lists in this function since flusher thread may be doing for example |
503 | - * sync in parallel and if we move the inode, it could get skipped. So |
504 | - * here we make sure inode is on some writeback list and leave it there |
505 | - * unless we have completely cleaned the inode. |
506 | + * Skip inode if it is clean and we have no outstanding writeback in |
507 | + * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this |
508 | + * function since flusher thread may be doing for example sync in |
509 | + * parallel and if we move the inode, it could get skipped. So here we |
510 | + * make sure inode is on some writeback list and leave it there unless |
511 | + * we have completely cleaned the inode. |
512 | */ |
513 | - if (!(inode->i_state & I_DIRTY)) |
514 | + if (!(inode->i_state & I_DIRTY) && |
515 | + (wbc->sync_mode != WB_SYNC_ALL || |
516 | + !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))) |
517 | goto out; |
518 | inode->i_state |= I_SYNC; |
519 | spin_unlock(&inode->i_lock); |
520 | diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c |
521 | index 12987666e5f0..630db362a2d1 100644 |
522 | --- a/fs/gfs2/inode.c |
523 | +++ b/fs/gfs2/inode.c |
524 | @@ -1610,10 +1610,22 @@ static int setattr_chown(struct inode *inode, struct iattr *attr) |
525 | if (!(attr->ia_valid & ATTR_GID) || gid_eq(ogid, ngid)) |
526 | ogid = ngid = NO_GID_QUOTA_CHANGE; |
527 | |
528 | - error = gfs2_quota_lock(ip, nuid, ngid); |
529 | + error = get_write_access(inode); |
530 | if (error) |
531 | return error; |
532 | |
533 | + error = gfs2_rs_alloc(ip); |
534 | + if (error) |
535 | + goto out; |
536 | + |
537 | + error = gfs2_rindex_update(sdp); |
538 | + if (error) |
539 | + goto out; |
540 | + |
541 | + error = gfs2_quota_lock(ip, nuid, ngid); |
542 | + if (error) |
543 | + goto out; |
544 | + |
545 | if (!uid_eq(ouid, NO_UID_QUOTA_CHANGE) || |
546 | !gid_eq(ogid, NO_GID_QUOTA_CHANGE)) { |
547 | error = gfs2_quota_check(ip, nuid, ngid); |
548 | @@ -1640,6 +1652,8 @@ out_end_trans: |
549 | gfs2_trans_end(sdp); |
550 | out_gunlock_q: |
551 | gfs2_quota_unlock(ip); |
552 | +out: |
553 | + put_write_access(inode); |
554 | return error; |
555 | } |
556 | |
557 | diff --git a/fs/namespace.c b/fs/namespace.c |
558 | index da5c49483430..84447dbcb650 100644 |
559 | --- a/fs/namespace.c |
560 | +++ b/fs/namespace.c |
561 | @@ -2888,7 +2888,7 @@ bool fs_fully_visible(struct file_system_type *type) |
562 | struct inode *inode = child->mnt_mountpoint->d_inode; |
563 | if (!S_ISDIR(inode->i_mode)) |
564 | goto next; |
565 | - if (inode->i_nlink != 2) |
566 | + if (inode->i_nlink > 2) |
567 | goto next; |
568 | } |
569 | visible = true; |
570 | diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c |
571 | index 9f6b486b6c01..a1a191634abc 100644 |
572 | --- a/fs/nilfs2/segment.c |
573 | +++ b/fs/nilfs2/segment.c |
574 | @@ -1440,17 +1440,19 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci, |
575 | |
576 | nilfs_clear_logs(&sci->sc_segbufs); |
577 | |
578 | - err = nilfs_segctor_extend_segments(sci, nilfs, nadd); |
579 | - if (unlikely(err)) |
580 | - return err; |
581 | - |
582 | if (sci->sc_stage.flags & NILFS_CF_SUFREED) { |
583 | err = nilfs_sufile_cancel_freev(nilfs->ns_sufile, |
584 | sci->sc_freesegs, |
585 | sci->sc_nfreesegs, |
586 | NULL); |
587 | WARN_ON(err); /* do not happen */ |
588 | + sci->sc_stage.flags &= ~NILFS_CF_SUFREED; |
589 | } |
590 | + |
591 | + err = nilfs_segctor_extend_segments(sci, nilfs, nadd); |
592 | + if (unlikely(err)) |
593 | + return err; |
594 | + |
595 | nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA); |
596 | sci->sc_stage = prev_stage; |
597 | } |
598 | diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h |
599 | index fe68a5a98583..7032518f8542 100644 |
600 | --- a/include/linux/crash_dump.h |
601 | +++ b/include/linux/crash_dump.h |
602 | @@ -6,6 +6,8 @@ |
603 | #include <linux/proc_fs.h> |
604 | #include <linux/elf.h> |
605 | |
606 | +#include <asm/pgtable.h> /* for pgprot_t */ |
607 | + |
608 | #define ELFCORE_ADDR_MAX (-1ULL) |
609 | #define ELFCORE_ADDR_ERR (-2ULL) |
610 | |
611 | diff --git a/include/linux/i2c.h b/include/linux/i2c.h |
612 | index 2ab11dc38077..5677fb58e688 100644 |
613 | --- a/include/linux/i2c.h |
614 | +++ b/include/linux/i2c.h |
615 | @@ -447,7 +447,7 @@ static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data) |
616 | static inline struct i2c_adapter * |
617 | i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter) |
618 | { |
619 | -#if IS_ENABLED(I2C_MUX) |
620 | +#if IS_ENABLED(CONFIG_I2C_MUX) |
621 | struct device *parent = adapter->dev.parent; |
622 | |
623 | if (parent != NULL && parent->type == &i2c_adapter_type) |
624 | diff --git a/include/linux/mm.h b/include/linux/mm.h |
625 | index 8b6e55ee8855..fed08c0c543b 100644 |
626 | --- a/include/linux/mm.h |
627 | +++ b/include/linux/mm.h |
628 | @@ -762,11 +762,14 @@ static __always_inline void *lowmem_page_address(const struct page *page) |
629 | #endif |
630 | |
631 | #if defined(WANT_PAGE_VIRTUAL) |
632 | -#define page_address(page) ((page)->virtual) |
633 | -#define set_page_address(page, address) \ |
634 | - do { \ |
635 | - (page)->virtual = (address); \ |
636 | - } while(0) |
637 | +static inline void *page_address(const struct page *page) |
638 | +{ |
639 | + return page->virtual; |
640 | +} |
641 | +static inline void set_page_address(struct page *page, void *address) |
642 | +{ |
643 | + page->virtual = address; |
644 | +} |
645 | #define page_address_init() do { } while(0) |
646 | #endif |
647 | |
648 | diff --git a/kernel/fork.c b/kernel/fork.c |
649 | index 690cfacaed71..458953ca4d50 100644 |
650 | --- a/kernel/fork.c |
651 | +++ b/kernel/fork.c |
652 | @@ -1175,7 +1175,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, |
653 | * do not allow it to share a thread group or signal handlers or |
654 | * parent with the forking task. |
655 | */ |
656 | - if (clone_flags & (CLONE_SIGHAND | CLONE_PARENT)) { |
657 | + if (clone_flags & CLONE_SIGHAND) { |
658 | if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) || |
659 | (task_active_pid_ns(current) != |
660 | current->nsproxy->pid_ns_for_children)) |
661 | diff --git a/mm/huge_memory.c b/mm/huge_memory.c |
662 | index 47962456ed87..292a266e0d42 100644 |
663 | --- a/mm/huge_memory.c |
664 | +++ b/mm/huge_memory.c |
665 | @@ -1154,7 +1154,7 @@ alloc: |
666 | new_page = NULL; |
667 | |
668 | if (unlikely(!new_page)) { |
669 | - if (is_huge_zero_pmd(orig_pmd)) { |
670 | + if (!page) { |
671 | ret = do_huge_pmd_wp_zero_page_fallback(mm, vma, |
672 | address, pmd, orig_pmd, haddr); |
673 | } else { |
674 | @@ -1181,7 +1181,7 @@ alloc: |
675 | |
676 | count_vm_event(THP_FAULT_ALLOC); |
677 | |
678 | - if (is_huge_zero_pmd(orig_pmd)) |
679 | + if (!page) |
680 | clear_huge_page(new_page, haddr, HPAGE_PMD_NR); |
681 | else |
682 | copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); |
683 | @@ -1207,7 +1207,7 @@ alloc: |
684 | page_add_new_anon_rmap(new_page, vma, haddr); |
685 | set_pmd_at(mm, haddr, pmd, entry); |
686 | update_mmu_cache_pmd(vma, address, pmd); |
687 | - if (is_huge_zero_pmd(orig_pmd)) { |
688 | + if (!page) { |
689 | add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); |
690 | put_huge_zero_page(); |
691 | } else { |
692 | diff --git a/mm/util.c b/mm/util.c |
693 | index eaf63fc2c92f..96da2d7c076c 100644 |
694 | --- a/mm/util.c |
695 | +++ b/mm/util.c |
696 | @@ -387,7 +387,10 @@ struct address_space *page_mapping(struct page *page) |
697 | { |
698 | struct address_space *mapping = page->mapping; |
699 | |
700 | - VM_BUG_ON(PageSlab(page)); |
701 | + /* This happens if someone calls flush_dcache_page on slab page */ |
702 | + if (unlikely(PageSlab(page))) |
703 | + return NULL; |
704 | + |
705 | if (unlikely(PageSwapCache(page))) { |
706 | swp_entry_t entry; |
707 | |
708 | diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c |
709 | index 392a0445265c..25d5ebaf25f9 100644 |
710 | --- a/security/selinux/hooks.c |
711 | +++ b/security/selinux/hooks.c |
712 | @@ -220,6 +220,14 @@ static int inode_alloc_security(struct inode *inode) |
713 | return 0; |
714 | } |
715 | |
716 | +static void inode_free_rcu(struct rcu_head *head) |
717 | +{ |
718 | + struct inode_security_struct *isec; |
719 | + |
720 | + isec = container_of(head, struct inode_security_struct, rcu); |
721 | + kmem_cache_free(sel_inode_cache, isec); |
722 | +} |
723 | + |
724 | static void inode_free_security(struct inode *inode) |
725 | { |
726 | struct inode_security_struct *isec = inode->i_security; |
727 | @@ -230,8 +238,16 @@ static void inode_free_security(struct inode *inode) |
728 | list_del_init(&isec->list); |
729 | spin_unlock(&sbsec->isec_lock); |
730 | |
731 | - inode->i_security = NULL; |
732 | - kmem_cache_free(sel_inode_cache, isec); |
733 | + /* |
734 | + * The inode may still be referenced in a path walk and |
735 | + * a call to selinux_inode_permission() can be made |
736 | + * after inode_free_security() is called. Ideally, the VFS |
737 | + * wouldn't do this, but fixing that is a much harder |
738 | + * job. For now, simply free the i_security via RCU, and |
739 | + * leave the current inode->i_security pointer intact. |
740 | + * The inode will be freed after the RCU grace period too. |
741 | + */ |
742 | + call_rcu(&isec->rcu, inode_free_rcu); |
743 | } |
744 | |
745 | static int file_alloc_security(struct file *file) |
746 | diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h |
747 | index aa47bcabb5f6..6fd9dd256a62 100644 |
748 | --- a/security/selinux/include/objsec.h |
749 | +++ b/security/selinux/include/objsec.h |
750 | @@ -38,7 +38,10 @@ struct task_security_struct { |
751 | |
752 | struct inode_security_struct { |
753 | struct inode *inode; /* back pointer to inode object */ |
754 | - struct list_head list; /* list of inode_security_struct */ |
755 | + union { |
756 | + struct list_head list; /* list of inode_security_struct */ |
757 | + struct rcu_head rcu; /* for freeing the inode_security_struct */ |
758 | + }; |
759 | u32 task_sid; /* SID of creating task */ |
760 | u32 sid; /* SID of this object */ |
761 | u16 sclass; /* security class of this object */ |