Magellan Linux

Contents of /trunk/kernel26-alx/patches-3.10/0142-3.10.43-all-fixes.patch



Revision 2672
Tue Jul 21 16:46:35 2015 UTC by niro
File size: 47477 byte(s)
-3.10.84-alx-r1
1 diff --git a/Documentation/DocBook/media/Makefile b/Documentation/DocBook/media/Makefile
2 index f9fd615427fb..1d27f0a1abd1 100644
3 --- a/Documentation/DocBook/media/Makefile
4 +++ b/Documentation/DocBook/media/Makefile
5 @@ -195,7 +195,7 @@ DVB_DOCUMENTED = \
6 #
7
8 install_media_images = \
9 - $(Q)cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
10 + $(Q)-cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
11
12 $(MEDIA_OBJ_DIR)/%: $(MEDIA_SRC_DIR)/%.b64
13 $(Q)base64 -d $< >$@
14 diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
15 index 9b34b1685078..8d90c42e5db6 100644
16 --- a/Documentation/sysctl/kernel.txt
17 +++ b/Documentation/sysctl/kernel.txt
18 @@ -438,6 +438,32 @@ This file shows up if CONFIG_DEBUG_STACKOVERFLOW is enabled.
19
20 ==============================================================
21
22 +perf_cpu_time_max_percent:
23 +
24 +Hints to the kernel how much CPU time it should be allowed to
25 +use to handle perf sampling events. If the perf subsystem
26 +is informed that its samples are exceeding this limit, it
27 +will drop its sampling frequency to attempt to reduce its CPU
28 +usage.
29 +
30 +Some perf sampling happens in NMIs. If these samples
31 +unexpectedly take too long to execute, the NMIs can become
32 +stacked up next to each other so much that nothing else is
33 +allowed to execute.
34 +
35 +0: disable the mechanism. Do not monitor or correct perf's
36 + sampling rate no matter how much CPU time it takes.
37 +
38 +1-100: attempt to throttle perf's sample rate to this
39 + percentage of CPU. Note: the kernel calculates an
40 + "expected" length of each sample event. 100 here means
41 + 100% of that expected length. Even if this is set to
42 + 100, you may still see sample throttling if this
43 + length is exceeded. Set to 0 if you truly do not care
44 + how much CPU is consumed.
45 +
46 +==============================================================
47 +
48
49 pid_max:
50
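
The knob documented above is exposed at /proc/sys/kernel/perf_cpu_time_max_percent. A minimal user-space sketch of reading and setting it, assuming only the path and the 0-100 semantics stated in the text (error handling abbreviated; writing requires root):

#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/kernel/perf_cpu_time_max_percent";
	int pct = -1;
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;
	if (fscanf(f, "%d", &pct) == 1)
		printf("perf may use up to %d%% CPU for sampling\n", pct);
	fclose(f);

	f = fopen(path, "w");		/* root only */
	if (f) {
		fprintf(f, "25\n");	/* throttle to 25%; 0 disables the mechanism */
		fclose(f);
	}
	return 0;
}
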
51 diff --git a/Makefile b/Makefile
52 index 4634015fed68..9cf513828341 100644
53 --- a/Makefile
54 +++ b/Makefile
55 @@ -1,6 +1,6 @@
56 VERSION = 3
57 PATCHLEVEL = 10
58 -SUBLEVEL = 42
59 +SUBLEVEL = 43
60 EXTRAVERSION =
61 NAME = TOSSUG Baby Fish
62
63 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
64 index 7e1f76027f66..20e1c994669e 100644
65 --- a/arch/arm/include/asm/uaccess.h
66 +++ b/arch/arm/include/asm/uaccess.h
67 @@ -164,8 +164,9 @@ extern int __put_user_8(void *, unsigned long long);
68 #define __put_user_check(x,p) \
69 ({ \
70 unsigned long __limit = current_thread_info()->addr_limit - 1; \
71 + const typeof(*(p)) __user *__tmp_p = (p); \
72 register const typeof(*(p)) __r2 asm("r2") = (x); \
73 - register const typeof(*(p)) __user *__p asm("r0") = (p);\
74 + register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
75 register unsigned long __l asm("r1") = __limit; \
76 register int __e asm("r0"); \
77 switch (sizeof(*(__p))) { \
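
The hunk above caches the pointer argument in __tmp_p so the macro evaluates it exactly once, before the fixed registers are loaded. A simplified user-space illustration of the evaluate-once idiom using GNU C statement expressions, as the kernel macro itself does; put_bad/put_good are invented names, not the actual uaccess code:

#include <stdio.h>

/* hazardous shape: (p) expands twice, so put_bad(v, q++) would bump q twice */
#define put_bad(x, p)	(*(p) = (x), (p) != (void *)0)

/* fixed shape: cache (p) in a temporary first, as __put_user_check now does */
#define put_good(x, p)	({ typeof(p) __tmp_p = (p); *__tmp_p = (x); 1; })

int main(void)
{
	int buf[2] = { 0, 0 };
	int *q = buf;

	put_good(42, q++);	/* q++ evaluated exactly once */
	printf("buf[0]=%d advanced=%td\n", buf[0], q - buf);	/* 42, 1 */
	return 0;
}
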
78 diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
79 index e19edc6f2d15..ace0ce8f6641 100644
80 --- a/arch/arm/kernel/perf_event.c
81 +++ b/arch/arm/kernel/perf_event.c
82 @@ -303,11 +303,18 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
83 struct arm_pmu *armpmu = (struct arm_pmu *) dev;
84 struct platform_device *plat_device = armpmu->plat_device;
85 struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
86 + int ret;
87 + u64 start_clock, finish_clock;
88
89 + start_clock = sched_clock();
90 if (plat && plat->handle_irq)
91 - return plat->handle_irq(irq, dev, armpmu->handle_irq);
92 + ret = plat->handle_irq(irq, dev, armpmu->handle_irq);
93 else
94 - return armpmu->handle_irq(irq, dev);
95 + ret = armpmu->handle_irq(irq, dev);
96 + finish_clock = sched_clock();
97 +
98 + perf_sample_event_took(finish_clock - start_clock);
99 + return ret;
100 }
101
102 static void
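
This hunk, like the x86 change further down, wraps the PMU interrupt handler in a clock pair and reports the elapsed time to perf_sample_event_took(). The same wrap-and-measure shape in stand-alone C, where clock_gettime() stands in for sched_clock()/local_clock() and all names are illustrative:

#include <stdio.h>
#include <time.h>

/* stand-in for sched_clock(): monotonic nanoseconds */
static unsigned long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

typedef int (*handler_fn)(void *dev);

static int timed_dispatch(handler_fn fn, void *dev, unsigned long long *took_ns)
{
	unsigned long long start = now_ns();
	int ret = fn(dev);

	*took_ns = now_ns() - start;	/* fed to the throttle in the patch */
	return ret;
}

static int dummy_handler(void *dev)
{
	(void)dev;
	return 1;	/* "handled" */
}

int main(void)
{
	unsigned long long took;
	int ret = timed_dispatch(dummy_handler, NULL, &took);

	printf("handler returned %d after %llu ns\n", ret, took);
	return 0;
}
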
103 diff --git a/arch/arm/mach-imx/devices/platform-ipu-core.c b/arch/arm/mach-imx/devices/platform-ipu-core.c
104 index fc4dd7cedc11..6bd7c3f37ac0 100644
105 --- a/arch/arm/mach-imx/devices/platform-ipu-core.c
106 +++ b/arch/arm/mach-imx/devices/platform-ipu-core.c
107 @@ -77,7 +77,7 @@ struct platform_device *__init imx_alloc_mx3_camera(
108
109 pdev = platform_device_alloc("mx3-camera", 0);
110 if (!pdev)
111 - goto err;
112 + return ERR_PTR(-ENOMEM);
113
114 pdev->dev.dma_mask = kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
115 if (!pdev->dev.dma_mask)
116 diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
117 index 45cd26430d1f..da6d407c21cd 100644
118 --- a/arch/arm/mach-omap2/cclock3xxx_data.c
119 +++ b/arch/arm/mach-omap2/cclock3xxx_data.c
120 @@ -418,7 +418,8 @@ static struct clk_hw_omap dpll4_m5x2_ck_hw = {
121 .clkdm_name = "dpll4_clkdm",
122 };
123
124 -DEFINE_STRUCT_CLK(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names, dpll4_m5x2_ck_ops);
125 +DEFINE_STRUCT_CLK_FLAGS(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names,
126 + dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT);
127
128 static struct clk dpll4_m5x2_ck_3630 = {
129 .name = "dpll4_m5x2_ck",
130 diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
131 index c443f2e97e10..f98410a257e3 100644
132 --- a/arch/arm/mach-omap2/cpuidle44xx.c
133 +++ b/arch/arm/mach-omap2/cpuidle44xx.c
134 @@ -14,6 +14,7 @@
135 #include <linux/cpuidle.h>
136 #include <linux/cpu_pm.h>
137 #include <linux/export.h>
138 +#include <linux/clockchips.h>
139
140 #include <asm/cpuidle.h>
141 #include <asm/proc-fns.h>
142 @@ -80,6 +81,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
143 int index)
144 {
145 struct idle_statedata *cx = state_ptr + index;
146 + int cpu_id = smp_processor_id();
147
148 /*
149 * CPU0 has to wait and stay ON until CPU1 is OFF state.
150 @@ -104,6 +106,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
151 }
152 }
153
154 + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
155 +
156 /*
157 * Call idle CPU PM enter notifier chain so that
158 * VFP and per CPU interrupt context is saved.
159 @@ -147,6 +151,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
160 (cx->mpu_logic_state == PWRDM_POWER_OFF))
161 cpu_cluster_pm_exit();
162
163 + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
164 +
165 fail:
166 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
167 cpu_done[dev->cpu] = false;
168 @@ -154,6 +160,16 @@ fail:
169 return index;
170 }
171
172 +/*
173 + * For each cpu, setup the broadcast timer because local timers
 173 + * For each cpu, set up the broadcast timer because local timers
174 + * stop for the states above C1.
175 + */
176 +static void omap_setup_broadcast_timer(void *arg)
177 +{
178 + int cpu = smp_processor_id();
179 + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
180 +}
181 +
182 static struct cpuidle_driver omap4_idle_driver = {
183 .name = "omap4_idle",
184 .owner = THIS_MODULE,
185 @@ -171,8 +187,7 @@ static struct cpuidle_driver omap4_idle_driver = {
186 /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
187 .exit_latency = 328 + 440,
188 .target_residency = 960,
189 - .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
190 - CPUIDLE_FLAG_TIMER_STOP,
191 + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
192 .enter = omap_enter_idle_coupled,
193 .name = "C2",
194 .desc = "CPUx OFF, MPUSS CSWR",
195 @@ -181,8 +196,7 @@ static struct cpuidle_driver omap4_idle_driver = {
196 /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
197 .exit_latency = 460 + 518,
198 .target_residency = 1100,
199 - .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
200 - CPUIDLE_FLAG_TIMER_STOP,
201 + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
202 .enter = omap_enter_idle_coupled,
203 .name = "C3",
204 .desc = "CPUx OFF, MPUSS OSWR",
205 @@ -213,5 +227,8 @@ int __init omap4_idle_init(void)
206 if (!cpu_clkdm[0] || !cpu_clkdm[1])
207 return -ENODEV;
208
209 + /* Configure the broadcast timer on each cpu */
210 + on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
211 +
212 return cpuidle_register(&omap4_idle_driver, cpu_online_mask);
213 }
214 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
215 index a69b67d968d4..123d9e2271dc 100644
216 --- a/arch/x86/kernel/cpu/perf_event.c
217 +++ b/arch/x86/kernel/cpu/perf_event.c
218 @@ -1252,10 +1252,20 @@ void perf_events_lapic_init(void)
219 static int __kprobes
220 perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
221 {
222 + int ret;
223 + u64 start_clock;
224 + u64 finish_clock;
225 +
226 if (!atomic_read(&active_events))
227 return NMI_DONE;
228
229 - return x86_pmu.handle_irq(regs);
230 + start_clock = local_clock();
231 + ret = x86_pmu.handle_irq(regs);
232 + finish_clock = local_clock();
233 +
234 + perf_sample_event_took(finish_clock - start_clock);
235 +
236 + return ret;
237 }
238
239 struct event_constraint emptyconstraint;
240 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
241 index 117ce3813681..6416d0d07394 100644
242 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
243 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
244 @@ -635,9 +635,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
245 * relocations were valid.
246 */
247 for (j = 0; j < exec[i].relocation_count; j++) {
248 - if (copy_to_user(&user_relocs[j].presumed_offset,
249 - &invalid_offset,
250 - sizeof(invalid_offset))) {
251 + if (__copy_to_user(&user_relocs[j].presumed_offset,
252 + &invalid_offset,
253 + sizeof(invalid_offset))) {
254 ret = -EFAULT;
255 mutex_lock(&dev->struct_mutex);
256 goto err;
257 @@ -1151,18 +1151,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
258
259 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
260 if (!ret) {
261 + struct drm_i915_gem_exec_object __user *user_exec_list =
262 + to_user_ptr(args->buffers_ptr);
263 +
264 /* Copy the new buffer offsets back to the user's exec list. */
265 - for (i = 0; i < args->buffer_count; i++)
266 - exec_list[i].offset = exec2_list[i].offset;
267 - /* ... and back out to userspace */
268 - ret = copy_to_user(to_user_ptr(args->buffers_ptr),
269 - exec_list,
270 - sizeof(*exec_list) * args->buffer_count);
271 - if (ret) {
272 - ret = -EFAULT;
273 - DRM_DEBUG("failed to copy %d exec entries "
274 - "back to user (%d)\n",
275 - args->buffer_count, ret);
276 + for (i = 0; i < args->buffer_count; i++) {
277 + ret = __copy_to_user(&user_exec_list[i].offset,
278 + &exec2_list[i].offset,
279 + sizeof(user_exec_list[i].offset));
280 + if (ret) {
281 + ret = -EFAULT;
282 + DRM_DEBUG("failed to copy %d exec entries "
283 + "back to user (%d)\n",
284 + args->buffer_count, ret);
285 + break;
286 + }
287 }
288 }
289
290 @@ -1208,14 +1211,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
291 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
292 if (!ret) {
293 /* Copy the new buffer offsets back to the user's exec list. */
294 - ret = copy_to_user(to_user_ptr(args->buffers_ptr),
295 - exec2_list,
296 - sizeof(*exec2_list) * args->buffer_count);
297 - if (ret) {
298 - ret = -EFAULT;
299 - DRM_DEBUG("failed to copy %d exec entries "
300 - "back to user (%d)\n",
301 - args->buffer_count, ret);
302 + struct drm_i915_gem_exec_object2 *user_exec_list =
303 + to_user_ptr(args->buffers_ptr);
304 + int i;
305 +
306 + for (i = 0; i < args->buffer_count; i++) {
307 + ret = __copy_to_user(&user_exec_list[i].offset,
308 + &exec2_list[i].offset,
309 + sizeof(user_exec_list[i].offset));
310 + if (ret) {
311 + ret = -EFAULT;
312 + DRM_DEBUG("failed to copy %d exec entries "
313 + "back to user\n",
314 + args->buffer_count);
315 + break;
316 + }
317 }
318 }
319
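
The rewritten i915 loops copy only the updated offset field of each entry back to user space, instead of overwriting the whole array, which clobbered fields user space still owned, and they stop on the first fault. A sketch of the per-field write-back shape, with memcpy standing in for __copy_to_user and illustrative struct names:

#include <stdio.h>
#include <string.h>

struct exec_obj {
	unsigned long handle;	/* owned by user space, must not be clobbered */
	unsigned long offset;	/* updated by the kernel */
};

static int copy_offsets_back(struct exec_obj *user_list,
			     const struct exec_obj *kern_list, int count)
{
	for (int i = 0; i < count; i++) {
		/* only .offset crosses the boundary */
		memcpy(&user_list[i].offset, &kern_list[i].offset,
		       sizeof(kern_list[i].offset));
	}
	return 0;
}

int main(void)
{
	struct exec_obj user[2] = { { 1, 0 }, { 2, 0 } };
	struct exec_obj kern[2] = { { 1, 0x1000 }, { 2, 0x2000 } };

	copy_offsets_back(user, kern, 2);
	printf("offsets: %#lx %#lx\n", user[0].offset, user[1].offset);
	return 0;
}
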
320 diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
321 index 019eacd8a68f..9ee40042fa3a 100644
322 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
323 +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
324 @@ -679,7 +679,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
325 }
326
327 if (outp == 8)
328 - return false;
329 + return conf;
330
331 data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
332 if (data == 0x0000)
333 diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
334 index 061b227dae0c..b131520521e4 100644
335 --- a/drivers/gpu/drm/radeon/radeon_bios.c
336 +++ b/drivers/gpu/drm/radeon/radeon_bios.c
337 @@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
338 }
339 }
340
341 + if (!found) {
342 + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
343 + dhandle = ACPI_HANDLE(&pdev->dev);
344 + if (!dhandle)
345 + continue;
346 +
347 + status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
348 + if (!ACPI_FAILURE(status)) {
349 + found = true;
350 + break;
351 + }
352 + }
353 + }
354 +
355 if (!found)
356 return false;
357
358 diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
359 index 1424ccde2377..f83727915787 100644
360 --- a/drivers/gpu/drm/radeon/radeon_object.c
361 +++ b/drivers/gpu/drm/radeon/radeon_object.c
362 @@ -582,22 +582,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
363 rbo = container_of(bo, struct radeon_bo, tbo);
364 radeon_bo_check_tiling(rbo, 0, 0);
365 rdev = rbo->rdev;
366 - if (bo->mem.mem_type == TTM_PL_VRAM) {
367 - size = bo->mem.num_pages << PAGE_SHIFT;
368 - offset = bo->mem.start << PAGE_SHIFT;
369 - if ((offset + size) > rdev->mc.visible_vram_size) {
370 - /* hurrah the memory is not visible ! */
371 - radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
372 - rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
373 - r = ttm_bo_validate(bo, &rbo->placement, false, false);
374 - if (unlikely(r != 0))
375 - return r;
376 - offset = bo->mem.start << PAGE_SHIFT;
377 - /* this should not happen */
378 - if ((offset + size) > rdev->mc.visible_vram_size)
379 - return -EINVAL;
380 - }
381 + if (bo->mem.mem_type != TTM_PL_VRAM)
382 + return 0;
383 +
384 + size = bo->mem.num_pages << PAGE_SHIFT;
385 + offset = bo->mem.start << PAGE_SHIFT;
386 + if ((offset + size) <= rdev->mc.visible_vram_size)
387 + return 0;
388 +
389 + /* hurrah the memory is not visible ! */
390 + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
391 + rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
392 + r = ttm_bo_validate(bo, &rbo->placement, false, false);
393 + if (unlikely(r == -ENOMEM)) {
394 + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
395 + return ttm_bo_validate(bo, &rbo->placement, false, false);
396 + } else if (unlikely(r != 0)) {
397 + return r;
398 }
399 +
400 + offset = bo->mem.start << PAGE_SHIFT;
401 + /* this should never happen */
402 + if ((offset + size) > rdev->mc.visible_vram_size)
403 + return -EINVAL;
404 +
405 return 0;
406 }
407
408 diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
409 index df064e8cd9dc..f25f29835b3e 100644
410 --- a/drivers/hwmon/Kconfig
411 +++ b/drivers/hwmon/Kconfig
412 @@ -944,7 +944,7 @@ config SENSORS_NCT6775
413
414 config SENSORS_NTC_THERMISTOR
415 tristate "NTC thermistor support"
416 - depends on (!OF && !IIO) || (OF && IIO)
417 + depends on !OF || IIO=n || IIO
418 help
419 This driver supports NTC thermistors sensor reading and its
420 interpretation. The driver can also monitor the temperature and
421 diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
422 index 9297164a23a5..c64d3d497c50 100644
423 --- a/drivers/hwmon/ntc_thermistor.c
424 +++ b/drivers/hwmon/ntc_thermistor.c
425 @@ -44,6 +44,7 @@ struct ntc_compensation {
426 unsigned int ohm;
427 };
428
429 +/* Order matters, ntc_match references the entries by index */
430 static const struct platform_device_id ntc_thermistor_id[] = {
431 { "ncp15wb473", TYPE_NCPXXWB473 },
432 { "ncp18wb473", TYPE_NCPXXWB473 },
433 @@ -141,7 +142,7 @@ struct ntc_data {
434 char name[PLATFORM_NAME_SIZE];
435 };
436
437 -#ifdef CONFIG_OF
438 +#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
439 static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
440 {
441 struct iio_channel *channel = pdata->chan;
442 @@ -163,15 +164,15 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
443
444 static const struct of_device_id ntc_match[] = {
445 { .compatible = "ntc,ncp15wb473",
446 - .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
447 + .data = &ntc_thermistor_id[0] },
448 { .compatible = "ntc,ncp18wb473",
449 - .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
450 + .data = &ntc_thermistor_id[1] },
451 { .compatible = "ntc,ncp21wb473",
452 - .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
453 + .data = &ntc_thermistor_id[2] },
454 { .compatible = "ntc,ncp03wb473",
455 - .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
456 + .data = &ntc_thermistor_id[3] },
457 { .compatible = "ntc,ncp15wl333",
458 - .data = &ntc_thermistor_id[TYPE_NCPXXWL333] },
459 + .data = &ntc_thermistor_id[4] },
460 { },
461 };
462 MODULE_DEVICE_TABLE(of, ntc_match);
463 @@ -223,6 +224,8 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
464 return NULL;
465 }
466
467 +#define ntc_match NULL
468 +
469 static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata)
470 { }
471 #endif
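
The new "Order matters" comment exists because the .data pointers in ntc_match now reference ntc_thermistor_id[] by array position rather than by the TYPE_* enum values, so the two tables must stay in the same order. A compact illustration of that coupling, with illustrative types rather than the driver's:

#include <stdio.h>

struct pdev_id { const char *name; int driver_data; };
struct of_id   { const char *compatible; const struct pdev_id *data; };

static const struct pdev_id ids[] = {
	{ "ncp15wb473", 0 },	/* index 0 */
	{ "ncp18wb473", 0 },	/* index 1: reorder ids[] and ... */
};

static const struct of_id matches[] = {
	{ "ntc,ncp15wb473", &ids[0] },	/* ... these bindings silently rot */
	{ "ntc,ncp18wb473", &ids[1] },
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(matches) / sizeof(matches[0]); i++)
		printf("%s -> %s\n", matches[i].compatible, matches[i].data->name);
	return 0;
}
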
472 diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
473 index 988e29d18bb4..bae20f8bb034 100644
474 --- a/drivers/infiniband/ulp/isert/ib_isert.c
475 +++ b/drivers/infiniband/ulp/isert/ib_isert.c
476 @@ -965,6 +965,8 @@ sequence_cmd:
477
478 if (!rc && dump_payload == false && unsol_data)
479 iscsit_set_unsoliticed_dataout(cmd);
480 + else if (dump_payload && imm_data)
481 + target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
482
483 return 0;
484 }
485 diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
486 index 1a75869d3a82..677973641d2b 100644
487 --- a/drivers/md/dm-cache-target.c
488 +++ b/drivers/md/dm-cache-target.c
489 @@ -1954,6 +1954,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
490 ti->num_discard_bios = 1;
491 ti->discards_supported = true;
492 ti->discard_zeroes_data_unsupported = true;
493 + /* Discard bios must be split on a block boundary */
494 + ti->split_discard_bios = true;
495
496 cache->features = ca->features;
497 ti->per_bio_data_size = get_per_bio_data_size(cache);
498 diff --git a/drivers/md/md.c b/drivers/md/md.c
499 index 00a99fe797d4..963fa59be9b3 100644
500 --- a/drivers/md/md.c
501 +++ b/drivers/md/md.c
502 @@ -7338,8 +7338,10 @@ void md_do_sync(struct md_thread *thread)
503 /* just incase thread restarts... */
504 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
505 return;
506 - if (mddev->ro) /* never try to sync a read-only array */
507 + if (mddev->ro) {/* never try to sync a read-only array */
508 + set_bit(MD_RECOVERY_INTR, &mddev->recovery);
509 return;
510 + }
511
512 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
513 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
514 @@ -7788,6 +7790,7 @@ void md_check_recovery(struct mddev *mddev)
515 /* There is no thread, but we need to call
516 * ->spare_active and clear saved_raid_disk
517 */
518 + set_bit(MD_RECOVERY_INTR, &mddev->recovery);
519 md_reap_sync_thread(mddev);
520 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
521 goto unlock;
522 diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
523 index d067ef70e194..5e80d428e544 100644
524 --- a/drivers/staging/comedi/drivers/ni_daq_700.c
525 +++ b/drivers/staging/comedi/drivers/ni_daq_700.c
526 @@ -127,6 +127,8 @@ static int daq700_ai_rinsn(struct comedi_device *dev,
527 /* write channel to multiplexer */
528 /* set mask scan bit high to disable scanning */
529 outb(chan | 0x80, dev->iobase + CMD_R1);
530 + /* mux needs 2us to really settle [Fred Brooks]. */
531 + udelay(2);
532
533 /* convert n samples */
534 for (n = 0; n < insn->n; n++) {
535 diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
536 index 6c7b55c2947d..e70a48e3b376 100644
537 --- a/drivers/staging/speakup/main.c
538 +++ b/drivers/staging/speakup/main.c
539 @@ -2219,6 +2219,7 @@ static void __exit speakup_exit(void)
540 unregister_keyboard_notifier(&keyboard_notifier_block);
541 unregister_vt_notifier(&vt_notifier_block);
542 speakup_unregister_devsynth();
543 + speakup_cancel_paste();
544 del_timer(&cursor_timer);
545 kthread_stop(speakup_task);
546 speakup_task = NULL;
547 diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
548 index f0fb00392d6b..f67941e78e4a 100644
549 --- a/drivers/staging/speakup/selection.c
550 +++ b/drivers/staging/speakup/selection.c
551 @@ -4,6 +4,8 @@
552 #include <linux/sched.h>
553 #include <linux/device.h> /* for dev_warn */
554 #include <linux/selection.h>
555 +#include <linux/workqueue.h>
556 +#include <asm/cmpxchg.h>
557
558 #include "speakup.h"
559
560 @@ -121,20 +123,24 @@ int speakup_set_selection(struct tty_struct *tty)
561 return 0;
562 }
563
564 -/* TODO: move to some helper thread, probably. That'd fix having to check for
565 - * in_atomic(). */
566 -int speakup_paste_selection(struct tty_struct *tty)
567 +struct speakup_paste_work {
568 + struct work_struct work;
569 + struct tty_struct *tty;
570 +};
571 +
572 +static void __speakup_paste_selection(struct work_struct *work)
573 {
574 + struct speakup_paste_work *spw =
575 + container_of(work, struct speakup_paste_work, work);
576 + struct tty_struct *tty = xchg(&spw->tty, NULL);
577 struct vc_data *vc = (struct vc_data *) tty->driver_data;
578 int pasted = 0, count;
579 DECLARE_WAITQUEUE(wait, current);
580 +
581 add_wait_queue(&vc->paste_wait, &wait);
582 while (sel_buffer && sel_buffer_lth > pasted) {
583 set_current_state(TASK_INTERRUPTIBLE);
584 if (test_bit(TTY_THROTTLED, &tty->flags)) {
585 - if (in_atomic())
586 - /* if we are in an interrupt handler, abort */
587 - break;
588 schedule();
589 continue;
590 }
591 @@ -146,6 +152,26 @@ int speakup_paste_selection(struct tty_struct *tty)
592 }
593 remove_wait_queue(&vc->paste_wait, &wait);
594 current->state = TASK_RUNNING;
595 + tty_kref_put(tty);
596 +}
597 +
598 +static struct speakup_paste_work speakup_paste_work = {
599 + .work = __WORK_INITIALIZER(speakup_paste_work.work,
600 + __speakup_paste_selection)
601 +};
602 +
603 +int speakup_paste_selection(struct tty_struct *tty)
604 +{
605 + if (cmpxchg(&speakup_paste_work.tty, NULL, tty) != NULL)
606 + return -EBUSY;
607 +
608 + tty_kref_get(tty);
609 + schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work);
610 return 0;
611 }
612
613 +void speakup_cancel_paste(void)
614 +{
615 + cancel_work_sync(&speakup_paste_work.work);
616 + tty_kref_put(speakup_paste_work.tty);
617 +}
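
The speakup rework moves pasting out of atomic context into a workqueue: cmpxchg() claims a single pending-tty slot (returning -EBUSY if a paste is already queued), the worker consumes the slot with xchg(), and speakup_cancel_paste() flushes the work and drops the leftover reference at module exit. A user-space sketch of the single-slot hand-off, with C11 atomics standing in for the kernel's cmpxchg/xchg:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic(void *) pending_tty;

static int queue_paste(void *tty)
{
	void *expected = NULL;

	/* claim the slot; only one paste may be queued at a time */
	if (!atomic_compare_exchange_strong(&pending_tty, &expected, tty))
		return -1;	/* the kernel version returns -EBUSY */
	/* tty_kref_get() + schedule_work() happen here in the driver */
	return 0;
}

static void paste_worker(void)
{
	void *tty = atomic_exchange(&pending_tty, NULL);	/* consume */

	if (tty)
		printf("pasting into tty %p\n", tty);
}

int main(void)
{
	int dummy;

	if (queue_paste(&dummy) == 0)
		paste_worker();
	return 0;
}
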
618 diff --git a/drivers/staging/speakup/speakup.h b/drivers/staging/speakup/speakup.h
619 index 0126f714821a..74fe72429b2d 100644
620 --- a/drivers/staging/speakup/speakup.h
621 +++ b/drivers/staging/speakup/speakup.h
622 @@ -77,6 +77,7 @@ extern void synth_buffer_clear(void);
623 extern void speakup_clear_selection(void);
624 extern int speakup_set_selection(struct tty_struct *tty);
625 extern int speakup_paste_selection(struct tty_struct *tty);
626 +extern void speakup_cancel_paste(void);
627 extern void speakup_register_devsynth(void);
628 extern void speakup_unregister_devsynth(void);
629 extern void synth_write(const char *buf, size_t count);
630 diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
631 index 4ed35231e552..2cdd5079ae78 100644
632 --- a/drivers/usb/core/driver.c
633 +++ b/drivers/usb/core/driver.c
634 @@ -1754,10 +1754,13 @@ int usb_runtime_suspend(struct device *dev)
635 if (status == -EAGAIN || status == -EBUSY)
636 usb_mark_last_busy(udev);
637
638 - /* The PM core reacts badly unless the return code is 0,
639 - * -EAGAIN, or -EBUSY, so always return -EBUSY on an error.
640 + /*
641 + * The PM core reacts badly unless the return code is 0,
642 + * -EAGAIN, or -EBUSY, so always return -EBUSY on an error
643 + * (except for root hubs, because they don't suspend through
644 + * an upstream port like other USB devices).
645 */
646 - if (status != 0)
647 + if (status != 0 && udev->parent)
648 return -EBUSY;
649 return status;
650 }
651 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
652 index a036e03ae1b3..46efdca96952 100644
653 --- a/drivers/usb/core/hub.c
654 +++ b/drivers/usb/core/hub.c
655 @@ -1683,8 +1683,19 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
656 */
657 pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
658
659 - /* Hubs have proper suspend/resume support. */
660 - usb_enable_autosuspend(hdev);
661 + /*
662 + * Hubs have proper suspend/resume support, except for root hubs
663 + * where the controller driver doesn't have bus_suspend and
664 + * bus_resume methods.
665 + */
666 + if (hdev->parent) { /* normal device */
667 + usb_enable_autosuspend(hdev);
668 + } else { /* root hub */
669 + const struct hc_driver *drv = bus_to_hcd(hdev->bus)->driver;
670 +
671 + if (drv->bus_suspend && drv->bus_resume)
672 + usb_enable_autosuspend(hdev);
673 + }
674
675 if (hdev->level == MAX_TOPO_LEVEL) {
676 dev_err(&intf->dev,
677 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
678 index f2e57a1112c9..d007f0920126 100644
679 --- a/drivers/usb/host/xhci-mem.c
680 +++ b/drivers/usb/host/xhci-mem.c
681 @@ -1794,6 +1794,16 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
682 kfree(cur_cd);
683 }
684
685 + num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
686 + for (i = 0; i < num_ports; i++) {
687 + struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
688 + for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
689 + struct list_head *ep = &bwt->interval_bw[j].endpoints;
690 + while (!list_empty(ep))
691 + list_del_init(ep->next);
692 + }
693 + }
694 +
695 for (i = 1; i < MAX_HC_SLOTS; ++i)
696 xhci_free_virt_device(xhci, i);
697
698 @@ -1834,16 +1844,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
699 if (!xhci->rh_bw)
700 goto no_bw;
701
702 - num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
703 - for (i = 0; i < num_ports; i++) {
704 - struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
705 - for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
706 - struct list_head *ep = &bwt->interval_bw[j].endpoints;
707 - while (!list_empty(ep))
708 - list_del_init(ep->next);
709 - }
710 - }
711 -
712 for (i = 0; i < num_ports; i++) {
713 struct xhci_tt_bw_info *tt, *n;
714 list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
715 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
716 index 2c635bd9c185..b9e663ac9a35 100644
717 --- a/drivers/usb/serial/ftdi_sio.c
718 +++ b/drivers/usb/serial/ftdi_sio.c
719 @@ -583,6 +583,8 @@ static struct usb_device_id id_table_combined [] = {
720 { USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) },
721 { USB_DEVICE(FTDI_VID, FTDI_TIAO_UMPA_PID),
722 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
723 + { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
724 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
725 /*
726 * ELV devices:
727 */
728 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
729 index 993c93df6874..500474c48f4b 100644
730 --- a/drivers/usb/serial/ftdi_sio_ids.h
731 +++ b/drivers/usb/serial/ftdi_sio_ids.h
732 @@ -538,6 +538,11 @@
733 */
734 #define FTDI_TIAO_UMPA_PID 0x8a98 /* TIAO/DIYGADGET USB Multi-Protocol Adapter */
735
736 +/*
737 + * NovaTech product ids (FTDI_VID)
738 + */
739 +#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
740 +
741
742 /********************************/
743 /** third-party VID/PID combos **/
744 diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
745 index c5c9cbf107d1..8cd6479a8b43 100644
746 --- a/drivers/usb/serial/io_ti.c
747 +++ b/drivers/usb/serial/io_ti.c
748 @@ -835,7 +835,7 @@ static int build_i2c_fw_hdr(__u8 *header, struct device *dev)
749 firmware_rec = (struct ti_i2c_firmware_rec*)i2c_header->Data;
750
751 i2c_header->Type = I2C_DESC_TYPE_FIRMWARE_BLANK;
752 - i2c_header->Size = (__u16)buffer_size;
753 + i2c_header->Size = cpu_to_le16(buffer_size);
754 i2c_header->CheckSum = cs;
755 firmware_rec->Ver_Major = OperationalMajorVersion;
756 firmware_rec->Ver_Minor = OperationalMinorVersion;
757 diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h
758 index 51f83fbb73bb..6f6a856bc37c 100644
759 --- a/drivers/usb/serial/io_usbvend.h
760 +++ b/drivers/usb/serial/io_usbvend.h
761 @@ -594,7 +594,7 @@ struct edge_boot_descriptor {
762
763 struct ti_i2c_desc {
764 __u8 Type; // Type of descriptor
765 - __u16 Size; // Size of data only not including header
766 + __le16 Size; // Size of data only not including header
767 __u8 CheckSum; // Checksum (8 bit sum of data only)
768 __u8 Data[0]; // Data starts here
769 } __attribute__((packed));
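
The Size field above lives in a wire format that is little-endian regardless of host byte order, so storing a raw host-order __u16 is wrong on big-endian machines; cpu_to_le16() makes the conversion explicit, and the __le16 type lets sparse check it. A portable user-space equivalent of the conversion, as a minimal sketch:

#include <stdint.h>
#include <stdio.h>

/* store v little-endian, independent of host byte order */
static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

int main(void)
{
	uint8_t buf[2];

	put_le16(buf, 0x1234);
	printf("%02x %02x\n", buf[0], buf[1]);	/* 34 12 on any host */
	return 0;
}
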
770 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
771 index f213ee978516..948a19f0cdf7 100644
772 --- a/drivers/usb/serial/option.c
773 +++ b/drivers/usb/serial/option.c
774 @@ -161,6 +161,7 @@ static void option_instat_callback(struct urb *urb);
775 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000
776 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001
777 #define NOVATELWIRELESS_PRODUCT_E362 0x9010
778 +#define NOVATELWIRELESS_PRODUCT_E371 0x9011
779 #define NOVATELWIRELESS_PRODUCT_G2 0xA010
780 #define NOVATELWIRELESS_PRODUCT_MC551 0xB001
781
782 @@ -1012,6 +1013,7 @@ static const struct usb_device_id option_ids[] = {
783 /* Novatel Ovation MC551 a.k.a. Verizon USB551L */
784 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
785 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
786 + { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) },
787
788 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
789 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
790 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
791 index c5b6dbf9c2fc..229a757e1c13 100644
792 --- a/include/linux/perf_event.h
793 +++ b/include/linux/perf_event.h
794 @@ -695,10 +695,17 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
795 extern int sysctl_perf_event_paranoid;
796 extern int sysctl_perf_event_mlock;
797 extern int sysctl_perf_event_sample_rate;
798 +extern int sysctl_perf_cpu_time_max_percent;
799 +
800 +extern void perf_sample_event_took(u64 sample_len_ns);
801
802 extern int perf_proc_update_handler(struct ctl_table *table, int write,
803 void __user *buffer, size_t *lenp,
804 loff_t *ppos);
805 +extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
806 + void __user *buffer, size_t *lenp,
807 + loff_t *ppos);
808 +
809
810 static inline bool perf_paranoid_tracepoint_raw(void)
811 {
812 diff --git a/include/uapi/linux/usb/Kbuild b/include/uapi/linux/usb/Kbuild
813 index 6cb4ea826834..4cc4d6e7e523 100644
814 --- a/include/uapi/linux/usb/Kbuild
815 +++ b/include/uapi/linux/usb/Kbuild
816 @@ -1,6 +1,7 @@
817 # UAPI Header export list
818 header-y += audio.h
819 header-y += cdc.h
820 +header-y += cdc-wdm.h
821 header-y += ch11.h
822 header-y += ch9.h
823 header-y += functionfs.h
824 diff --git a/include/uapi/linux/usb/cdc-wdm.h b/include/uapi/linux/usb/cdc-wdm.h
825 index f03134feebd6..0dc132e75030 100644
826 --- a/include/uapi/linux/usb/cdc-wdm.h
827 +++ b/include/uapi/linux/usb/cdc-wdm.h
828 @@ -9,6 +9,8 @@
829 #ifndef _UAPI__LINUX_USB_CDC_WDM_H
830 #define _UAPI__LINUX_USB_CDC_WDM_H
831
832 +#include <linux/types.h>
833 +
834 /*
835 * This IOCTL is used to retrieve the wMaxCommand for the device,
836 * defining the message limit for both reading and writing.
837 diff --git a/kernel/cpu.c b/kernel/cpu.c
838 index 198a38883e64..bc255e25d5dd 100644
839 --- a/kernel/cpu.c
840 +++ b/kernel/cpu.c
841 @@ -698,10 +698,12 @@ void set_cpu_present(unsigned int cpu, bool present)
842
843 void set_cpu_online(unsigned int cpu, bool online)
844 {
845 - if (online)
846 + if (online) {
847 cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
848 - else
849 + cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
850 + } else {
851 cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
852 + }
853 }
854
855 void set_cpu_active(unsigned int cpu, bool active)
856 diff --git a/kernel/events/core.c b/kernel/events/core.c
857 index ac9b8cce3df2..459b94c94721 100644
858 --- a/kernel/events/core.c
859 +++ b/kernel/events/core.c
860 @@ -165,25 +165,109 @@ int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free'
861 /*
862 * max perf event sample rate
863 */
864 -#define DEFAULT_MAX_SAMPLE_RATE 100000
865 -int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
866 -static int max_samples_per_tick __read_mostly =
867 - DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
868 +#define DEFAULT_MAX_SAMPLE_RATE 100000
869 +#define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
870 +#define DEFAULT_CPU_TIME_MAX_PERCENT 25
871 +
872 +int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
873 +
874 +static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
875 +static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
876 +
877 +static atomic_t perf_sample_allowed_ns __read_mostly =
878 + ATOMIC_INIT( DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100);
879 +
880 +void update_perf_cpu_limits(void)
881 +{
882 + u64 tmp = perf_sample_period_ns;
883 +
884 + tmp *= sysctl_perf_cpu_time_max_percent;
885 + do_div(tmp, 100);
886 + atomic_set(&perf_sample_allowed_ns, tmp);
887 +}
888
889 int perf_proc_update_handler(struct ctl_table *table, int write,
890 void __user *buffer, size_t *lenp,
891 loff_t *ppos)
892 {
893 - int ret = proc_dointvec(table, write, buffer, lenp, ppos);
894 + int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
895
896 if (ret || !write)
897 return ret;
898
899 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
900 + perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
901 + update_perf_cpu_limits();
902 +
903 + return 0;
904 +}
905 +
906 +int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
907 +
908 +int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
909 + void __user *buffer, size_t *lenp,
910 + loff_t *ppos)
911 +{
912 + int ret = proc_dointvec(table, write, buffer, lenp, ppos);
913 +
914 + if (ret || !write)
915 + return ret;
916 +
917 + update_perf_cpu_limits();
918
919 return 0;
920 }
921
922 +/*
923 + * perf samples are done in some very critical code paths (NMIs).
924 + * If they take too much CPU time, the system can lock up and not
925 + * get any real work done. This will drop the sample rate when
926 + * we detect that events are taking too long.
927 + */
928 +#define NR_ACCUMULATED_SAMPLES 128
929 +DEFINE_PER_CPU(u64, running_sample_length);
930 +
931 +void perf_sample_event_took(u64 sample_len_ns)
932 +{
933 + u64 avg_local_sample_len;
934 + u64 local_samples_len;
935 +
936 + if (atomic_read(&perf_sample_allowed_ns) == 0)
937 + return;
938 +
939 + /* decay the counter by 1 average sample */
940 + local_samples_len = __get_cpu_var(running_sample_length);
941 + local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
942 + local_samples_len += sample_len_ns;
943 + __get_cpu_var(running_sample_length) = local_samples_len;
944 +
945 + /*
946 + * note: this will be biased artifically low until we have
947 + * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
948 + * from having to maintain a count.
949 + */
950 + avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
951 +
952 + if (avg_local_sample_len <= atomic_read(&perf_sample_allowed_ns))
953 + return;
954 +
955 + if (max_samples_per_tick <= 1)
956 + return;
957 +
958 + max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
959 + sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
960 + perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
961 +
962 + printk_ratelimited(KERN_WARNING
963 + "perf samples too long (%lld > %d), lowering "
964 + "kernel.perf_event_max_sample_rate to %d\n",
965 + avg_local_sample_len,
966 + atomic_read(&perf_sample_allowed_ns),
967 + sysctl_perf_event_sample_rate);
968 +
969 + update_perf_cpu_limits();
970 +}
971 +
972 static atomic64_t perf_event_id;
973
974 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
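
The decay step in perf_sample_event_took() above is a counting-free moving average: subtract one average-sized share, add the new sample, and the accumulator converges on NR_ACCUMULATED_SAMPLES times the true mean. A stand-alone sketch of just that arithmetic, with an assumed constant 10 us sample cost:

#include <stdio.h>

#define NR_ACCUMULATED_SAMPLES 128

int main(void)
{
	unsigned long long running = 0, avg = 0;

	for (int i = 0; i < 1024; i++) {
		running -= running / NR_ACCUMULATED_SAMPLES;	/* decay one share */
		running += 10000;				/* new 10 us sample */
		avg = running / NR_ACCUMULATED_SAMPLES;
	}
	printf("estimated average sample length: %llu ns\n", avg);	/* ~10000 */
	return 0;
}
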
975 @@ -1237,6 +1321,11 @@ group_sched_out(struct perf_event *group_event,
976 cpuctx->exclusive = 0;
977 }
978
979 +struct remove_event {
980 + struct perf_event *event;
981 + bool detach_group;
982 +};
983 +
984 /*
985 * Cross CPU call to remove a performance event
986 *
987 @@ -1245,12 +1334,15 @@ group_sched_out(struct perf_event *group_event,
988 */
989 static int __perf_remove_from_context(void *info)
990 {
991 - struct perf_event *event = info;
992 + struct remove_event *re = info;
993 + struct perf_event *event = re->event;
994 struct perf_event_context *ctx = event->ctx;
995 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
996
997 raw_spin_lock(&ctx->lock);
998 event_sched_out(event, cpuctx, ctx);
999 + if (re->detach_group)
1000 + perf_group_detach(event);
1001 list_del_event(event, ctx);
1002 if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
1003 ctx->is_active = 0;
1004 @@ -1275,10 +1367,14 @@ static int __perf_remove_from_context(void *info)
1005 * When called from perf_event_exit_task, it's OK because the
1006 * context has been detached from its task.
1007 */
1008 -static void perf_remove_from_context(struct perf_event *event)
1009 +static void perf_remove_from_context(struct perf_event *event, bool detach_group)
1010 {
1011 struct perf_event_context *ctx = event->ctx;
1012 struct task_struct *task = ctx->task;
1013 + struct remove_event re = {
1014 + .event = event,
1015 + .detach_group = detach_group,
1016 + };
1017
1018 lockdep_assert_held(&ctx->mutex);
1019
1020 @@ -1287,12 +1383,12 @@ static void perf_remove_from_context(struct perf_event *event)
1021 * Per cpu events are removed via an smp call and
1022 * the removal is always successful.
1023 */
1024 - cpu_function_call(event->cpu, __perf_remove_from_context, event);
1025 + cpu_function_call(event->cpu, __perf_remove_from_context, &re);
1026 return;
1027 }
1028
1029 retry:
1030 - if (!task_function_call(task, __perf_remove_from_context, event))
1031 + if (!task_function_call(task, __perf_remove_from_context, &re))
1032 return;
1033
1034 raw_spin_lock_irq(&ctx->lock);
1035 @@ -1309,6 +1405,8 @@ retry:
1036 * Since the task isn't running, its safe to remove the event, us
1037 * holding the ctx->lock ensures the task won't get scheduled in.
1038 */
1039 + if (detach_group)
1040 + perf_group_detach(event);
1041 list_del_event(event, ctx);
1042 raw_spin_unlock_irq(&ctx->lock);
1043 }
1044 @@ -3015,10 +3113,7 @@ int perf_event_release_kernel(struct perf_event *event)
1045 * to trigger the AB-BA case.
1046 */
1047 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
1048 - raw_spin_lock_irq(&ctx->lock);
1049 - perf_group_detach(event);
1050 - raw_spin_unlock_irq(&ctx->lock);
1051 - perf_remove_from_context(event);
1052 + perf_remove_from_context(event, true);
1053 mutex_unlock(&ctx->mutex);
1054
1055 free_event(event);
1056 @@ -5044,6 +5139,9 @@ struct swevent_htable {
1057
1058 /* Recursion avoidance in each contexts */
1059 int recursion[PERF_NR_CONTEXTS];
1060 +
1061 + /* Keeps track of cpu being initialized/exited */
1062 + bool online;
1063 };
1064
1065 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
1066 @@ -5290,8 +5388,14 @@ static int perf_swevent_add(struct perf_event *event, int flags)
1067 hwc->state = !(flags & PERF_EF_START);
1068
1069 head = find_swevent_head(swhash, event);
1070 - if (WARN_ON_ONCE(!head))
1071 + if (!head) {
1072 + /*
1073 + * We can race with cpu hotplug code. Do not
1074 + * WARN if the cpu just got unplugged.
1075 + */
1076 + WARN_ON_ONCE(swhash->online);
1077 return -EINVAL;
1078 + }
1079
1080 hlist_add_head_rcu(&event->hlist_entry, head);
1081
1082 @@ -6581,6 +6685,9 @@ SYSCALL_DEFINE5(perf_event_open,
1083 if (attr.freq) {
1084 if (attr.sample_freq > sysctl_perf_event_sample_rate)
1085 return -EINVAL;
1086 + } else {
1087 + if (attr.sample_period & (1ULL << 63))
1088 + return -EINVAL;
1089 }
1090
1091 /*
1092 @@ -6727,7 +6834,7 @@ SYSCALL_DEFINE5(perf_event_open,
1093 struct perf_event_context *gctx = group_leader->ctx;
1094
1095 mutex_lock(&gctx->mutex);
1096 - perf_remove_from_context(group_leader);
1097 + perf_remove_from_context(group_leader, false);
1098
1099 /*
1100 * Removing from the context ends up with disabled
1101 @@ -6737,7 +6844,7 @@ SYSCALL_DEFINE5(perf_event_open,
1102 perf_event__state_init(group_leader);
1103 list_for_each_entry(sibling, &group_leader->sibling_list,
1104 group_entry) {
1105 - perf_remove_from_context(sibling);
1106 + perf_remove_from_context(sibling, false);
1107 perf_event__state_init(sibling);
1108 put_ctx(gctx);
1109 }
1110 @@ -6867,7 +6974,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
1111 mutex_lock(&src_ctx->mutex);
1112 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
1113 event_entry) {
1114 - perf_remove_from_context(event);
1115 + perf_remove_from_context(event, false);
1116 put_ctx(src_ctx);
1117 list_add(&event->event_entry, &events);
1118 }
1119 @@ -6927,13 +7034,7 @@ __perf_event_exit_task(struct perf_event *child_event,
1120 struct perf_event_context *child_ctx,
1121 struct task_struct *child)
1122 {
1123 - if (child_event->parent) {
1124 - raw_spin_lock_irq(&child_ctx->lock);
1125 - perf_group_detach(child_event);
1126 - raw_spin_unlock_irq(&child_ctx->lock);
1127 - }
1128 -
1129 - perf_remove_from_context(child_event);
1130 + perf_remove_from_context(child_event, !!child_event->parent);
1131
1132 /*
1133 * It can happen that the parent exits first, and has events
1134 @@ -7395,6 +7496,7 @@ static void __cpuinit perf_event_init_cpu(int cpu)
1135 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
1136
1137 mutex_lock(&swhash->hlist_mutex);
1138 + swhash->online = true;
1139 if (swhash->hlist_refcount > 0) {
1140 struct swevent_hlist *hlist;
1141
1142 @@ -7417,14 +7519,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
1143
1144 static void __perf_event_exit_context(void *__info)
1145 {
1146 + struct remove_event re = { .detach_group = false };
1147 struct perf_event_context *ctx = __info;
1148 - struct perf_event *event;
1149
1150 perf_pmu_rotate_stop(ctx->pmu);
1151
1152 rcu_read_lock();
1153 - list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
1154 - __perf_remove_from_context(event);
1155 + list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
1156 + __perf_remove_from_context(&re);
1157 rcu_read_unlock();
1158 }
1159
1160 @@ -7452,6 +7554,7 @@ static void perf_event_exit_cpu(int cpu)
1161 perf_event_exit_cpu_context(cpu);
1162
1163 mutex_lock(&swhash->hlist_mutex);
1164 + swhash->online = false;
1165 swevent_hlist_release(swhash);
1166 mutex_unlock(&swhash->hlist_mutex);
1167 }
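
perf_remove_from_context() now has to pass a flag through cpu_function_call()/task_function_call(), which deliver a single void * to the callee, so the patch bundles the event and the flag into struct remove_event on the caller's stack. The generic shape of that idiom, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct call_args {			/* mirrors struct remove_event */
	int event_id;
	bool detach_group;
};

static int remote_fn(void *info)	/* single void * callback, as in smp calls */
{
	struct call_args *a = info;

	printf("event %d, detach_group=%d\n", a->event_id, a->detach_group);
	return 0;
}

int main(void)
{
	struct call_args a = { .event_id = 7, .detach_group = true };

	return remote_fn(&a);
}
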
1168 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
1169 index b4308d7da339..2672eca82a2b 100644
1170 --- a/kernel/sched/core.c
1171 +++ b/kernel/sched/core.c
1172 @@ -5270,7 +5270,6 @@ static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
1173 unsigned long action, void *hcpu)
1174 {
1175 switch (action & ~CPU_TASKS_FROZEN) {
1176 - case CPU_STARTING:
1177 case CPU_DOWN_FAILED:
1178 set_cpu_active((long)hcpu, true);
1179 return NOTIFY_OK;
1180 diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
1181 index 1095e878a46f..b3f0a2783369 100644
1182 --- a/kernel/sched/cpupri.c
1183 +++ b/kernel/sched/cpupri.c
1184 @@ -70,8 +70,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
1185 int idx = 0;
1186 int task_pri = convert_prio(p->prio);
1187
1188 - if (task_pri >= MAX_RT_PRIO)
1189 - return 0;
1190 + BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
1191
1192 for (idx = 0; idx < task_pri; idx++) {
1193 struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
1194 diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
1195 index 1101d92635c3..c23a8fd36149 100644
1196 --- a/kernel/sched/cputime.c
1197 +++ b/kernel/sched/cputime.c
1198 @@ -326,50 +326,50 @@ out:
1199 * softirq as those do not count in task exec_runtime any more.
1200 */
1201 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
1202 - struct rq *rq)
1203 + struct rq *rq, int ticks)
1204 {
1205 - cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
1206 + cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
1207 + u64 cputime = (__force u64) cputime_one_jiffy;
1208 u64 *cpustat = kcpustat_this_cpu->cpustat;
1209
1210 if (steal_account_process_tick())
1211 return;
1212
1213 + cputime *= ticks;
1214 + scaled *= ticks;
1215 +
1216 if (irqtime_account_hi_update()) {
1217 - cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
1218 + cpustat[CPUTIME_IRQ] += cputime;
1219 } else if (irqtime_account_si_update()) {
1220 - cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
1221 + cpustat[CPUTIME_SOFTIRQ] += cputime;
1222 } else if (this_cpu_ksoftirqd() == p) {
1223 /*
1224 * ksoftirqd time do not get accounted in cpu_softirq_time.
1225 * So, we have to handle it separately here.
1226 * Also, p->stime needs to be updated for ksoftirqd.
1227 */
1228 - __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
1229 - CPUTIME_SOFTIRQ);
1230 + __account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
1231 } else if (user_tick) {
1232 - account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
1233 + account_user_time(p, cputime, scaled);
1234 } else if (p == rq->idle) {
1235 - account_idle_time(cputime_one_jiffy);
1236 + account_idle_time(cputime);
1237 } else if (p->flags & PF_VCPU) { /* System time or guest time */
1238 - account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
1239 + account_guest_time(p, cputime, scaled);
1240 } else {
1241 - __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
1242 - CPUTIME_SYSTEM);
1243 + __account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
1244 }
1245 }
1246
1247 static void irqtime_account_idle_ticks(int ticks)
1248 {
1249 - int i;
1250 struct rq *rq = this_rq();
1251
1252 - for (i = 0; i < ticks; i++)
1253 - irqtime_account_process_tick(current, 0, rq);
1254 + irqtime_account_process_tick(current, 0, rq, ticks);
1255 }
1256 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
1257 static inline void irqtime_account_idle_ticks(int ticks) {}
1258 static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
1259 - struct rq *rq) {}
1260 + struct rq *rq, int nr_ticks) {}
1261 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
1262
1263 /*
1264 @@ -464,7 +464,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
1265 return;
1266
1267 if (sched_clock_irqtime) {
1268 - irqtime_account_process_tick(p, user_tick, rq);
1269 + irqtime_account_process_tick(p, user_tick, rq, 1);
1270 return;
1271 }
1272
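
The cputime change replaces an N-iteration accounting loop with a single call that scales the one-jiffy quantum by the tick count. The batching idea in miniature, where ONE_JIFFY_NS is an assumed stand-in for cputime_one_jiffy:

#include <stdio.h>

#define ONE_JIFFY_NS 10000000ull	/* 10 ms, i.e. HZ=100, for illustration */

static unsigned long long idle_ns;

static void account_idle_ticks(int ticks)
{
	/* was: for (i = 0; i < ticks; i++) account_one_tick(); */
	idle_ns += ONE_JIFFY_NS * ticks;
}

int main(void)
{
	account_idle_ticks(5);
	printf("idle time: %llu ns\n", idle_ns);
	return 0;
}
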
1273 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
1274 index ed6c01626acd..9469f4c61a30 100644
1275 --- a/kernel/sysctl.c
1276 +++ b/kernel/sysctl.c
1277 @@ -1049,6 +1049,16 @@ static struct ctl_table kern_table[] = {
1278 .maxlen = sizeof(sysctl_perf_event_sample_rate),
1279 .mode = 0644,
1280 .proc_handler = perf_proc_update_handler,
1281 + .extra1 = &one,
1282 + },
1283 + {
1284 + .procname = "perf_cpu_time_max_percent",
1285 + .data = &sysctl_perf_cpu_time_max_percent,
1286 + .maxlen = sizeof(sysctl_perf_cpu_time_max_percent),
1287 + .mode = 0644,
1288 + .proc_handler = perf_cpu_time_max_percent_handler,
1289 + .extra1 = &zero,
1290 + .extra2 = &one_hundred,
1291 },
1292 #endif
1293 #ifdef CONFIG_KMEMCHECK
1294 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
1295 index 4254eb021583..4f8548abd6ee 100644
1296 --- a/mm/memory-failure.c
1297 +++ b/mm/memory-failure.c
1298 @@ -1153,6 +1153,8 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1299 */
1300 if (!PageHWPoison(p)) {
1301 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
1302 + atomic_long_sub(nr_pages, &num_poisoned_pages);
1303 + put_page(hpage);
1304 res = 0;
1305 goto out;
1306 }
1307 diff --git a/mm/rmap.c b/mm/rmap.c
1308 index fbf0040a7342..b730a4409be6 100644
1309 --- a/mm/rmap.c
1310 +++ b/mm/rmap.c
1311 @@ -1675,10 +1675,9 @@ void __put_anon_vma(struct anon_vma *anon_vma)
1312 {
1313 struct anon_vma *root = anon_vma->root;
1314
1315 + anon_vma_free(anon_vma);
1316 if (root != anon_vma && atomic_dec_and_test(&root->refcount))
1317 anon_vma_free(root);
1318 -
1319 - anon_vma_free(anon_vma);
1320 }
1321
1322 #ifdef CONFIG_MIGRATION
1323 diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
1324 index 95f3f1da0d7f..d38e6a8d8b9f 100644
1325 --- a/net/ipv6/netfilter.c
1326 +++ b/net/ipv6/netfilter.c
1327 @@ -30,13 +30,15 @@ int ip6_route_me_harder(struct sk_buff *skb)
1328 .daddr = iph->daddr,
1329 .saddr = iph->saddr,
1330 };
1331 + int err;
1332
1333 dst = ip6_route_output(net, skb->sk, &fl6);
1334 - if (dst->error) {
1335 + err = dst->error;
1336 + if (err) {
1337 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
1338 LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
1339 dst_release(dst);
1340 - return dst->error;
1341 + return err;
1342 }
1343
1344 /* Drop old route. */
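
The netfilter fix is a classic read-after-release repair: dst->error was returned after dst_release() had already dropped the reference. Snapshotting the value into a local first has the same shape as this sketch, where free() stands in for dst_release():

#include <stdio.h>
#include <stdlib.h>

struct dst { int error; };

static int route_me_harder(struct dst *dst)
{
	int err = dst->error;	/* copy out while the object is still valid */

	if (err) {
		free(dst);	/* stands in for dst_release() */
		return err;	/* returning dst->error here would read freed memory */
	}
	free(dst);
	return 0;
}

int main(void)
{
	struct dst *d = malloc(sizeof(*d));

	if (!d)
		return 1;
	d->error = -22;
	printf("%d\n", route_me_harder(d));
	return 0;
}
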
1345 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1346 index 7de7cf1ec852..0923f09df503 100644
1347 --- a/sound/pci/hda/patch_realtek.c
1348 +++ b/sound/pci/hda/patch_realtek.c
1349 @@ -1590,12 +1590,10 @@ static const struct hda_fixup alc260_fixups[] = {
1350 [ALC260_FIXUP_COEF] = {
1351 .type = HDA_FIXUP_VERBS,
1352 .v.verbs = (const struct hda_verb[]) {
1353 - { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 },
1354 - { 0x20, AC_VERB_SET_PROC_COEF, 0x3040 },
1355 + { 0x1a, AC_VERB_SET_COEF_INDEX, 0x07 },
1356 + { 0x1a, AC_VERB_SET_PROC_COEF, 0x3040 },
1357 { }
1358 },
1359 - .chained = true,
1360 - .chain_id = ALC260_FIXUP_HP_PIN_0F,
1361 },
1362 [ALC260_FIXUP_GPIO1] = {
1363 .type = HDA_FIXUP_VERBS,
1364 @@ -1610,8 +1608,8 @@ static const struct hda_fixup alc260_fixups[] = {
1365 [ALC260_FIXUP_REPLACER] = {
1366 .type = HDA_FIXUP_VERBS,
1367 .v.verbs = (const struct hda_verb[]) {
1368 - { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 },
1369 - { 0x20, AC_VERB_SET_PROC_COEF, 0x3050 },
1370 + { 0x1a, AC_VERB_SET_COEF_INDEX, 0x07 },
1371 + { 0x1a, AC_VERB_SET_PROC_COEF, 0x3050 },
1372 { }
1373 },
1374 .chained = true,
1375 diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
1376 index 07b1a3ad3e24..63b6f8c8edf2 100644
1377 --- a/tools/perf/util/evsel.c
1378 +++ b/tools/perf/util/evsel.c
1379 @@ -1514,7 +1514,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel,
1380 switch (err) {
1381 case EPERM:
1382 case EACCES:
1383 - return scnprintf(msg, size, "%s",
1384 + return scnprintf(msg, size,
1385 "You may not have permission to collect %sstats.\n"
1386 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
1387 " -1 - Not paranoid at all\n"