Magellan Linux

Annotation of /trunk/kernel26-alx/patches-3.10/0129-3.10.30-all-fixes.patch



Revision 2672 - Tue Jul 21 16:46:35 2015 UTC by niro
File size: 113447 bytes
-3.10.84-alx-r1
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index fd8d0d594fc7..954eab8c7fec 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1372,8 +1372,8 @@ may allocate from based on an estimation of its current memory and swap use.
For example, if a task is using all allowed memory, its badness score will be
1000. If it is using half of its allowed memory, its score will be 500.

-There is an additional factor included in the badness score: root
-processes are given 3% extra memory over other tasks.
+There is an additional factor included in the badness score: the current memory
+and swap usage is discounted by 3% for root processes.

The amount of "allowed" memory depends on the context in which the oom killer
was called. If it is due to the memory assigned to the allocating task's cpuset
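The proc.txt change above documents how the badness score is computed: usage is scaled to 0..1000 against the allowed maximum, with root usage discounted by 3%. A rough standalone sketch of that arithmetic (simplified; the rounding and parameter handling here are assumptions, not the kernel's oom_badness()):

/*
 * Sketch of the documented behaviour: badness is used/allowed scaled
 * to 0..1000, with a 3% discount on usage for root. Illustrative
 * only; allowed_pages is assumed nonzero.
 */
static long badness_sketch(unsigned long used_pages,
                           unsigned long allowed_pages, int is_root)
{
        long points;

        if (is_root)
                used_pages -= used_pages * 3 / 100;  /* 3% discount */

        points = (long)(used_pages * 1000 / allowed_pages);
        return points > 1000 ? 1000 : points;
}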
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index d55b8ab2d10f..d29dea0f3232 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -24,6 +24,7 @@ Supported adapters:
* Intel Lynx Point-LP (PCH)
* Intel Avoton (SOC)
* Intel Wellsburg (PCH)
+ * Intel Coleto Creek (PCH)
Datasheets: Publicly available at the Intel website

On Intel Patsburg and later chipsets, both the normal host SMBus controller
diff --git a/Makefile b/Makefile
index db7691ddd67c..18016a55dbd3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 10
-SUBLEVEL = 29
+SUBLEVEL = 30
EXTRAVERSION =
NAME = TOSSUG Baby Fish

diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index 38b313909ac9..adad46e41a1d 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -13,6 +13,7 @@
#include <linux/kdebug.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/sched.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>

diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 78f1f2ded86c..ffd4493efc78 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -281,7 +281,6 @@ long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
u32 dummy, u32 low, u32 high);
long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,
u32 dummy, u32 low, u32 high);
-long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len);
long compat_sys_sync_file_range2(int fd, unsigned int flags,
u32 offset_lo, u32 offset_hi,
u32 nbytes_lo, u32 nbytes_hi);
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 96ef8eeb064e..8a95204cbdae 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -195,7 +195,7 @@ void platform_calibrate_ccount(void)
* Ethernet -- OpenCores Ethernet MAC (ethoc driver)
*/

-static struct resource ethoc_res[] __initdata = {
+static struct resource ethoc_res[] = {
[0] = { /* register space */
.start = OETH_REGS_PADDR,
.end = OETH_REGS_PADDR + OETH_REGS_SIZE - 1,
@@ -213,7 +213,7 @@ static struct resource ethoc_res[] __initdata = {
},
};

-static struct ethoc_platform_data ethoc_pdata __initdata = {
+static struct ethoc_platform_data ethoc_pdata = {
/*
* The MAC address for these boards is 00:50:c2:13:6f:xx.
* The last byte (here as zero) is read from the DIP switches on the
@@ -223,7 +223,7 @@ static struct ethoc_platform_data ethoc_pdata __initdata = {
.phy_id = -1,
};

-static struct platform_device ethoc_device __initdata = {
+static struct platform_device ethoc_device = {
.name = "ethoc",
.id = -1,
.num_resources = ARRAY_SIZE(ethoc_res),
@@ -237,13 +237,13 @@ static struct platform_device ethoc_device __initdata = {
* UART
*/

-static struct resource serial_resource __initdata = {
+static struct resource serial_resource = {
.start = DUART16552_PADDR,
.end = DUART16552_PADDR + 0x1f,
.flags = IORESOURCE_MEM,
};

-static struct plat_serial8250_port serial_platform_data[] __initdata = {
+static struct plat_serial8250_port serial_platform_data[] = {
[0] = {
.mapbase = DUART16552_PADDR,
.irq = DUART16552_INTNUM,
@@ -256,7 +256,7 @@ static struct plat_serial8250_port serial_platform_data[] __initdata = {
{ },
};

-static struct platform_device xtavnet_uart __initdata = {
+static struct platform_device xtavnet_uart = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 292de3cab9cc..ccba6e46cfb3 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -33,6 +33,7 @@
#include <linux/proc_fs.h>
#include <linux/acpi.h>
#include <linux/slab.h>
+#include <linux/regulator/machine.h>
#ifdef CONFIG_X86
#include <asm/mpspec.h>
#endif
@@ -705,6 +706,14 @@ void __init acpi_early_init(void)
goto error0;
}

+ /*
+ * If the system is using ACPI then we can be reasonably
+ * confident that any regulators are managed by the firmware
+ * so tell the regulator core it has everything it needs to
+ * know.
+ */
+ regulator_has_full_constraints();
+
return;

error0:
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index a22fb3e47256..34d19b1984a1 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -48,7 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
}

struct sample {
- int core_pct_busy;
+ int32_t core_pct_busy;
u64 aperf;
u64 mperf;
int freq;
@@ -68,7 +68,7 @@ struct _pid {
int32_t i_gain;
int32_t d_gain;
int deadband;
- int last_err;
+ int32_t last_err;
};

struct cpudata {
@@ -153,16 +153,15 @@ static inline void pid_d_gain_set(struct _pid *pid, int percent)
pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}

-static signed int pid_calc(struct _pid *pid, int busy)
+static signed int pid_calc(struct _pid *pid, int32_t busy)
{
- signed int err, result;
+ signed int result;
int32_t pterm, dterm, fp_error;
int32_t integral_limit;

- err = pid->setpoint - busy;
- fp_error = int_tofp(err);
+ fp_error = int_tofp(pid->setpoint) - busy;

- if (abs(err) <= pid->deadband)
+ if (abs(fp_error) <= int_tofp(pid->deadband))
return 0;

pterm = mul_fp(pid->p_gain, fp_error);
@@ -176,8 +175,8 @@ static signed int pid_calc(struct _pid *pid, int busy)
if (pid->integral < -integral_limit)
pid->integral = -integral_limit;

- dterm = mul_fp(pid->d_gain, (err - pid->last_err));
- pid->last_err = err;
+ dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
+ pid->last_err = fp_error;

result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;

@@ -367,12 +366,13 @@ static int intel_pstate_turbo_pstate(void)
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
int max_perf = cpu->pstate.turbo_pstate;
+ int max_perf_adj;
int min_perf;
if (limits.no_turbo)
max_perf = cpu->pstate.max_pstate;

- max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
- *max = clamp_t(int, max_perf,
+ max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
+ *max = clamp_t(int, max_perf_adj,
cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
@@ -394,7 +394,10 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
trace_cpu_frequency(pstate * 100000, cpu->cpu);

cpu->pstate.current_pstate = pstate;
- wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
+ if (limits.no_turbo)
+ wrmsrl(MSR_IA32_PERF_CTL, BIT(32) | (pstate << 8));
+ else
+ wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);

}

@@ -432,8 +435,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
struct sample *sample)
{
u64 core_pct;
- core_pct = div64_u64(sample->aperf * 100, sample->mperf);
- sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
+ core_pct = div64_u64(int_tofp(sample->aperf * 100),
+ sample->mperf);
+ sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);

sample->core_pct_busy = core_pct;
}
@@ -465,22 +469,19 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
mod_timer_pinned(&cpu->timer, jiffies + delay);
}

-static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
+static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
- int32_t busy_scaled;
int32_t core_busy, max_pstate, current_pstate;

- core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
+ core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
max_pstate = int_tofp(cpu->pstate.max_pstate);
current_pstate = int_tofp(cpu->pstate.current_pstate);
- busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
-
- return fp_toint(busy_scaled);
+ return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
- int busy_scaled;
+ int32_t busy_scaled;
struct _pid *pid;
signed int ctl = 0;
int steps;
@@ -523,6 +524,11 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(0x2a, default_policy),
ICPU(0x2d, default_policy),
ICPU(0x3a, default_policy),
+ ICPU(0x3c, default_policy),
+ ICPU(0x3e, default_policy),
+ ICPU(0x3f, default_policy),
+ ICPU(0x45, default_policy),
+ ICPU(0x46, default_policy),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
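The intel_pstate hunks keep the PID error, deadband comparison and derivative term in fixed point instead of truncating them to integers. A standalone sketch of that step, assuming 8-bit-fraction helpers like the driver's (this mirrors, but is not, the patched pid_calc()):

/* Fixed-point PID step; FRAC_BITS and the helpers are assumptions
 * modelled on the driver, simplified for illustration. */
#include <stdint.h>
#include <stdlib.h>

#define FRAC_BITS 8
#define int_tofp(X) ((int32_t)((X) << FRAC_BITS))
#define fp_toint(X) ((X) >> FRAC_BITS)

static int32_t mul_fp(int32_t x, int32_t y)
{
        return (int32_t)(((int64_t)x * y) >> FRAC_BITS);
}

static int32_t pid_step(int32_t setpoint, int32_t busy_fp, int32_t deadband,
                        int32_t p_gain_fp, int32_t *last_err_fp)
{
        int32_t fp_error = int_tofp(setpoint) - busy_fp;
        int32_t dterm;

        /* compare in fixed point, as the patch now does */
        if (abs(fp_error) <= int_tofp(deadband))
                return 0;

        dterm = fp_error - *last_err_fp;        /* derivative, gain omitted */
        *last_err_fp = fp_error;

        return fp_toint(mul_fp(p_gain_fp, fp_error) + dterm);
}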
diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c
index 272a3ec35957..0314dde18a5d 100644
--- a/drivers/eisa/eisa-bus.c
+++ b/drivers/eisa/eisa-bus.c
@@ -275,11 +275,13 @@ static int __init eisa_request_resources(struct eisa_root_device *root,
}

if (slot) {
+ edev->res[i].name = NULL;
edev->res[i].start = SLOT_ADDRESS(root, slot)
+ (i * 0x400);
edev->res[i].end = edev->res[i].start + 0xff;
edev->res[i].flags = IORESOURCE_IO;
} else {
+ edev->res[i].name = NULL;
edev->res[i].start = SLOT_ADDRESS(root, slot)
+ EISA_VENDOR_ID_OFFSET;
edev->res[i].end = edev->res[i].start + 3;
@@ -326,19 +328,20 @@ static int __init eisa_probe(struct eisa_root_device *root)
return -ENOMEM;
}

- if (eisa_init_device(root, edev, 0)) {
+ if (eisa_request_resources(root, edev, 0)) {
+ dev_warn(root->dev,
+ "EISA: Cannot allocate resource for mainboard\n");
kfree(edev);
if (!root->force_probe)
- return -ENODEV;
+ return -EBUSY;
goto force_probe;
}

- if (eisa_request_resources(root, edev, 0)) {
- dev_warn(root->dev,
- "EISA: Cannot allocate resource for mainboard\n");
+ if (eisa_init_device(root, edev, 0)) {
+ eisa_release_resources(edev);
kfree(edev);
if (!root->force_probe)
- return -EBUSY;
+ return -ENODEV;
goto force_probe;
}

@@ -361,11 +364,6 @@ static int __init eisa_probe(struct eisa_root_device *root)
continue;
}

- if (eisa_init_device(root, edev, i)) {
- kfree(edev);
- continue;
- }
-
if (eisa_request_resources(root, edev, i)) {
dev_warn(root->dev,
"Cannot allocate resource for EISA slot %d\n",
@@ -374,6 +372,12 @@ static int __init eisa_probe(struct eisa_root_device *root)
continue;
}

+ if (eisa_init_device(root, edev, i)) {
+ eisa_release_resources(edev);
+ kfree(edev);
+ continue;
+ }
+
if (edev->state == (EISA_CONFIG_ENABLED | EISA_CONFIG_FORCED))
enabled_str = " (forced enabled)";
else if (edev->state == EISA_CONFIG_FORCED)
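The eisa-bus reordering makes the probe path claim slot resources before touching the device, and release them if initialization fails. The same acquire-then-init shape in miniature (init_device() is a hypothetical stand-in for the bus-specific setup; request_region/release_region are the stock kernel helpers):

#include <linux/errno.h>
#include <linux/ioport.h>

static int init_device(unsigned long base);     /* hypothetical setup step */

static int probe_one(unsigned long base, unsigned long len)
{
        if (!request_region(base, len, "example-slot")) /* claim first */
                return -EBUSY;

        if (init_device(base)) {                /* then initialize */
                release_region(base, len);      /* unwind on failure */
                return -ENODEV;
        }
        return 0;
}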
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 60685b21cc36..379a47ea99f6 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -273,8 +273,8 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
sr07 |= 0x11;
break;
case 16:
- sr07 |= 0xc1;
- hdr = 0xc0;
+ sr07 |= 0x17;
+ hdr = 0xc1;
break;
case 24:
sr07 |= 0x15;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d9d7d675246a..ccfc63665e76 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1687,6 +1687,7 @@ out_gem_unload:

intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
+ pm_qos_remove_request(&dev_priv->pm_qos);
destroy_workqueue(dev_priv->wq);
out_mtrrfree:
if (dev_priv->mm.gtt_mtrr >= 0) {
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 130d1db27e28..fa2d15b16739 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -222,7 +222,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
}

sg = st->sgl;
- sg->offset = offset;
+ sg->offset = 0;
sg->length = size;

sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b1a0cdba59e7..2d90f96c19d0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1682,9 +1682,13 @@
* Please check the detailed lore in the commit message for for experimental
* evidence.
*/
-#define PORTD_HOTPLUG_LIVE_STATUS (1 << 29)
-#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS (1 << 27)
+#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
+#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
+#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
+/* VLV DP/HDMI bits again match Bspec */
+#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
+#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
+#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 80feaec88d2b..cfd327c292ee 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2277,18 +2277,34 @@ g4x_dp_detect(struct intel_dp *intel_dp)
return status;
}

- switch (intel_dig_port->port) {
- case PORT_B:
- bit = PORTB_HOTPLUG_LIVE_STATUS;
- break;
- case PORT_C:
- bit = PORTC_HOTPLUG_LIVE_STATUS;
- break;
- case PORT_D:
- bit = PORTD_HOTPLUG_LIVE_STATUS;
- break;
- default:
- return connector_status_unknown;
+ if (IS_VALLEYVIEW(dev)) {
+ switch (intel_dig_port->port) {
+ case PORT_B:
+ bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
+ break;
+ case PORT_C:
+ bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
+ break;
+ case PORT_D:
+ bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
+ break;
+ default:
+ return connector_status_unknown;
+ }
+ } else {
+ switch (intel_dig_port->port) {
+ case PORT_B:
+ bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ case PORT_C:
+ bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ case PORT_D:
+ bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ default:
+ return connector_status_unknown;
+ }
}

if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 48fe23e8d180..629527d205de 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1459,8 +1459,8 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
}

-static int __intel_ring_begin(struct intel_ring_buffer *ring,
- int bytes)
+static int __intel_ring_prepare(struct intel_ring_buffer *ring,
+ int bytes)
{
int ret;

@@ -1476,7 +1476,6 @@ static int __intel_ring_begin(struct intel_ring_buffer *ring,
return ret;
}

- ring->space -= bytes;
return 0;
}

@@ -1491,12 +1490,17 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
if (ret)
return ret;

+ ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
+ if (ret)
+ return ret;
+
/* Preallocate the olr before touching the ring */
ret = intel_ring_alloc_seqno(ring);
if (ret)
return ret;

- return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
+ ring->space -= num_dwords * sizeof(uint32_t);
+ return 0;
}

void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 99e07b688ea8..f6341e8622ee 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1477,11 +1477,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
(mga_vga_calculate_mode_bandwidth(mode, bpp)
> (32700 * 1024))) {
return MODE_BANDWIDTH;
- } else if (mode->type == G200_EH &&
+ } else if (mdev->type == G200_EH &&
(mga_vga_calculate_mode_bandwidth(mode, bpp)
> (37500 * 1024))) {
return MODE_BANDWIDTH;
- } else if (mode->type == G200_ER &&
+ } else if (mdev->type == G200_ER &&
(mga_vga_calculate_mode_bandwidth(mode,
bpp) > (55000 * 1024))) {
return MODE_BANDWIDTH;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7ff10711a4d0..5a5f021d5863 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -788,25 +788,25 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
struct nouveau_mem *node = old_mem->mm_node;
- struct nouveau_bo *nvbo = nouveau_bo(bo);
u64 length = (new_mem->num_pages << PAGE_SHIFT);
u64 src_offset = node->vma[0].offset;
u64 dst_offset = node->vma[1].offset;
+ int src_tiled = !!node->memtype;
+ int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
int ret;

while (length) {
u32 amount, stride, height;

+ ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
+ if (ret)
+ return ret;
+
amount = min(length, (u64)(4 * 1024 * 1024));
stride = 16 * 4;
height = amount / stride;

- if (old_mem->mem_type == TTM_PL_VRAM &&
- nouveau_bo_tile_layout(nvbo)) {
- ret = RING_SPACE(chan, 8);
- if (ret)
- return ret;
-
+ if (src_tiled) {
BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
OUT_RING (chan, 0);
OUT_RING (chan, 0);
@@ -816,19 +816,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
OUT_RING (chan, 0);
OUT_RING (chan, 0);
} else {
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
-
BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
OUT_RING (chan, 1);
}
- if (new_mem->mem_type == TTM_PL_VRAM &&
- nouveau_bo_tile_layout(nvbo)) {
- ret = RING_SPACE(chan, 8);
- if (ret)
- return ret;
-
+ if (dst_tiled) {
BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
OUT_RING (chan, 0);
OUT_RING (chan, 0);
@@ -838,18 +829,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
OUT_RING (chan, 0);
OUT_RING (chan, 0);
} else {
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
-
BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
OUT_RING (chan, 1);
}

- ret = RING_SPACE(chan, 14);
- if (ret)
- return ret;
-
BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
OUT_RING (chan, upper_32_bits(src_offset));
OUT_RING (chan, upper_32_bits(dst_offset));
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 5285a90e9f59..50684dc6ffdf 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -938,11 +938,14 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_
radeon_atombios_get_ppll_ss_info(rdev,
&radeon_crtc->ss,
ATOM_DP_SS_ID1);
- } else
+ } else {
radeon_crtc->ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev,
&radeon_crtc->ss,
ATOM_DP_SS_ID1);
+ }
+ /* disable spread spectrum on DCE3 DP */
+ radeon_crtc->ss_enabled = false;
}
break;
case ATOM_ENCODER_MODE_LVDS:
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 8b6b0ba6ae40..93e26339051d 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3792,8 +3792,8 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}

- /* only one DAC on DCE6 */
- if (!ASIC_IS_DCE6(rdev))
+ /* only one DAC on DCE5 */
+ if (!ASIC_IS_DCE5(rdev))
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
WREG32(DACB_AUTODETECT_INT_CONTROL, 0);

diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index eb8ac315f92f..c7cac07f139b 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -967,7 +967,10 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
if (track->cb_dirty) {
tmp = track->cb_target_mask;
for (i = 0; i < 8; i++) {
- if ((tmp >> (i * 4)) & 0xF) {
+ u32 format = G_028C70_FORMAT(track->cb_color_info[i]);
+
+ if (format != V_028C70_COLOR_INVALID &&
+ (tmp >> (i * 4)) & 0xF) {
/* at least one component is enabled */
if (track->cb_color_bo[i] == NULL) {
dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 4438d1b09325..451d7886644c 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1178,13 +1178,12 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+ u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_ACTION_ENA;

/* flush read cache over gart for this vmid */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
@@ -1200,6 +1199,8 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
+ u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_ACTION_ENA;

/* set to DX10/11 mode */
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
@@ -1224,14 +1225,11 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
(ib->vm ? (ib->vm->id << 24) : 0));

/* flush read cache over gart for this vmid */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 10); /* poll interval */
+ radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */
}

void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index e226faf16fea..e49f7b456038 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -573,6 +573,7 @@
# define PACKET3_DB_ACTION_ENA (1 << 26)
# define PACKET3_SH_ACTION_ENA (1 << 27)
# define PACKET3_SX_ACTION_ENA (1 << 28)
+# define PACKET3_ENGINE_ME (1 << 31)
#define PACKET3_ME_INITIALIZE 0x44
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f19620b472f5..1b8c3707bf42 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2957,14 +2957,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
+ PACKET3_SH_ACTION_ENA;
+
+ if (rdev->family >= CHIP_RV770)
+ cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;

if (rdev->wb.use_event) {
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
/* flush read cache over gart */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
- PACKET3_VC_ACTION_ENA |
- PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
@@ -2978,9 +2981,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
} else {
/* flush read cache over gart */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
- PACKET3_VC_ACTION_ENA |
- PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 01a3ec83f284..745e66eacd47 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -749,7 +749,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
}

for (i = 0; i < 8; i++) {
- if ((tmp >> (i * 4)) & 0xF) {
+ u32 format = G_0280A0_FORMAT(track->cb_color_info[i]);
+
+ if (format != V_0280A0_COLOR_INVALID &&
+ (tmp >> (i * 4)) & 0xF) {
/* at least one component is enabled */
if (track->cb_color_bo[i] == NULL) {
dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 2fd2241b6df9..eb28716e87fa 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1283,6 +1283,7 @@
# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
+# define PACKET3_FULL_CACHE_ENA (1 << 20) /* r7xx+ only */
# define PACKET3_TC_ACTION_ENA (1 << 23)
# define PACKET3_VC_ACTION_ENA (1 << 24)
# define PACKET3_CB_ACTION_ENA (1 << 25)
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 239a4074f122..efb06e34aed7 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2926,6 +2926,10 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
/* tell the bios not to handle mode switching */
bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;

+ /* clear the vbios dpms state */
+ if (ASIC_IS_DCE4(rdev))
+ bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;
+
if (rdev->family >= CHIP_R600) {
WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index fc60b74ee304..e24ca6ab96de 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -1020,6 +1020,9 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
/* Add the default buses */
void radeon_i2c_init(struct radeon_device *rdev)
{
+ if (radeon_hw_i2c)
+ DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
+
if (rdev->is_atom_bios)
radeon_atombios_i2c_init(rdev);
else
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 788c64cb4b47..469ba710b52f 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -561,8 +561,10 @@ void radeon_pm_resume(struct radeon_device *rdev)
rdev->pm.current_clock_mode_index = 0;
rdev->pm.current_sclk = rdev->pm.default_sclk;
rdev->pm.current_mclk = rdev->pm.default_mclk;
- rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
- rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
+ if (rdev->pm.power_state) {
+ rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+ rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
+ }
if (rdev->pm.pm_method == PM_METHOD_DYNPM
&& rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index bf14a6b89b59..3cf38ff09e70 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -4519,7 +4519,7 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
}

if (!ASIC_IS_NODCE(rdev)) {
- WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+ WREG32(DAC_AUTODETECT_INT_CONTROL, 0);

tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD1_INT_CONTROL, tmp);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 108c8d2503c9..9652ed9ec639 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -394,7 +394,7 @@
# define GRPH_PFLIP_INT_MASK (1 << 0)
# define GRPH_PFLIP_INT_TYPE (1 << 8)

-#define DACA_AUTODETECT_INT_CONTROL 0x66c8
+#define DAC_AUTODETECT_INT_CONTROL 0x67c8

#define DC_HPD1_INT_STATUS 0x601c
#define DC_HPD2_INT_STATUS 0x6028
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 4faf02b3657d..49423e913459 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -108,6 +108,7 @@ config I2C_I801
Lynx Point-LP (PCH)
Avoton (SOC)
Wellsburg (PCH)
+ Coleto Creek (PCH)

This driver can also be built as a module. If so, the module
will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 3a6903f63913..4ebceed6bc66 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -58,6 +58,7 @@
Wellsburg (PCH) MS 0x8d7d 32 hard yes yes yes
Wellsburg (PCH) MS 0x8d7e 32 hard yes yes yes
Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes
+ Coleto Creek (PCH) 0x23b0 32 hard yes yes yes

Features supported by this driver:
Software PEC no
@@ -169,6 +170,7 @@
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22
#define PCI_DEVICE_ID_INTEL_AVOTON_SMBUS 0x1f3c
#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330
+#define PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS 0x23b0
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS 0x8d22
@@ -817,6 +819,7 @@ static DEFINE_PCI_DEVICE_TABLE(i801_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) },
{ 0, }
};

diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index d6c7fe7f88d5..3ad651c3356c 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -57,13 +57,20 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
struct qib_sge *sge;
struct ib_wc wc;
u32 length;
+ enum ib_qp_type sqptype, dqptype;

qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
if (!qp) {
ibp->n_pkt_drops++;
return;
}
- if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
+
+ sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
+ IB_QPT_UD : sqp->ibqp.qp_type;
+ dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
+ IB_QPT_UD : qp->ibqp.qp_type;
+
+ if (dqptype != sqptype ||
!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
ibp->n_pkt_drops++;
goto drop;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 25943a683d15..6771e3c94801 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -917,7 +917,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,

/* If range covers entire pagetable, free it */
if (!(start_pfn > level_pfn ||
- last_pfn < level_pfn + level_size(level))) {
+ last_pfn < level_pfn + level_size(level) - 1)) {
dma_clear_pte(pte);
domain_flush_cache(domain, pte, sizeof(*pte));
free_pgtable_page(level_pte);
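The intel-iommu one-liner is an off-by-one fix: a page table covering pfns [level_pfn, level_pfn + level_size(level) - 1] is fully covered only when last_pfn reaches that inclusive upper bound. The containment test in isolation (hypothetical helper, not the driver code):

/* Inclusive-bounds containment check illustrating the fix above. */
static int covers_whole_table(unsigned long start_pfn, unsigned long last_pfn,
                              unsigned long level_pfn, unsigned long level_size)
{
        /* the table's last pfn is base + size - 1, not base + size */
        return start_pfn <= level_pfn &&
               last_pfn >= level_pfn + level_size - 1;
}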
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 3bfc8f1da9fe..29cff90096ad 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -176,8 +176,12 @@ config MD_FAULTY

source "drivers/md/bcache/Kconfig"

+config BLK_DEV_DM_BUILTIN
+ boolean
+
config BLK_DEV_DM
tristate "Device mapper support"
+ select BLK_DEV_DM_BUILTIN
---help---
Device-mapper is a low level volume manager. It works by allowing
people to specify mappings for ranges of logical sectors. Various
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 1439fd4ad9b1..3591a7292381 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_MD_FAULTY) += faulty.o
obj-$(CONFIG_BCACHE) += bcache/
obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
+obj-$(CONFIG_BLK_DEV_DM_BUILTIN) += dm-builtin.o
obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
diff --git a/drivers/md/dm-builtin.c b/drivers/md/dm-builtin.c
new file mode 100644
index 000000000000..6c9049c51b2b
--- /dev/null
+++ b/drivers/md/dm-builtin.c
@@ -0,0 +1,48 @@
+#include "dm.h"
+
+/*
+ * The kobject release method must not be placed in the module itself,
+ * otherwise we are subject to module unload races.
+ *
+ * The release method is called when the last reference to the kobject is
+ * dropped. It may be called by any other kernel code that drops the last
+ * reference.
+ *
+ * The release method suffers from module unload race. We may prevent the
+ * module from being unloaded at the start of the release method (using
+ * increased module reference count or synchronizing against the release
+ * method), however there is no way to prevent the module from being
+ * unloaded at the end of the release method.
+ *
+ * If this code were placed in the dm module, the following race may
+ * happen:
+ * 1. Some other process takes a reference to dm kobject
+ * 2. The user issues ioctl function to unload the dm device
+ * 3. dm_sysfs_exit calls kobject_put, however the object is not released
+ * because of the other reference taken at step 1
+ * 4. dm_sysfs_exit waits on the completion
+ * 5. The other process that took the reference in step 1 drops it,
+ * dm_kobject_release is called from this process
+ * 6. dm_kobject_release calls complete()
+ * 7. a reschedule happens before dm_kobject_release returns
+ * 8. dm_sysfs_exit continues, the dm device is unloaded, module reference
+ * count is decremented
+ * 9. The user unloads the dm module
+ * 10. The other process that was rescheduled in step 7 continues to run,
+ * it is now executing code in unloaded module, so it crashes
+ *
+ * Note that if the process that takes the foreign reference to dm kobject
+ * has a low priority and the system is sufficiently loaded with
+ * higher-priority processes that prevent the low-priority process from
+ * being scheduled long enough, this bug may really happen.
+ *
+ * In order to fix this module unload race, we place the release method
+ * into a helper code that is compiled directly into the kernel.
+ */
+
+void dm_kobject_release(struct kobject *kobj)
+{
+ complete(dm_get_completion_from_kobject(kobj));
+}
+
+EXPORT_SYMBOL(dm_kobject_release);
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index 84d2b91e4efb..c62c5ab6aed5 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -86,6 +86,7 @@ static const struct sysfs_ops dm_sysfs_ops = {
static struct kobj_type dm_ktype = {
.sysfs_ops = &dm_sysfs_ops,
.default_attrs = dm_attrs,
+ .release = dm_kobject_release,
};

/*
@@ -104,5 +105,7 @@ int dm_sysfs_init(struct mapped_device *md)
*/
void dm_sysfs_exit(struct mapped_device *md)
{
- kobject_put(dm_kobject(md));
+ struct kobject *kobj = dm_kobject(md);
+ kobject_put(kobj);
+ wait_for_completion(dm_get_completion_from_kobject(kobj));
}
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 60bce435f4fa..33ac3be2e836 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1349,6 +1349,12 @@ dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)
return td->id;
}

+/*
+ * Check whether @time (of block creation) is older than @td's last snapshot.
+ * If so then the associated block is shared with the last snapshot device.
+ * Any block on a device created *after* the device last got snapshotted is
+ * necessarily not shared.
+ */
static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)
{
return td->snapshotted_time > time;
@@ -1458,6 +1464,20 @@ int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
return r;
}

+int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
+{
+ int r;
+ uint32_t ref_count;
+
+ down_read(&pmd->root_lock);
+ r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
+ if (!r)
+ *result = (ref_count != 0);
+ up_read(&pmd->root_lock);
+
+ return r;
+}
+
bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
{
int r;
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 845ebbe589a9..125c09444019 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -181,6 +181,8 @@ int dm_pool_get_data_block_size(struct dm_pool_metadata *pmd, sector_t *result);

int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);

+int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
+
/*
* Returns -ENOSPC if the new size is too small and already allocated
* blocks would be lost.
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index e36f81e282eb..73c76c565a4d 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -512,6 +512,7 @@ struct dm_thin_new_mapping {
unsigned quiesced:1;
unsigned prepared:1;
unsigned pass_discard:1;
+ unsigned definitely_not_shared:1;

struct thin_c *tc;
dm_block_t virt_block;
@@ -683,7 +684,15 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
cell_defer_no_holder(tc, m->cell2);

if (m->pass_discard)
- remap_and_issue(tc, m->bio, m->data_block);
+ if (m->definitely_not_shared)
+ remap_and_issue(tc, m->bio, m->data_block);
+ else {
+ bool used = false;
+ if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
+ bio_endio(m->bio, 0);
+ else
+ remap_and_issue(tc, m->bio, m->data_block);
+ }
else
bio_endio(m->bio, 0);

@@ -751,13 +760,17 @@ static int ensure_next_mapping(struct pool *pool)

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
- struct dm_thin_new_mapping *r = pool->next_mapping;
+ struct dm_thin_new_mapping *m = pool->next_mapping;

BUG_ON(!pool->next_mapping);

+ memset(m, 0, sizeof(struct dm_thin_new_mapping));
+ INIT_LIST_HEAD(&m->list);
+ m->bio = NULL;
+
pool->next_mapping = NULL;

- return r;
+ return m;
}

static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
@@ -769,15 +782,10 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
struct pool *pool = tc->pool;
struct dm_thin_new_mapping *m = get_next_mapping(pool);

- INIT_LIST_HEAD(&m->list);
- m->quiesced = 0;
- m->prepared = 0;
m->tc = tc;
m->virt_block = virt_block;
m->data_block = data_dest;
m->cell = cell;
- m->err = 0;
- m->bio = NULL;

if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
m->quiesced = 1;
@@ -840,15 +848,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
struct pool *pool = tc->pool;
struct dm_thin_new_mapping *m = get_next_mapping(pool);

- INIT_LIST_HEAD(&m->list);
m->quiesced = 1;
m->prepared = 0;
m->tc = tc;
m->virt_block = virt_block;
m->data_block = data_block;
m->cell = cell;
- m->err = 0;
- m->bio = NULL;

/*
* If the whole block of data is being overwritten or we are not
@@ -1032,12 +1037,12 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
*/
m = get_next_mapping(pool);
m->tc = tc;
- m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown;
+ m->pass_discard = pool->pf.discard_passdown;
+ m->definitely_not_shared = !lookup_result.shared;
m->virt_block = block;
m->data_block = lookup_result.block;
m->cell = cell;
m->cell2 = cell2;
- m->err = 0;
m->bio = bio;

if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
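The dm-thin get_next_mapping() change replaces scattered per-caller field resets with a single memset, so a newly added flag such as definitely_not_shared can never be left stale. The pattern in miniature (illustrative struct, not dm-thin's):

/* Zeroing a reused object in one place means later callers cannot
 * forget to reset a newly added field. Illustrative only. */
#include <string.h>

struct mapping {
        int pass_discard;
        int definitely_not_shared;      /* new flag: always starts at 0 */
};

static void reset_mapping(struct mapping *m)
{
        memset(m, 0, sizeof(*m));       /* every field, old or new */
}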
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 1c13071a81be..204a59fd872f 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -184,8 +184,8 @@ struct mapped_device {
/* forced geometry settings */
struct hd_geometry geometry;

- /* sysfs handle */
- struct kobject kobj;
+ /* kobject and completion */
+ struct dm_kobject_holder kobj_holder;

/* zero-length flush that will be cloned and submitted to targets */
struct bio flush_bio;
@@ -1904,6 +1904,7 @@ static struct mapped_device *alloc_dev(int minor)
init_waitqueue_head(&md->wait);
INIT_WORK(&md->work, dm_wq_work);
init_waitqueue_head(&md->eventq);
+ init_completion(&md->kobj_holder.completion);

md->disk->major = _major;
md->disk->first_minor = minor;
@@ -2735,20 +2736,14 @@ struct gendisk *dm_disk(struct mapped_device *md)

struct kobject *dm_kobject(struct mapped_device *md)
{
- return &md->kobj;
+ return &md->kobj_holder.kobj;
}

-/*
- * struct mapped_device should not be exported outside of dm.c
- * so use this check to verify that kobj is part of md structure
- */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
struct mapped_device *md;

- md = container_of(kobj, struct mapped_device, kobj);
- if (&md->kobj != kobj)
- return NULL;
+ md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

if (test_bit(DMF_FREEING, &md->flags) ||
dm_deleting_md(md))
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 45b97da1bd06..9b3222f44835 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -15,6 +15,8 @@
#include <linux/list.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
+#include <linux/completion.h>
+#include <linux/kobject.h>

/*
* Suspend feature flags
@@ -125,12 +127,27 @@ void dm_interface_exit(void);
/*
* sysfs interface
*/
+struct dm_kobject_holder {
+ struct kobject kobj;
+ struct completion completion;
+};
+
+static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
+{
+ return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
+}
+
int dm_sysfs_init(struct mapped_device *md);
void dm_sysfs_exit(struct mapped_device *md);
struct kobject *dm_kobject(struct mapped_device *md);
struct mapped_device *dm_get_from_kobject(struct kobject *kobj);

/*
+ * The kobject helper
+ */
+void dm_kobject_release(struct kobject *kobj);
+
+/*
* Targets for linear and striped mappings
*/
int dm_linear_init(void);
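Taken together, the dm hunks embed the kobject in a holder next to a completion: the built-in release method signals the completion, and teardown waits on it after kobject_put() so the owning structure cannot be freed while sysfs still holds a reference. A condensed sketch of the pattern (holder names are generic; the kernel calls are the ones the patch uses):

#include <linux/completion.h>
#include <linux/kobject.h>

struct holder {
        struct kobject kobj;
        struct completion done;
};

/* wired up as the ktype's .release; runs when the last ref drops */
static void holder_release(struct kobject *kobj)
{
        complete(&container_of(kobj, struct holder, kobj)->done);
}

static void holder_exit(struct holder *h)
{
        kobject_put(&h->kobj);          /* drop our reference */
        wait_for_completion(&h->done);  /* block until release has run */
}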
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 3e7a88d99eb0..0d240373ffab 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -245,6 +245,10 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
return -EINVAL;
}

+ /*
+ * We need to set this before the dm_tm_new_block() call below.
+ */
+ ll->nr_blocks = nr_blocks;
for (i = old_blocks; i < blocks; i++) {
struct dm_block *b;
struct disk_index_entry idx;

r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b);
if (r < 0)
return r;
+
idx.blocknr = cpu_to_le64(dm_block_location(b));

r = dm_tm_unlock(ll->tm, b);
@@ -266,7 +271,6 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
return r;
}

- ll->nr_blocks = nr_blocks;
return 0;
}

diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 58fc1eef7499..afb419e514bf 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -608,20 +608,38 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
* Flick into a mode where all blocks get allocated in the new area.
*/
smm->begin = old_len;
- memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
+ memcpy(sm, &bootstrap_ops, sizeof(*sm));

/*
* Extend.
*/
r = sm_ll_extend(&smm->ll, extra_blocks);
+ if (r)
+ goto out;

/*
- * Switch back to normal behaviour.
+ * We repeatedly increment then commit until the commit doesn't
+ * allocate any new blocks.
*/
- memcpy(&smm->sm, &ops, sizeof(smm->sm));
- for (i = old_len; !r && i < smm->begin; i++)
- r = sm_ll_inc(&smm->ll, i, &ev);
+ do {
+ for (i = old_len; !r && i < smm->begin; i++) {
+ r = sm_ll_inc(&smm->ll, i, &ev);
+ if (r)
+ goto out;
+ }
+ old_len = smm->begin;
+
+ r = sm_ll_commit(&smm->ll);
+ if (r)
+ goto out;
+
+ } while (old_len != smm->begin);

+out:
+ /*
+ * Switch back to normal behaviour.
+ */
+ memcpy(sm, &ops, sizeof(*sm));
+ return r;
}

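sm_metadata_extend() now loops because committing the reference-count increments can itself allocate metadata blocks, which in turn need counting. The fixed-point iteration in isolation (inc_range()/commit() are hypothetical stand-ins for sm_ll_inc()/sm_ll_commit()):

/* Keep incrementing and committing until a commit allocates nothing
 * new. Hypothetical helpers; 'end' grows when a commit allocates. */
static int inc_range(unsigned long b, unsigned long e);
static int commit(unsigned long *end);

static int extend_until_stable(unsigned long start, unsigned long *end)
{
        unsigned long old;
        int r;

        do {
                r = inc_range(start, *end);     /* count blocks so far */
                if (r)
                        return r;
                old = *end;

                r = commit(end);        /* may allocate, growing *end */
                if (r)
                        return r;

                start = old;    /* next pass counts only the new tail */
        } while (old != *end);

        return 0;
}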
1341     diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
1342     index a54182dd0e91..362de38a7d8e 100644
1343     --- a/drivers/media/dvb-frontends/dib8000.c
1344     +++ b/drivers/media/dvb-frontends/dib8000.c
1345     @@ -157,15 +157,10 @@ static u16 dib8000_i2c_read16(struct i2c_device *i2c, u16 reg)
1346     return ret;
1347     }
1348    
1349     -static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
1350     +static u16 __dib8000_read_word(struct dib8000_state *state, u16 reg)
1351     {
1352     u16 ret;
1353    
1354     - if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
1355     - dprintk("could not acquire lock");
1356     - return 0;
1357     - }
1358     -
1359     state->i2c_write_buffer[0] = reg >> 8;
1360     state->i2c_write_buffer[1] = reg & 0xff;
1361    
1362     @@ -183,6 +178,21 @@ static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
1363     dprintk("i2c read error on %d", reg);
1364    
1365     ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
1366     +
1367     + return ret;
1368     +}
1369     +
1370     +static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
1371     +{
1372     + u16 ret;
1373     +
1374     + if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
1375     + dprintk("could not acquire lock");
1376     + return 0;
1377     + }
1378     +
1379     + ret = __dib8000_read_word(state, reg);
1380     +
1381     mutex_unlock(&state->i2c_buffer_lock);
1382    
1383     return ret;
1384     @@ -192,8 +202,15 @@ static u32 dib8000_read32(struct dib8000_state *state, u16 reg)
1385     {
1386     u16 rw[2];
1387    
1388     - rw[0] = dib8000_read_word(state, reg + 0);
1389     - rw[1] = dib8000_read_word(state, reg + 1);
1390     + if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
1391     + dprintk("could not acquire lock");
1392     + return 0;
1393     + }
1394     +
1395     + rw[0] = __dib8000_read_word(state, reg + 0);
1396     + rw[1] = __dib8000_read_word(state, reg + 1);
1397     +
1398     + mutex_unlock(&state->i2c_buffer_lock);
1399    
1400     return ((rw[0] << 16) | (rw[1]));
1401     }
1402     @@ -2445,7 +2462,8 @@ static int dib8000_autosearch_start(struct dvb_frontend *fe)
1403     if (state->revision == 0x8090)
1404     internal = dib8000_read32(state, 23) / 1000;
1405    
1406     - if (state->autosearch_state == AS_SEARCHING_FFT) {
1407     + if ((state->revision >= 0x8002) &&
1408     + (state->autosearch_state == AS_SEARCHING_FFT)) {
1409     dib8000_write_word(state, 37, 0x0065); /* P_ctrl_pha_off_max default values */
1410     dib8000_write_word(state, 116, 0x0000); /* P_ana_gain to 0 */
1411    
1412     @@ -2481,7 +2499,8 @@ static int dib8000_autosearch_start(struct dvb_frontend *fe)
1413     dib8000_write_word(state, 770, (dib8000_read_word(state, 770) & 0xdfff) | (1 << 13)); /* P_restart_ccg = 1 */
1414     dib8000_write_word(state, 770, (dib8000_read_word(state, 770) & 0xdfff) | (0 << 13)); /* P_restart_ccg = 0 */
1415     dib8000_write_word(state, 0, (dib8000_read_word(state, 0) & 0x7ff) | (0 << 15) | (1 << 13)); /* P_restart_search = 0; */
1416     - } else if (state->autosearch_state == AS_SEARCHING_GUARD) {
1417     + } else if ((state->revision >= 0x8002) &&
1418     + (state->autosearch_state == AS_SEARCHING_GUARD)) {
1419     c->transmission_mode = TRANSMISSION_MODE_8K;
1420     c->guard_interval = GUARD_INTERVAL_1_8;
1421     c->inversion = 0;
1422     @@ -2583,7 +2602,8 @@ static int dib8000_autosearch_irq(struct dvb_frontend *fe)
1423     struct dib8000_state *state = fe->demodulator_priv;
1424     u16 irq_pending = dib8000_read_word(state, 1284);
1425    
1426     - if (state->autosearch_state == AS_SEARCHING_FFT) {
1427     + if ((state->revision >= 0x8002) &&
1428     + (state->autosearch_state == AS_SEARCHING_FFT)) {
1429     if (irq_pending & 0x1) {
1430     dprintk("dib8000_autosearch_irq: max correlation result available");
1431     return 3;
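The dib8000 refactor above is the usual locked-wrapper/unlocked-helper split: __dib8000_read_word() assumes the caller holds i2c_buffer_lock, dib8000_read_word() takes the lock for a single read, and dib8000_read32() takes it once around both halves so the 32-bit value is read consistently and the mutex is never taken twice. A small pthread sketch of the same convention (all names hypothetical):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint16_t regs[2] = { 0x1234, 0x5678 };

    /* __read_word: caller must hold buf_lock */
    static uint16_t __read_word(unsigned reg)
    {
        return regs[reg];
    }

    /* read_word: takes the lock around a single access */
    static uint16_t read_word(unsigned reg)
    {
        pthread_mutex_lock(&buf_lock);
        uint16_t v = __read_word(reg);
        pthread_mutex_unlock(&buf_lock);
        return v;
    }

    /* read32: one critical section covers both halves, so the pair is
     * consistent and the lock is never acquired recursively */
    static uint32_t read32(unsigned reg)
    {
        pthread_mutex_lock(&buf_lock);
        uint32_t hi = __read_word(reg + 0);
        uint32_t lo = __read_word(reg + 1);
        pthread_mutex_unlock(&buf_lock);
        return (hi << 16) | lo;
    }

    int main(void)
    {
        printf("word=0x%04x dword=0x%08x\n", read_word(0), read32(0));
        return 0;
    }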
1432     diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
1433     index 4da5272075cb..02699c111019 100644
1434     --- a/drivers/media/dvb-frontends/m88rs2000.c
1435     +++ b/drivers/media/dvb-frontends/m88rs2000.c
1436     @@ -110,28 +110,94 @@ static u8 m88rs2000_readreg(struct m88rs2000_state *state, u8 reg)
1437     return b1[0];
1438     }
1439    
1440     +static u32 m88rs2000_get_mclk(struct dvb_frontend *fe)
1441     +{
1442     + struct m88rs2000_state *state = fe->demodulator_priv;
1443     + u32 mclk;
1444     + u8 reg;
1445     + /* Must not be 0x00 or 0xff */
1446     + reg = m88rs2000_readreg(state, 0x86);
1447     + if (!reg || reg == 0xff)
1448     + return 0;
1449     +
1450     + reg /= 2;
1451     + reg += 1;
1452     +
1453     + mclk = (u32)(reg * RS2000_FE_CRYSTAL_KHZ + 28 / 2) / 28;
1454     +
1455     + return mclk;
1456     +}
1457     +
1458     +static int m88rs2000_set_carrieroffset(struct dvb_frontend *fe, s16 offset)
1459     +{
1460     + struct m88rs2000_state *state = fe->demodulator_priv;
1461     + u32 mclk;
1462     + s32 tmp;
1463     + u8 reg;
1464     + int ret;
1465     +
1466     + mclk = m88rs2000_get_mclk(fe);
1467     + if (!mclk)
1468     + return -EINVAL;
1469     +
1470     + tmp = (offset * 4096 + (s32)mclk / 2) / (s32)mclk;
1471     + if (tmp < 0)
1472     + tmp += 4096;
1473     +
1474     + /* Carrier Offset */
1475     + ret = m88rs2000_writereg(state, 0x9c, (u8)(tmp >> 4));
1476     +
1477     + reg = m88rs2000_readreg(state, 0x9d);
1478     + reg &= 0xf;
1479     + reg |= (u8)(tmp & 0xf) << 4;
1480     +
1481     + ret |= m88rs2000_writereg(state, 0x9d, reg);
1482     +
1483     + return ret;
1484     +}
1485     +
1486     static int m88rs2000_set_symbolrate(struct dvb_frontend *fe, u32 srate)
1487     {
1488     struct m88rs2000_state *state = fe->demodulator_priv;
1489     int ret;
1490     - u32 temp;
1491     + u64 temp;
1492     + u32 mclk;
1493     u8 b[3];
1494    
1495     if ((srate < 1000000) || (srate > 45000000))
1496     return -EINVAL;
1497    
1498     + mclk = m88rs2000_get_mclk(fe);
1499     + if (!mclk)
1500     + return -EINVAL;
1501     +
1502     temp = srate / 1000;
1503     - temp *= 11831;
1504     - temp /= 68;
1505     - temp -= 3;
1506     + temp *= 1 << 24;
1507     +
1508     + do_div(temp, mclk);
1509    
1510     b[0] = (u8) (temp >> 16) & 0xff;
1511     b[1] = (u8) (temp >> 8) & 0xff;
1512     b[2] = (u8) temp & 0xff;
1513     +
1514     ret = m88rs2000_writereg(state, 0x93, b[2]);
1515     ret |= m88rs2000_writereg(state, 0x94, b[1]);
1516     ret |= m88rs2000_writereg(state, 0x95, b[0]);
1517    
1518     + if (srate > 10000000)
1519     + ret |= m88rs2000_writereg(state, 0xa0, 0x20);
1520     + else
1521     + ret |= m88rs2000_writereg(state, 0xa0, 0x60);
1522     +
1523     + ret |= m88rs2000_writereg(state, 0xa1, 0xe0);
1524     +
1525     + if (srate > 12000000)
1526     + ret |= m88rs2000_writereg(state, 0xa3, 0x20);
1527     + else if (srate > 2800000)
1528     + ret |= m88rs2000_writereg(state, 0xa3, 0x98);
1529     + else
1530     + ret |= m88rs2000_writereg(state, 0xa3, 0x90);
1531     +
1532     deb_info("m88rs2000: m88rs2000_set_symbolrate\n");
1533     return ret;
1534     }
1535     @@ -261,8 +327,6 @@ struct inittab m88rs2000_shutdown[] = {
1536    
1537     struct inittab fe_reset[] = {
1538     {DEMOD_WRITE, 0x00, 0x01},
1539     - {DEMOD_WRITE, 0xf1, 0xbf},
1540     - {DEMOD_WRITE, 0x00, 0x01},
1541     {DEMOD_WRITE, 0x20, 0x81},
1542     {DEMOD_WRITE, 0x21, 0x80},
1543     {DEMOD_WRITE, 0x10, 0x33},
1544     @@ -305,9 +369,6 @@ struct inittab fe_trigger[] = {
1545     {DEMOD_WRITE, 0x9b, 0x64},
1546     {DEMOD_WRITE, 0x9e, 0x00},
1547     {DEMOD_WRITE, 0x9f, 0xf8},
1548     - {DEMOD_WRITE, 0xa0, 0x20},
1549     - {DEMOD_WRITE, 0xa1, 0xe0},
1550     - {DEMOD_WRITE, 0xa3, 0x38},
1551     {DEMOD_WRITE, 0x98, 0xff},
1552     {DEMOD_WRITE, 0xc0, 0x0f},
1553     {DEMOD_WRITE, 0x89, 0x01},
1554     @@ -540,9 +601,8 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
1555     struct dtv_frontend_properties *c = &fe->dtv_property_cache;
1556     fe_status_t status;
1557     int i, ret = 0;
1558     - s32 tmp;
1559     u32 tuner_freq;
1560     - u16 offset = 0;
1561     + s16 offset = 0;
1562     u8 reg;
1563    
1564     state->no_lock_count = 0;
1565     @@ -567,29 +627,26 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
1566     if (ret < 0)
1567     return -ENODEV;
1568    
1569     - offset = tuner_freq - c->frequency;
1570     -
1571     - /* calculate offset assuming 96000kHz*/
1572     - tmp = offset;
1573     - tmp *= 65536;
1574     -
1575     - tmp = (2 * tmp + 96000) / (2 * 96000);
1576     - if (tmp < 0)
1577     - tmp += 65536;
1578     + offset = (s16)((s32)tuner_freq - c->frequency);
1579    
1580     - offset = tmp & 0xffff;
1581     + /* default mclk value 96.4285 * 2 * 1000 = 192857 */
1582     + if (((c->frequency % 192857) >= (192857 - 3000)) ||
1583     + (c->frequency % 192857) <= 3000)
1584     + ret = m88rs2000_writereg(state, 0x86, 0xc2);
1585     + else
1586     + ret = m88rs2000_writereg(state, 0x86, 0xc6);
1587    
1588     - ret = m88rs2000_writereg(state, 0x9a, 0x30);
1589     - /* Unknown usually 0xc6 sometimes 0xc1 */
1590     - reg = m88rs2000_readreg(state, 0x86);
1591     - ret |= m88rs2000_writereg(state, 0x86, reg);
1592     - /* Offset lower nibble always 0 */
1593     - ret |= m88rs2000_writereg(state, 0x9c, (offset >> 8));
1594     - ret |= m88rs2000_writereg(state, 0x9d, offset & 0xf0);
1595     + ret |= m88rs2000_set_carrieroffset(fe, offset);
1596     + if (ret < 0)
1597     + return -ENODEV;
1598    
1599     + /* Reset demod by symbol rate */
1600     + if (c->symbol_rate > 27500000)
1601     + ret = m88rs2000_writereg(state, 0xf1, 0xa4);
1602     + else
1603     + ret = m88rs2000_writereg(state, 0xf1, 0xbf);
1604    
1605     - /* Reset Demod */
1606     - ret = m88rs2000_tab_set(state, fe_reset);
1607     + ret |= m88rs2000_tab_set(state, fe_reset);
1608     if (ret < 0)
1609     return -ENODEV;
1610    
1611     diff --git a/drivers/media/dvb-frontends/m88rs2000.h b/drivers/media/dvb-frontends/m88rs2000.h
1612     index 14ce31e76ae6..0a50ea90736b 100644
1613     --- a/drivers/media/dvb-frontends/m88rs2000.h
1614     +++ b/drivers/media/dvb-frontends/m88rs2000.h
1615     @@ -53,6 +53,8 @@ static inline struct dvb_frontend *m88rs2000_attach(
1616     }
1617     #endif /* CONFIG_DVB_M88RS2000 */
1618    
1619     +#define RS2000_FE_CRYSTAL_KHZ 27000
1620     +
1621     enum {
1622     DEMOD_WRITE = 0x1,
1623     WRITE_DELAY = 0x10,
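The arithmetic introduced in the two m88rs2000 hunks above can be checked stand-alone: register 0x86 selects the PLL, mclk_kHz = (reg/2 + 1) * 27000 / 28 (the `+ 28 / 2` term rounds to nearest), the symbol-rate word is (srate_kHz << 24) / mclk, and the carrier offset becomes a 12-bit two's-complement field. A sketch under those assumptions, with register values picked from the hunks:

    #include <stdint.h>
    #include <stdio.h>

    #define RS2000_FE_CRYSTAL_KHZ 27000

    /* mclk in kHz derived from register 0x86; "+ 28 / 2" rounds to nearest */
    static uint32_t mclk_khz(uint8_t reg86)
    {
        uint32_t reg = reg86 / 2 + 1;
        return (reg * RS2000_FE_CRYSTAL_KHZ + 28 / 2) / 28;
    }

    int main(void)
    {
        uint8_t reg86 = 0xc6;                 /* one of the values written above */
        uint32_t mclk = mclk_khz(reg86);      /* ~96429 kHz */

        uint32_t srate = 27500000;            /* symbol rate in Hz */
        uint64_t temp = (uint64_t)(srate / 1000) << 24;
        temp /= mclk;                         /* fixed-point srate/mclk ratio */

        printf("mclk=%u kHz, regs 0x93..0x95 = %02x %02x %02x\n",
               mclk,
               (unsigned)(temp & 0xff),
               (unsigned)((temp >> 8) & 0xff),
               (unsigned)((temp >> 16) & 0xff));

        /* carrier offset: signed kHz scaled into a 12-bit two's-complement
         * field, mirroring m88rs2000_set_carrieroffset() above */
        int16_t offset = -1500;
        int32_t tmp = ((int32_t)offset * 4096 + (int32_t)mclk / 2) / (int32_t)mclk;
        if (tmp < 0)
            tmp += 4096;
        printf("offset field = 0x%03x\n", (unsigned)tmp);
        return 0;
    }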
1624     diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
1625     index fbca9856313a..4bf057544607 100644
1626     --- a/drivers/media/dvb-frontends/nxt200x.c
1627     +++ b/drivers/media/dvb-frontends/nxt200x.c
1628     @@ -40,7 +40,7 @@
1629     #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1630    
1631     /* Max transfer size done by I2C transfer functions */
1632     -#define MAX_XFER_SIZE 64
1633     +#define MAX_XFER_SIZE 256
1634    
1635     #define NXT2002_DEFAULT_FIRMWARE "dvb-fe-nxt2002.fw"
1636     #define NXT2004_DEFAULT_FIRMWARE "dvb-fe-nxt2004.fw"
1637     diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
1638     index d12faa691af8..961d7ff75427 100644
1639     --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
1640     +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
1641     @@ -177,21 +177,6 @@ unlock:
1642     mutex_unlock(&dev->mfc_mutex);
1643     }
1644    
1645     -static enum s5p_mfc_node_type s5p_mfc_get_node_type(struct file *file)
1646     -{
1647     - struct video_device *vdev = video_devdata(file);
1648     -
1649     - if (!vdev) {
1650     - mfc_err("failed to get video_device");
1651     - return MFCNODE_INVALID;
1652     - }
1653     - if (vdev->index == 0)
1654     - return MFCNODE_DECODER;
1655     - else if (vdev->index == 1)
1656     - return MFCNODE_ENCODER;
1657     - return MFCNODE_INVALID;
1658     -}
1659     -
1660     static void s5p_mfc_clear_int_flags(struct s5p_mfc_dev *dev)
1661     {
1662     mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
1663     @@ -701,6 +686,7 @@ irq_cleanup_hw:
1664     /* Open an MFC node */
1665     static int s5p_mfc_open(struct file *file)
1666     {
1667     + struct video_device *vdev = video_devdata(file);
1668     struct s5p_mfc_dev *dev = video_drvdata(file);
1669     struct s5p_mfc_ctx *ctx = NULL;
1670     struct vb2_queue *q;
1671     @@ -738,7 +724,7 @@ static int s5p_mfc_open(struct file *file)
1672     /* Mark context as idle */
1673     clear_work_bit_irqsave(ctx);
1674     dev->ctx[ctx->num] = ctx;
1675     - if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
1676     + if (vdev == dev->vfd_dec) {
1677     ctx->type = MFCINST_DECODER;
1678     ctx->c_ops = get_dec_codec_ops();
1679     s5p_mfc_dec_init(ctx);
1680     @@ -748,7 +734,7 @@ static int s5p_mfc_open(struct file *file)
1681     mfc_err("Failed to setup mfc controls\n");
1682     goto err_ctrls_setup;
1683     }
1684     - } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
1685     + } else if (vdev == dev->vfd_enc) {
1686     ctx->type = MFCINST_ENCODER;
1687     ctx->c_ops = get_enc_codec_ops();
1688     /* only for encoder */
1689     @@ -793,10 +779,10 @@ static int s5p_mfc_open(struct file *file)
1690     q = &ctx->vq_dst;
1691     q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1692     q->drv_priv = &ctx->fh;
1693     - if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
1694     + if (vdev == dev->vfd_dec) {
1695     q->io_modes = VB2_MMAP;
1696     q->ops = get_dec_queue_ops();
1697     - } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
1698     + } else if (vdev == dev->vfd_enc) {
1699     q->io_modes = VB2_MMAP | VB2_USERPTR;
1700     q->ops = get_enc_queue_ops();
1701     } else {
1702     @@ -815,10 +801,10 @@ static int s5p_mfc_open(struct file *file)
1703     q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
1704     q->io_modes = VB2_MMAP;
1705     q->drv_priv = &ctx->fh;
1706     - if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
1707     + if (vdev == dev->vfd_dec) {
1708     q->io_modes = VB2_MMAP;
1709     q->ops = get_dec_queue_ops();
1710     - } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
1711     + } else if (vdev == dev->vfd_enc) {
1712     q->io_modes = VB2_MMAP | VB2_USERPTR;
1713     q->ops = get_enc_queue_ops();
1714     } else {
1715     diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
1716     index ef4074cd5316..f804c1faa7ff 100644
1717     --- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
1718     +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
1719     @@ -113,15 +113,6 @@ enum s5p_mfc_fmt_type {
1720     };
1721    
1722     /**
1723     - * enum s5p_mfc_node_type - The type of an MFC device node.
1724     - */
1725     -enum s5p_mfc_node_type {
1726     - MFCNODE_INVALID = -1,
1727     - MFCNODE_DECODER = 0,
1728     - MFCNODE_ENCODER = 1,
1729     -};
1730     -
1731     -/**
1732     * enum s5p_mfc_inst_type - The type of an MFC instance.
1733     */
1734     enum s5p_mfc_inst_type {
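The s5p-mfc cleanup above drops the index-derived MFCNODE_* enum and instead compares the video_device pointer from the opened file directly against the device's registered decoder and encoder nodes, which keeps working even if node registration order changes. A generic sketch of identifying a node by pointer identity (types hypothetical):

    #include <stdio.h>

    struct node { const char *name; };

    struct dev {
        struct node dec, enc;
    };

    /* identify an opened node by pointer identity, not by a separately
     * maintained index-to-role mapping */
    static const char *role(struct dev *d, struct node *opened)
    {
        if (opened == &d->dec)
            return "decoder";
        if (opened == &d->enc)
            return "encoder";
        return "invalid";
    }

    int main(void)
    {
        struct dev d = { { "dec" }, { "enc" } };
        printf("%s\n", role(&d, &d.enc));
        return 0;
    }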
1735     diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
1736     index 90cfa35ef6e6..eeab79bdd2aa 100644
1737     --- a/drivers/media/usb/dvb-usb-v2/anysee.c
1738     +++ b/drivers/media/usb/dvb-usb-v2/anysee.c
1739     @@ -442,6 +442,7 @@ static struct cxd2820r_config anysee_cxd2820r_config = {
1740     * IOD[0] ZL10353 1=enabled
1741     * IOE[0] tuner 0=enabled
1742     * tuner is behind ZL10353 I2C-gate
1743     + * tuner is behind TDA10023 I2C-gate
1744     *
1745     * E7 TC VID=1c73 PID=861f HW=18 FW=0.7 AMTCI=0.5 "anysee-E7TC(LP)"
1746     * PCB: 508TC (rev0.6)
1747     @@ -956,7 +957,7 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
1748    
1749     if (fe && adap->fe[1]) {
1750     /* attach tuner for 2nd FE */
1751     - fe = dvb_attach(dvb_pll_attach, adap->fe[0],
1752     + fe = dvb_attach(dvb_pll_attach, adap->fe[1],
1753     (0xc0 >> 1), &d->i2c_adap,
1754     DVB_PLL_SAMSUNG_DTOS403IH102A);
1755     }
1756     diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
1757     index 9f12f91d6296..4be5be34194f 100644
1758     --- a/drivers/mfd/lpc_ich.c
1759     +++ b/drivers/mfd/lpc_ich.c
1760     @@ -51,6 +51,8 @@
1761     * document number TBD : Lynx Point
1762     * document number TBD : Lynx Point-LP
1763     * document number TBD : Wellsburg
1764     + * document number TBD : Avoton SoC
1765     + * document number TBD : Coleto Creek
1766     */
1767    
1768     #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1769     @@ -207,6 +209,8 @@ enum lpc_chipsets {
1770     LPC_LPT, /* Lynx Point */
1771     LPC_LPT_LP, /* Lynx Point-LP */
1772     LPC_WBG, /* Wellsburg */
1773     + LPC_AVN, /* Avoton SoC */
1774     + LPC_COLETO, /* Coleto Creek */
1775     };
1776    
1777     struct lpc_ich_info lpc_chipset_info[] = {
1778     @@ -491,6 +495,14 @@ struct lpc_ich_info lpc_chipset_info[] = {
1779     .name = "Wellsburg",
1780     .iTCO_version = 2,
1781     },
1782     + [LPC_AVN] = {
1783     + .name = "Avoton SoC",
1784     + .iTCO_version = 1,
1785     + },
1786     + [LPC_COLETO] = {
1787     + .name = "Coleto Creek",
1788     + .iTCO_version = 2,
1789     + },
1790     };
1791    
1792     /*
1793     @@ -704,6 +716,11 @@ static DEFINE_PCI_DEVICE_TABLE(lpc_ich_ids) = {
1794     { PCI_VDEVICE(INTEL, 0x8d5d), LPC_WBG},
1795     { PCI_VDEVICE(INTEL, 0x8d5e), LPC_WBG},
1796     { PCI_VDEVICE(INTEL, 0x8d5f), LPC_WBG},
1797     + { PCI_VDEVICE(INTEL, 0x1f38), LPC_AVN},
1798     + { PCI_VDEVICE(INTEL, 0x1f39), LPC_AVN},
1799     + { PCI_VDEVICE(INTEL, 0x1f3a), LPC_AVN},
1800     + { PCI_VDEVICE(INTEL, 0x1f3b), LPC_AVN},
1801     + { PCI_VDEVICE(INTEL, 0x2390), LPC_COLETO},
1802     { 0, }, /* End of list */
1803     };
1804     MODULE_DEVICE_TABLE(pci, lpc_ich_ids);
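The lpc_ich additions follow the driver's existing two-table pattern: an enum-indexed info table plus a PCI ID table whose entry selects the chipset. A compact user-space model (device IDs taken from the hunk above; everything else is illustrative):

    #include <stdio.h>

    enum chip { LPC_AVN, LPC_COLETO };

    static const struct { const char *name; int itco; } info[] = {
        [LPC_AVN]    = { "Avoton SoC",   1 },
        [LPC_COLETO] = { "Coleto Creek", 2 },
    };

    static const struct { unsigned id; enum chip chip; } ids[] = {
        { 0x1f38, LPC_AVN }, { 0x1f39, LPC_AVN },
        { 0x1f3a, LPC_AVN }, { 0x1f3b, LPC_AVN },
        { 0x2390, LPC_COLETO },
    };

    int main(void)
    {
        unsigned probe = 0x2390;   /* device ID seen at probe time */
        for (unsigned i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
            if (ids[i].id == probe)
                printf("%s (iTCO v%d)\n",
                       info[ids[i].chip].name, info[ids[i].chip].itco);
        return 0;
    }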
1805     diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
1806     index 76a3d3a752d8..9aca9462a12f 100644
1807     --- a/drivers/mmc/card/block.c
1808     +++ b/drivers/mmc/card/block.c
1809     @@ -1931,6 +1931,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
1810     struct mmc_card *card = md->queue.card;
1811     struct mmc_host *host = card->host;
1812     unsigned long flags;
1813     + unsigned int cmd_flags = req ? req->cmd_flags : 0;
1814    
1815     if (req && !mq->mqrq_prev->req)
1816     /* claim host only for the first request */
1817     @@ -1946,7 +1947,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
1818     }
1819    
1820     mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
1821     - if (req && req->cmd_flags & REQ_DISCARD) {
1822     + if (cmd_flags & REQ_DISCARD) {
1823     /* complete ongoing async transfer before issuing discard */
1824     if (card->host->areq)
1825     mmc_blk_issue_rw_rq(mq, NULL);
1826     @@ -1955,7 +1956,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
1827     ret = mmc_blk_issue_secdiscard_rq(mq, req);
1828     else
1829     ret = mmc_blk_issue_discard_rq(mq, req);
1830     - } else if (req && req->cmd_flags & REQ_FLUSH) {
1831     + } else if (cmd_flags & REQ_FLUSH) {
1832     /* complete ongoing async transfer before issuing flush */
1833     if (card->host->areq)
1834     mmc_blk_issue_rw_rq(mq, NULL);
1835     @@ -1971,7 +1972,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
1836    
1837     out:
1838     if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
1839     - (req && (req->cmd_flags & MMC_REQ_SPECIAL_MASK)))
1840     + (cmd_flags & MMC_REQ_SPECIAL_MASK))
1841     /*
1842     * Release host when there are no more requests
1843     * and after special request(discard, flush) is done.
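The mmc fix above snapshots req->cmd_flags once on entry because the issue helpers can complete, and thereby free, the request before the final check at out:, so re-reading req->cmd_flags there would be a use-after-free. A tiny sketch of the snapshot-before-possible-free pattern (types and flag values hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    struct request { unsigned cmd_flags; };

    static void issue(struct request *req)
    {
        free(req);                      /* request may be gone after this */
    }

    int main(void)
    {
        struct request *req = malloc(sizeof(*req));
        req->cmd_flags = 0x8;           /* pretend "special request" flag */

        /* snapshot before anything that can complete/free the request */
        unsigned cmd_flags = req ? req->cmd_flags : 0;

        issue(req);

        /* safe: consults the snapshot, never touches *req again */
        if (cmd_flags & 0x8)
            printf("special request was issued\n");
        return 0;
    }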
1844     diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
1845     index b5c95043f7ef..84b054b08462 100644
1846     --- a/drivers/mmc/host/atmel-mci.c
1847     +++ b/drivers/mmc/host/atmel-mci.c
1848     @@ -1188,11 +1188,22 @@ static void atmci_start_request(struct atmel_mci *host,
1849     iflags |= ATMCI_CMDRDY;
1850     cmd = mrq->cmd;
1851     cmdflags = atmci_prepare_command(slot->mmc, cmd);
1852     - atmci_send_command(host, cmd, cmdflags);
1853     +
1854     + /*
1855     + * DMA transfer should be started before sending the command to avoid
1856     + * unexpected errors especially for read operations in SDIO mode.
1857     + * Unfortunately, in PDC mode, command has to be sent before starting
1858     + * the transfer.
1859     + */
1860     + if (host->submit_data != &atmci_submit_data_dma)
1861     + atmci_send_command(host, cmd, cmdflags);
1862    
1863     if (data)
1864     host->submit_data(host, data);
1865    
1866     + if (host->submit_data == &atmci_submit_data_dma)
1867     + atmci_send_command(host, cmd, cmdflags);
1868     +
1869     if (mrq->stop) {
1870     host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
1871     host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
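The atmel-mci fix keys the command/data ordering off which submit_data implementation the host uses: in DMA mode the transfer must be started before the command is sent, while PDC mode requires the command first. A sketch of dispatching on the function pointer this way (names illustrative):

    #include <stdio.h>

    static void submit_data_dma(void) { puts("start DMA transfer"); }
    static void submit_data_pdc(void) { puts("start PDC transfer"); }
    static void send_command(void)    { puts("send command"); }

    static void start_request(void (*submit_data)(void))
    {
        /* PDC needs the command first; DMA needs the transfer first */
        if (submit_data != submit_data_dma)
            send_command();

        submit_data();

        if (submit_data == submit_data_dma)
            send_command();
    }

    int main(void)
    {
        start_request(submit_data_dma);
        start_request(submit_data_pdc);
        return 0;
    }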
1872     diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
1873     index 07e5784e5cd3..9896b636c4eb 100644
1874     --- a/drivers/mtd/nand/mxc_nand.c
1875     +++ b/drivers/mtd/nand/mxc_nand.c
1876     @@ -676,7 +676,6 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
1877     ecc_stat >>= 4;
1878     } while (--no_subpages);
1879    
1880     - mtd->ecc_stats.corrected += ret;
1881     pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
1882    
1883     return ret;
1884     diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
1885     index f1cb706445c7..6ae046bdb263 100644
1886     --- a/drivers/rtc/rtc-cmos.c
1887     +++ b/drivers/rtc/rtc-cmos.c
1888     @@ -34,11 +34,11 @@
1889     #include <linux/interrupt.h>
1890     #include <linux/spinlock.h>
1891     #include <linux/platform_device.h>
1892     -#include <linux/mod_devicetable.h>
1893     #include <linux/log2.h>
1894     #include <linux/pm.h>
1895     #include <linux/of.h>
1896     #include <linux/of_platform.h>
1897     +#include <linux/dmi.h>
1898    
1899     /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
1900     #include <asm-generic/rtc.h>
1901     @@ -377,6 +377,51 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
1902     return 0;
1903     }
1904    
1905     +/*
1906     + * Do not disable RTC alarm on shutdown - workaround for b0rked BIOSes.
1907     + */
1908     +static bool alarm_disable_quirk;
1909     +
1910     +static int __init set_alarm_disable_quirk(const struct dmi_system_id *id)
1911     +{
1912     + alarm_disable_quirk = true;
1913     + pr_info("rtc-cmos: BIOS has alarm-disable quirk. ");
1914     + pr_info("RTC alarms disabled\n");
1915     + return 0;
1916     +}
1917     +
1918     +static const struct dmi_system_id rtc_quirks[] __initconst = {
1919     + /* https://bugzilla.novell.com/show_bug.cgi?id=805740 */
1920     + {
1921     + .callback = set_alarm_disable_quirk,
1922     + .ident = "IBM Truman",
1923     + .matches = {
1924     + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1925     + DMI_MATCH(DMI_PRODUCT_NAME, "4852570"),
1926     + },
1927     + },
1928     + /* https://bugzilla.novell.com/show_bug.cgi?id=812592 */
1929     + {
1930     + .callback = set_alarm_disable_quirk,
1931     + .ident = "Gigabyte GA-990XA-UD3",
1932     + .matches = {
1933     + DMI_MATCH(DMI_SYS_VENDOR,
1934     + "Gigabyte Technology Co., Ltd."),
1935     + DMI_MATCH(DMI_PRODUCT_NAME, "GA-990XA-UD3"),
1936     + },
1937     + },
1938     + /* http://permalink.gmane.org/gmane.linux.kernel/1604474 */
1939     + {
1940     + .callback = set_alarm_disable_quirk,
1941     + .ident = "Toshiba Satellite L300",
1942     + .matches = {
1943     + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1944     + DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
1945     + },
1946     + },
1947     + {}
1948     +};
1949     +
1950     static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
1951     {
1952     struct cmos_rtc *cmos = dev_get_drvdata(dev);
1953     @@ -385,6 +430,9 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
1954     if (!is_valid_irq(cmos->irq))
1955     return -EINVAL;
1956    
1957     + if (alarm_disable_quirk)
1958     + return 0;
1959     +
1960     spin_lock_irqsave(&rtc_lock, flags);
1961    
1962     if (enabled)
1963     @@ -1163,6 +1211,8 @@ static int __init cmos_init(void)
1964     platform_driver_registered = true;
1965     }
1966    
1967     + dmi_check_system(rtc_quirks);
1968     +
1969     if (retval == 0)
1970     return 0;
1971    
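The rtc-cmos change is the standard DMI quirk-table pattern: a table of vendor/product matchers whose callback sets a flag that cmos_alarm_irq_enable() then consults before touching the hardware. A user-space model of the same flow (one board from the table above; the matching logic is simplified):

    #include <stdio.h>
    #include <string.h>

    struct dmi_id {
        const char *vendor, *product;
        int (*callback)(void);
    };

    static int alarm_disable_quirk;

    static int set_quirk(void)
    {
        alarm_disable_quirk = 1;
        return 0;
    }

    static const struct dmi_id quirks[] = {
        { "TOSHIBA", "Satellite L300", set_quirk },
        { NULL, NULL, NULL },
    };

    static void dmi_check(const char *vendor, const char *product)
    {
        for (const struct dmi_id *q = quirks; q->vendor; q++)
            if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
                q->callback();
    }

    int main(void)
    {
        dmi_check("TOSHIBA", "Satellite L300");   /* pretend firmware strings */
        if (alarm_disable_quirk)
            puts("leaving RTC alarm state alone on this board");
        return 0;
    }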
1972     diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
1973     index a4ec5f4ec817..cd716f4cd37f 100644
1974     --- a/drivers/spi/spi-bcm63xx.c
1975     +++ b/drivers/spi/spi-bcm63xx.c
1976     @@ -180,8 +180,6 @@ static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
1977     transfer_list);
1978     }
1979    
1980     - len -= prepend_len;
1981     -
1982     init_completion(&bs->done);
1983    
1984     /* Fill in the Message control register */
1985     diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
1986     index 32b7bb111eb6..ca99ac9295cf 100644
1987     --- a/drivers/spi/spi.c
1988     +++ b/drivers/spi/spi.c
1989     @@ -584,7 +584,9 @@ static void spi_pump_messages(struct kthread_work *work)
1990     ret = master->transfer_one_message(master, master->cur_msg);
1991     if (ret) {
1992     dev_err(&master->dev,
1993     - "failed to transfer one message from queue\n");
1994     + "failed to transfer one message from queue: %d\n", ret);
1995     + master->cur_msg->status = ret;
1996     + spi_finalize_current_message(master);
1997     return;
1998     }
1999     }
2000     diff --git a/fs/dcookies.c b/fs/dcookies.c
2001     index ab5954b50267..ac44a69fbea9 100644
2002     --- a/fs/dcookies.c
2003     +++ b/fs/dcookies.c
2004     @@ -204,7 +204,7 @@ out:
2005     }
2006    
2007     #ifdef CONFIG_COMPAT
2008     -COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, w0, u32, w1, char __user *, buf, size_t, len)
2009     +COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, w0, u32, w1, char __user *, buf, compat_size_t, len)
2010     {
2011     #ifdef __BIG_ENDIAN
2012     return sys_lookup_dcookie(((u64)w0 << 32) | w1, buf, len);
2013     diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
2014     index b74422888604..85cde3e76290 100644
2015     --- a/fs/exofs/ore.c
2016     +++ b/fs/exofs/ore.c
2017     @@ -103,7 +103,7 @@ int ore_verify_layout(unsigned total_comps, struct ore_layout *layout)
2018    
2019     layout->max_io_length =
2020     (BIO_MAX_PAGES_KMALLOC * PAGE_SIZE - layout->stripe_unit) *
2021     - layout->group_width;
2022     + (layout->group_width - layout->parity);
2023     if (layout->parity) {
2024     unsigned stripe_length =
2025     (layout->group_width - layout->parity) *
2026     @@ -286,7 +286,8 @@ int ore_get_rw_state(struct ore_layout *layout, struct ore_components *oc,
2027     if (length) {
2028     ore_calc_stripe_info(layout, offset, length, &ios->si);
2029     ios->length = ios->si.length;
2030     - ios->nr_pages = (ios->length + PAGE_SIZE - 1) / PAGE_SIZE;
2031     + ios->nr_pages = ((ios->offset & (PAGE_SIZE - 1)) +
2032     + ios->length + PAGE_SIZE - 1) / PAGE_SIZE;
2033     if (layout->parity)
2034     _ore_post_alloc_raid_stuff(ios);
2035     }
2036     @@ -536,6 +537,7 @@ void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
2037     u64 H = LmodS - G * T;
2038    
2039     u32 N = div_u64(H, U);
2040     + u32 Nlast;
2041    
2042     /* "H - (N * U)" is just "H % U" so it's bound to u32 */
2043     u32 C = (u32)(H - (N * U)) / stripe_unit + G * group_width;
2044     @@ -568,6 +570,10 @@ void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
2045     si->length = T - H;
2046     if (si->length > length)
2047     si->length = length;
2048     +
2049     + Nlast = div_u64(H + si->length + U - 1, U);
2050     + si->maxdevUnits = Nlast - N;
2051     +
2052     si->M = M;
2053     }
2054     EXPORT_SYMBOL(ore_calc_stripe_info);
2055     @@ -583,13 +589,16 @@ int _ore_add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
2056     int ret;
2057    
2058     if (per_dev->bio == NULL) {
2059     - unsigned pages_in_stripe = ios->layout->group_width *
2060     - (ios->layout->stripe_unit / PAGE_SIZE);
2061     - unsigned nr_pages = ios->nr_pages * ios->layout->group_width /
2062     - (ios->layout->group_width -
2063     - ios->layout->parity);
2064     - unsigned bio_size = (nr_pages + pages_in_stripe) /
2065     - ios->layout->group_width;
2066     + unsigned bio_size;
2067     +
2068     + if (!ios->reading) {
2069     + bio_size = ios->si.maxdevUnits;
2070     + } else {
2071     + bio_size = (ios->si.maxdevUnits + 1) *
2072     + (ios->layout->group_width - ios->layout->parity) /
2073     + ios->layout->group_width;
2074     + }
2075     + bio_size *= (ios->layout->stripe_unit / PAGE_SIZE);
2076    
2077     per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
2078     if (unlikely(!per_dev->bio)) {
2079     @@ -609,8 +618,12 @@ int _ore_add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
2080     added_len = bio_add_pc_page(q, per_dev->bio, pages[pg],
2081     pglen, pgbase);
2082     if (unlikely(pglen != added_len)) {
2083     - ORE_DBGMSG("Failed bio_add_pc_page bi_vcnt=%u\n",
2084     - per_dev->bio->bi_vcnt);
2085     + /* If bi_vcnt == bi_max then this is a SW BUG */
2086     + ORE_DBGMSG("Failed bio_add_pc_page bi_vcnt=0x%x "
2087     + "bi_max=0x%x BIO_MAX=0x%x cur_len=0x%x\n",
2088     + per_dev->bio->bi_vcnt,
2089     + per_dev->bio->bi_max_vecs,
2090     + BIO_MAX_PAGES_KMALLOC, cur_len);
2091     ret = -ENOMEM;
2092     goto out;
2093     }
2094     @@ -1098,7 +1111,7 @@ int ore_truncate(struct ore_layout *layout, struct ore_components *oc,
2095     size_attr->attr = g_attr_logical_length;
2096     size_attr->attr.val_ptr = &size_attr->newsize;
2097    
2098     - ORE_DBGMSG("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
2099     + ORE_DBGMSG2("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
2100     _LLU(oc->comps->obj.id), _LLU(obj_size), i);
2101     ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
2102     &size_attr->attr);
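The ore fix above sizes the page array from the sub-page start offset as well as the length: a transfer that begins partway into a page can straddle one more page than the length alone suggests. A quick numeric check:

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    static unsigned pages_old(unsigned long off, unsigned long len)
    {
        (void)off;                                       /* offset ignored */
        return (len + PAGE_SIZE - 1) / PAGE_SIZE;
    }

    static unsigned pages_new(unsigned long off, unsigned long len)
    {
        /* count from the in-page start offset, as the patch does */
        return ((off & (PAGE_SIZE - 1)) + len + PAGE_SIZE - 1) / PAGE_SIZE;
    }

    int main(void)
    {
        /* 8 KiB starting 512 bytes into a page touches 3 pages, not 2 */
        unsigned long off = 512, len = 8192;
        printf("old=%u new=%u\n", pages_old(off, len), pages_new(off, len));
        return 0;
    }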
2103     diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
2104     index 1d55f9465400..23bf1a52a5da 100644
2105     --- a/fs/fuse/dev.c
2106     +++ b/fs/fuse/dev.c
2107     @@ -1296,22 +1296,6 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
2108     return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
2109     }
2110    
2111     -static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
2112     - struct pipe_buffer *buf)
2113     -{
2114     - return 1;
2115     -}
2116     -
2117     -static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
2118     - .can_merge = 0,
2119     - .map = generic_pipe_buf_map,
2120     - .unmap = generic_pipe_buf_unmap,
2121     - .confirm = generic_pipe_buf_confirm,
2122     - .release = generic_pipe_buf_release,
2123     - .steal = fuse_dev_pipe_buf_steal,
2124     - .get = generic_pipe_buf_get,
2125     -};
2126     -
2127     static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
2128     struct pipe_inode_info *pipe,
2129     size_t len, unsigned int flags)
2130     @@ -1358,7 +1342,11 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
2131     buf->page = bufs[page_nr].page;
2132     buf->offset = bufs[page_nr].offset;
2133     buf->len = bufs[page_nr].len;
2134     - buf->ops = &fuse_dev_pipe_buf_ops;
2135     + /*
2136     + * Need to be careful about this. Having buf->ops in module
2137     + * code can Oops if the buffer persists after module unload.
2138     + */
2139     + buf->ops = &nosteal_pipe_buf_ops;
2140    
2141     pipe->nrbufs++;
2142     page_nr++;
2143     diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
2144     index 4cbad5d6b276..02773aab43c5 100644
2145     --- a/fs/nfs/nfs4client.c
2146     +++ b/fs/nfs/nfs4client.c
2147     @@ -240,13 +240,11 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
2148     error = nfs4_discover_server_trunking(clp, &old);
2149     if (error < 0)
2150     goto error;
2151     - nfs_put_client(clp);
2152     - if (clp != old) {
2153     - clp->cl_preserve_clid = true;
2154     - clp = old;
2155     - }
2156    
2157     - return clp;
2158     + if (clp != old)
2159     + clp->cl_preserve_clid = true;
2160     + nfs_put_client(clp);
2161     + return old;
2162    
2163     error:
2164     nfs_mark_client_ready(clp, error);
2165     @@ -324,9 +322,10 @@ int nfs40_walk_client_list(struct nfs_client *new,
2166     prev = pos;
2167    
2168     status = nfs_wait_client_init_complete(pos);
2169     - spin_lock(&nn->nfs_client_lock);
2170     if (status < 0)
2171     - continue;
2172     + goto out;
2173     + status = -NFS4ERR_STALE_CLIENTID;
2174     + spin_lock(&nn->nfs_client_lock);
2175     }
2176     if (pos->cl_cons_state != NFS_CS_READY)
2177     continue;
2178     @@ -464,7 +463,8 @@ int nfs41_walk_client_list(struct nfs_client *new,
2179     }
2180     spin_lock(&nn->nfs_client_lock);
2181     if (status < 0)
2182     - continue;
2183     + break;
2184     + status = -NFS4ERR_STALE_CLIENTID;
2185     }
2186     if (pos->cl_cons_state != NFS_CS_READY)
2187     continue;
2188     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2189     index 75e49d42a7fb..26e71bdb5b33 100644
2190     --- a/fs/nfs/nfs4proc.c
2191     +++ b/fs/nfs/nfs4proc.c
2192     @@ -6232,9 +6232,9 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
2193     struct nfs_server *server = NFS_SERVER(inode);
2194     struct pnfs_layout_hdr *lo;
2195     struct nfs4_state *state = NULL;
2196     - unsigned long timeo, giveup;
2197     + unsigned long timeo, now, giveup;
2198    
2199     - dprintk("--> %s\n", __func__);
2200     + dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
2201    
2202     if (!nfs41_sequence_done(task, &lgp->res.seq_res))
2203     goto out;
2204     @@ -6242,12 +6242,38 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
2205     switch (task->tk_status) {
2206     case 0:
2207     goto out;
2208     + /*
2209     + * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
2210     + * (or clients) writing to the same RAID stripe
2211     + */
2212     case -NFS4ERR_LAYOUTTRYLATER:
2213     + /*
2214     + * NFS4ERR_RECALLCONFLICT is a conflict with self (must recall
2215     + * existing layout before getting a new one).
2216     + */
2217     case -NFS4ERR_RECALLCONFLICT:
2218     timeo = rpc_get_timeout(task->tk_client);
2219     giveup = lgp->args.timestamp + timeo;
2220     - if (time_after(giveup, jiffies))
2221     - task->tk_status = -NFS4ERR_DELAY;
2222     + now = jiffies;
2223     + if (time_after(giveup, now)) {
2224     + unsigned long delay;
2225     +
2226     + /* Delay for:
2227     + * - Not less than NFS4_POLL_RETRY_MIN.
2228     + * - No later than one jiffy before we give up
2229     + * - exponential backoff (time_now minus start_attempt)
2230     + */
2231     + delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN,
2232     + min((giveup - now - 1),
2233     + now - lgp->args.timestamp));
2234     +
2235     + dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n",
2236     + __func__, delay);
2237     + rpc_delay(task, delay);
2238     + task->tk_status = 0;
2239     + rpc_restart_call_prepare(task);
2240     + goto out; /* Do not call nfs4_async_handle_error() */
2241     + }
2242     break;
2243     case -NFS4ERR_EXPIRED:
2244     case -NFS4ERR_BAD_STATEID:
2245     @@ -6683,7 +6709,7 @@ nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
2246     switch (err) {
2247     case 0:
2248     case -NFS4ERR_WRONGSEC:
2249     - case -NFS4ERR_NOTSUPP:
2250     + case -ENOTSUPP:
2251     goto out;
2252     default:
2253     err = nfs4_handle_exception(server, err, &exception);
2254     @@ -6715,7 +6741,7 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2255     * Fall back on "guess and check" method if
2256     * the server doesn't support SECINFO_NO_NAME
2257     */
2258     - if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
2259     + if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
2260     err = nfs4_find_root_sec(server, fhandle, info);
2261     goto out_freepage;
2262     }
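The layoutget retry above computes a clamped backoff: never less than NFS4_POLL_RETRY_MIN, never later than one jiffy before the giveup time, and otherwise proportional to how long we have already been trying (now - timestamp), which roughly doubles the delay each round. A stand-alone check of that clamp (values are fake jiffies; RETRY_MIN is an assumed stand-in):

    #include <stdio.h>

    #define RETRY_MIN 100UL   /* assumed stand-in for NFS4_POLL_RETRY_MIN */

    static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
    static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

    /* never below RETRY_MIN (even if that overshoots giveup, ending the
     * retries); otherwise the smaller of "one jiffy before giveup" and
     * "time already spent" */
    static unsigned long backoff(unsigned long start, unsigned long now,
                                 unsigned long giveup)
    {
        return max_ul(RETRY_MIN, min_ul(giveup - now - 1, now - start));
    }

    int main(void)
    {
        unsigned long start = 1000, giveup = 9000, now = 1010;

        while (now < giveup) {
            unsigned long d = backoff(start, now, giveup);
            printf("retry at %lu (delay %lu)\n", now, d);
            now += d;
        }
        return 0;
    }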
2263     diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
2264     index 4be8d135ed61..988efb4caac0 100644
2265     --- a/fs/nfs/nfs4xdr.c
2266     +++ b/fs/nfs/nfs4xdr.c
2267     @@ -3002,7 +3002,8 @@ out_overflow:
2268     return -EIO;
2269     }
2270    
2271     -static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
2272     +static bool __decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected,
2273     + int *nfs_retval)
2274     {
2275     __be32 *p;
2276     uint32_t opnum;
2277     @@ -3012,19 +3013,32 @@ static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
2278     if (unlikely(!p))
2279     goto out_overflow;
2280     opnum = be32_to_cpup(p++);
2281     - if (opnum != expected) {
2282     - dprintk("nfs: Server returned operation"
2283     - " %d but we issued a request for %d\n",
2284     - opnum, expected);
2285     - return -EIO;
2286     - }
2287     + if (unlikely(opnum != expected))
2288     + goto out_bad_operation;
2289     nfserr = be32_to_cpup(p);
2290     - if (nfserr != NFS_OK)
2291     - return nfs4_stat_to_errno(nfserr);
2292     - return 0;
2293     + if (nfserr == NFS_OK)
2294     + *nfs_retval = 0;
2295     + else
2296     + *nfs_retval = nfs4_stat_to_errno(nfserr);
2297     + return true;
2298     +out_bad_operation:
2299     + dprintk("nfs: Server returned operation"
2300     + " %d but we issued a request for %d\n",
2301     + opnum, expected);
2302     + *nfs_retval = -EREMOTEIO;
2303     + return false;
2304     out_overflow:
2305     print_overflow_msg(__func__, xdr);
2306     - return -EIO;
2307     + *nfs_retval = -EIO;
2308     + return false;
2309     +}
2310     +
2311     +static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
2312     +{
2313     + int retval;
2314     +
2315     + __decode_op_hdr(xdr, expected, &retval);
2316     + return retval;
2317     }
2318    
2319     /* Dummy routine */
2320     @@ -4842,11 +4856,12 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
2321     uint32_t savewords, bmlen, i;
2322     int status;
2323    
2324     - status = decode_op_hdr(xdr, OP_OPEN);
2325     - if (status != -EIO)
2326     - nfs_increment_open_seqid(status, res->seqid);
2327     - if (!status)
2328     - status = decode_stateid(xdr, &res->stateid);
2329     + if (!__decode_op_hdr(xdr, OP_OPEN, &status))
2330     + return status;
2331     + nfs_increment_open_seqid(status, res->seqid);
2332     + if (status)
2333     + return status;
2334     + status = decode_stateid(xdr, &res->stateid);
2335     if (unlikely(status))
2336     return status;
2337    
2338     diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
2339     index 77cc85dd0db0..f1680cdbd88b 100644
2340     --- a/fs/notify/fanotify/fanotify_user.c
2341     +++ b/fs/notify/fanotify/fanotify_user.c
2342     @@ -867,9 +867,9 @@ COMPAT_SYSCALL_DEFINE6(fanotify_mark,
2343     {
2344     return sys_fanotify_mark(fanotify_fd, flags,
2345     #ifdef __BIG_ENDIAN
2346     - ((__u64)mask1 << 32) | mask0,
2347     -#else
2348     ((__u64)mask0 << 32) | mask1,
2349     +#else
2350     + ((__u64)mask1 << 32) | mask0,
2351     #endif
2352     dfd, pathname);
2353     }
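The fanotify fix swaps the endian cases: a 32-bit caller passes the 64-bit mask as two register words, and which word carries the high half depends on the ABI's endianness. A quick demonstration of reassembling the halves both ways:

    #include <stdint.h>
    #include <stdio.h>

    /* a 32-bit caller splits the 64-bit mask into two register words */
    static uint64_t join(uint32_t hi, uint32_t lo)
    {
        return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
        uint64_t mask = 0x1122334455667788ULL;
        uint32_t w0 = mask >> 32;           /* first word on a BE ABI */
        uint32_t w1 = mask & 0xffffffffu;   /* second word */

        /* big-endian ABI: the first word is the high half */
        printf("BE: %#llx\n", (unsigned long long)join(w0, w1));
        /* little-endian ABI: the second word is the high half */
        printf("LE: %#llx\n", (unsigned long long)join(w1, w0));
        return 0;
    }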
2354     diff --git a/fs/read_write.c b/fs/read_write.c
2355     index 2cefa417be34..f6b7c600eb7f 100644
2356     --- a/fs/read_write.c
2357     +++ b/fs/read_write.c
2358     @@ -947,9 +947,9 @@ out:
2359     return ret;
2360     }
2361    
2362     -COMPAT_SYSCALL_DEFINE3(readv, unsigned long, fd,
2363     +COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd,
2364     const struct compat_iovec __user *,vec,
2365     - unsigned long, vlen)
2366     + compat_ulong_t, vlen)
2367     {
2368     struct fd f = fdget(fd);
2369     ssize_t ret;
2370     @@ -983,9 +983,9 @@ COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd,
2371     return ret;
2372     }
2373    
2374     -COMPAT_SYSCALL_DEFINE5(preadv, unsigned long, fd,
2375     +COMPAT_SYSCALL_DEFINE5(preadv, compat_ulong_t, fd,
2376     const struct compat_iovec __user *,vec,
2377     - unsigned long, vlen, u32, pos_low, u32, pos_high)
2378     + compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
2379     {
2380     loff_t pos = ((loff_t)pos_high << 32) | pos_low;
2381     return compat_sys_preadv64(fd, vec, vlen, pos);
2382     @@ -1013,9 +1013,9 @@ out:
2383     return ret;
2384     }
2385    
2386     -COMPAT_SYSCALL_DEFINE3(writev, unsigned long, fd,
2387     +COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd,
2388     const struct compat_iovec __user *, vec,
2389     - unsigned long, vlen)
2390     + compat_ulong_t, vlen)
2391     {
2392     struct fd f = fdget(fd);
2393     ssize_t ret;
2394     @@ -1049,9 +1049,9 @@ COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd,
2395     return ret;
2396     }
2397    
2398     -COMPAT_SYSCALL_DEFINE5(pwritev, unsigned long, fd,
2399     +COMPAT_SYSCALL_DEFINE5(pwritev, compat_ulong_t, fd,
2400     const struct compat_iovec __user *,vec,
2401     - unsigned long, vlen, u32, pos_low, u32, pos_high)
2402     + compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
2403     {
2404     loff_t pos = ((loff_t)pos_high << 32) | pos_low;
2405     return compat_sys_pwritev64(fd, vec, vlen, pos);
2406     diff --git a/fs/splice.c b/fs/splice.c
2407     index d37431dd60a1..4b5a5fac3383 100644
2408     --- a/fs/splice.c
2409     +++ b/fs/splice.c
2410     @@ -555,6 +555,24 @@ static const struct pipe_buf_operations default_pipe_buf_ops = {
2411     .get = generic_pipe_buf_get,
2412     };
2413    
2414     +static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
2415     + struct pipe_buffer *buf)
2416     +{
2417     + return 1;
2418     +}
2419     +
2420     +/* Pipe buffer operations for a socket and similar. */
2421     +const struct pipe_buf_operations nosteal_pipe_buf_ops = {
2422     + .can_merge = 0,
2423     + .map = generic_pipe_buf_map,
2424     + .unmap = generic_pipe_buf_unmap,
2425     + .confirm = generic_pipe_buf_confirm,
2426     + .release = generic_pipe_buf_release,
2427     + .steal = generic_pipe_buf_nosteal,
2428     + .get = generic_pipe_buf_get,
2429     +};
2430     +EXPORT_SYMBOL(nosteal_pipe_buf_ops);
2431     +
2432     static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
2433     unsigned long vlen, loff_t offset)
2434     {
2435     diff --git a/include/linux/audit.h b/include/linux/audit.h
2436     index 729a4d165bcc..4fb28b23a4a4 100644
2437     --- a/include/linux/audit.h
2438     +++ b/include/linux/audit.h
2439     @@ -135,7 +135,7 @@ static inline void audit_syscall_exit(void *pt_regs)
2440     {
2441     if (unlikely(current->audit_context)) {
2442     int success = is_syscall_success(pt_regs);
2443     - int return_code = regs_return_value(pt_regs);
2444     + long return_code = regs_return_value(pt_regs);
2445    
2446     __audit_syscall_exit(success, return_code);
2447     }
2448     diff --git a/include/linux/compat.h b/include/linux/compat.h
2449     index ec1aee4aec9c..df7060083d85 100644
2450     --- a/include/linux/compat.h
2451     +++ b/include/linux/compat.h
2452     @@ -326,16 +326,16 @@ asmlinkage long compat_sys_keyctl(u32 option,
2453     u32 arg2, u32 arg3, u32 arg4, u32 arg5);
2454     asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
2455    
2456     -asmlinkage ssize_t compat_sys_readv(unsigned long fd,
2457     - const struct compat_iovec __user *vec, unsigned long vlen);
2458     -asmlinkage ssize_t compat_sys_writev(unsigned long fd,
2459     - const struct compat_iovec __user *vec, unsigned long vlen);
2460     -asmlinkage ssize_t compat_sys_preadv(unsigned long fd,
2461     +asmlinkage ssize_t compat_sys_readv(compat_ulong_t fd,
2462     + const struct compat_iovec __user *vec, compat_ulong_t vlen);
2463     +asmlinkage ssize_t compat_sys_writev(compat_ulong_t fd,
2464     + const struct compat_iovec __user *vec, compat_ulong_t vlen);
2465     +asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
2466     const struct compat_iovec __user *vec,
2467     - unsigned long vlen, u32 pos_low, u32 pos_high);
2468     -asmlinkage ssize_t compat_sys_pwritev(unsigned long fd,
2469     + compat_ulong_t vlen, u32 pos_low, u32 pos_high);
2470     +asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
2471     const struct compat_iovec __user *vec,
2472     - unsigned long vlen, u32 pos_low, u32 pos_high);
2473     + compat_ulong_t vlen, u32 pos_low, u32 pos_high);
2474     asmlinkage long comat_sys_lseek(unsigned int, compat_off_t, unsigned int);
2475    
2476     asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
2477     @@ -421,7 +421,7 @@ extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
2478     asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
2479     compat_long_t addr, compat_long_t data);
2480    
2481     -asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, size_t);
2482     +asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
2483     /*
2484     * epoll (fs/eventpoll.c) compat bits follow ...
2485     */
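The compat_ulong_t conversions above (and the matching COMPAT_SYSCALL_DEFINE changes in fs/read_write.c) matter on 64-bit kernels: a 32-bit task only defines the low 32 bits of an argument register, so declaring the parameter as a full unsigned long lets whatever sits in the upper half flow into the value, while compat_ulong_t is a u32 and truncates it as the 32-bit ABI requires. An illustration (behaves as described on an LP64 host):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t compat_ulong_t;

    int main(void)
    {
        /* pretend this register value arrived from a 32-bit task with
         * leftover garbage in the upper half */
        uint64_t reg = 0xdeadbeef00000003ULL;

        unsigned long as_ulong = (unsigned long)reg;     /* keeps the garbage */
        compat_ulong_t as_compat = (compat_ulong_t)reg;  /* truncates to 3 */

        printf("unsigned long  -> %#lx\n", as_ulong);
        printf("compat_ulong_t -> %#x\n", (unsigned)as_compat);
        return 0;
    }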
2486     diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
2487     index b8809fef61f5..ab5752692113 100644
2488     --- a/include/linux/pipe_fs_i.h
2489     +++ b/include/linux/pipe_fs_i.h
2490     @@ -157,6 +157,8 @@ int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
2491     int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
2492     void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
2493    
2494     +extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
2495     +
2496     /* for F_SETPIPE_SZ and F_GETPIPE_SZ */
2497     long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
2498     struct pipe_inode_info *get_pipe_info(struct file *file);
2499     diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
2500     index c586679b6fef..9044769f2296 100644
2501     --- a/include/linux/vmstat.h
2502     +++ b/include/linux/vmstat.h
2503     @@ -142,9 +142,6 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
2504     return x;
2505     }
2506    
2507     -extern unsigned long global_reclaimable_pages(void);
2508     -extern unsigned long zone_reclaimable_pages(struct zone *zone);
2509     -
2510     #ifdef CONFIG_NUMA
2511     /*
2512     * Determine the per node value of a stat item. This function
2513     diff --git a/include/scsi/osd_ore.h b/include/scsi/osd_ore.h
2514     index a5f9b960dfc8..6ca3265a4dca 100644
2515     --- a/include/scsi/osd_ore.h
2516     +++ b/include/scsi/osd_ore.h
2517     @@ -102,6 +102,7 @@ struct ore_striping_info {
2518     unsigned unit_off;
2519     unsigned cur_pg;
2520     unsigned cur_comp;
2521     + unsigned maxdevUnits;
2522     };
2523    
2524     struct ore_io_state;
2525     diff --git a/kernel/audit.c b/kernel/audit.c
2526     index 7ddfd8a00a2a..6def25f1b351 100644
2527     --- a/kernel/audit.c
2528     +++ b/kernel/audit.c
2529     @@ -103,7 +103,8 @@ static int audit_rate_limit;
2530    
2531     /* Number of outstanding audit_buffers allowed. */
2532     static int audit_backlog_limit = 64;
2533     -static int audit_backlog_wait_time = 60 * HZ;
2534     +#define AUDIT_BACKLOG_WAIT_TIME (60 * HZ)
2535     +static int audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME;
2536     static int audit_backlog_wait_overflow = 0;
2537    
2538     /* The identity of the user shutting down the audit system. */
2539     @@ -1135,6 +1136,8 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
2540     return NULL;
2541     }
2542    
2543     + audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME;
2544     +
2545     ab = audit_buffer_alloc(ctx, gfp_mask, type);
2546     if (!ab) {
2547     audit_log_lost("out of memory in audit_log_start");
2548     diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
2549     index 1c5b0fcd83b2..76fefb1613b2 100644
2550     --- a/kernel/time/timekeeping.c
2551     +++ b/kernel/time/timekeeping.c
2552     @@ -72,7 +72,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
2553     tk->wall_to_monotonic = wtm;
2554     set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
2555     tk->offs_real = timespec_to_ktime(tmp);
2556     - tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tk->tai_offset, 0));
2557     + tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
2558     }
2559    
2560     static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
2561     @@ -590,7 +590,7 @@ s32 timekeeping_get_tai_offset(void)
2562     static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
2563     {
2564     tk->tai_offset = tai_offset;
2565     - tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tai_offset, 0));
2566     + tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
2567     }
2568    
2569     /**
2570     @@ -605,6 +605,7 @@ void timekeeping_set_tai_offset(s32 tai_offset)
2571     raw_spin_lock_irqsave(&timekeeper_lock, flags);
2572     write_seqcount_begin(&timekeeper_seq);
2573     __timekeeping_set_tai_offset(tk, tai_offset);
2574     + timekeeping_update(tk, false, true);
2575     write_seqcount_end(&timekeeper_seq);
2576     raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2577     clock_was_set();
2578     @@ -1007,6 +1008,8 @@ static int timekeeping_suspend(void)
2579     timekeeping_suspend_time =
2580     timespec_add(timekeeping_suspend_time, delta_delta);
2581     }
2582     +
2583     + timekeeping_update(tk, false, true);
2584     write_seqcount_end(&timekeeper_seq);
2585     raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2586    
2587     @@ -1236,9 +1239,10 @@ out_adjust:
2588     * It also calls into the NTP code to handle leapsecond processing.
2589     *
2590     */
2591     -static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
2592     +static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
2593     {
2594     u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
2595     + unsigned int clock_set = 0;
2596    
2597     while (tk->xtime_nsec >= nsecps) {
2598     int leap;
2599     @@ -1260,9 +1264,10 @@ static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
2600    
2601     __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
2602    
2603     - clock_was_set_delayed();
2604     + clock_set = 1;
2605     }
2606     }
2607     + return clock_set;
2608     }
2609    
2610     /**
2611     @@ -1275,7 +1280,8 @@ static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
2612     * Returns the unconsumed cycles.
2613     */
2614     static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
2615     - u32 shift)
2616     + u32 shift,
2617     + unsigned int *clock_set)
2618     {
2619     cycle_t interval = tk->cycle_interval << shift;
2620     u64 raw_nsecs;
2621     @@ -1289,7 +1295,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
2622     tk->cycle_last += interval;
2623    
2624     tk->xtime_nsec += tk->xtime_interval << shift;
2625     - accumulate_nsecs_to_secs(tk);
2626     + *clock_set |= accumulate_nsecs_to_secs(tk);
2627    
2628     /* Accumulate raw time */
2629     raw_nsecs = (u64)tk->raw_interval << shift;
2630     @@ -1347,6 +1353,7 @@ static void update_wall_time(void)
2631     struct timekeeper *tk = &shadow_timekeeper;
2632     cycle_t offset;
2633     int shift = 0, maxshift;
2634     + unsigned int clock_set = 0;
2635     unsigned long flags;
2636    
2637     raw_spin_lock_irqsave(&timekeeper_lock, flags);
2638     @@ -1381,7 +1388,8 @@ static void update_wall_time(void)
2639     maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
2640     shift = min(shift, maxshift);
2641     while (offset >= tk->cycle_interval) {
2642     - offset = logarithmic_accumulation(tk, offset, shift);
2643     + offset = logarithmic_accumulation(tk, offset, shift,
2644     + &clock_set);
2645     if (offset < tk->cycle_interval<<shift)
2646     shift--;
2647     }
2648     @@ -1399,7 +1407,7 @@ static void update_wall_time(void)
2649     * Finally, make sure that after the rounding
2650     * xtime_nsec isn't larger than NSEC_PER_SEC
2651     */
2652     - accumulate_nsecs_to_secs(tk);
2653     + clock_set |= accumulate_nsecs_to_secs(tk);
2654    
2655     write_seqcount_begin(&timekeeper_seq);
2656     /* Update clock->cycle_last with the new value */
2657     @@ -1419,6 +1427,10 @@ static void update_wall_time(void)
2658     write_seqcount_end(&timekeeper_seq);
2659     out:
2660     raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2661     + if (clock_set)
2662     + /* have to call outside the timekeeper_seq */
2663     + clock_was_set_delayed();
2664     +
2665     }
2666    
2667     /**
2668     @@ -1677,11 +1689,14 @@ int do_adjtimex(struct timex *txc)
2669    
2670     if (tai != orig_tai) {
2671     __timekeeping_set_tai_offset(tk, tai);
2672     - clock_was_set_delayed();
2673     + timekeeping_update(tk, false, true);
2674     }
2675     write_seqcount_end(&timekeeper_seq);
2676     raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2677    
2678     + if (tai != orig_tai)
2679     + clock_was_set();
2680     +
2681     ntp_notify_cmos_timer();
2682    
2683     return ret;
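The timekeeping sign fix falls out of the offset definitions: realtime = monotonic + offs_real and TAI = realtime + tai_offset, so offs_tai = offs_real + tai_offset; the old ktime_sub() made CLOCK_TAI run behind realtime instead of ahead of it. A numeric check (the 35 s offset is only an example value):

    #include <stdio.h>

    int main(void)
    {
        long long mono = 5000;                       /* CLOCK_MONOTONIC, s */
        long long offs_real = 1391212800 - mono;     /* realtime = mono + offs_real */
        long long tai_offset = 35;                   /* example TAI - UTC value */

        /* CLOCK_TAI = realtime + tai_offset = mono + (offs_real + tai_offset) */
        long long offs_tai = offs_real + tai_offset;

        printf("realtime = %lld\n", mono + offs_real);
        printf("tai      = %lld\n", mono + offs_tai);
        printf("tai - realtime = %lld (should equal tai_offset)\n",
               (mono + offs_tai) - (mono + offs_real));
        return 0;
    }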
2684     diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
2685     index d0c5c3f0d939..4b93b8412252 100644
2686     --- a/kernel/trace/ftrace.c
2687     +++ b/kernel/trace/ftrace.c
2688     @@ -85,6 +85,8 @@ int function_trace_stop __read_mostly;
2689    
2690     /* Current function tracing op */
2691     struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
2692     +/* What to set function_trace_op to */
2693     +static struct ftrace_ops *set_function_trace_op;
2694    
2695     /* List for set_ftrace_pid's pids. */
2696     LIST_HEAD(ftrace_pids);
2697     @@ -278,6 +280,29 @@ static void update_global_ops(void)
2698     global_ops.func = func;
2699     }
2700    
2701     +static void ftrace_sync(struct work_struct *work)
2702     +{
2703     + /*
2704     + * This function is just a stub to implement a hard force
2705     + * of synchronize_sched(). This requires synchronizing
2706     + * tasks even in userspace and idle.
2707     + *
2708     + * Yes, function tracing is rude.
2709     + */
2710     +}
2711     +
2712     +static void ftrace_sync_ipi(void *data)
2713     +{
2714     + /* Probably not needed, but do it anyway */
2715     + smp_rmb();
2716     +}
2717     +
2718     +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2719     +static void update_function_graph_func(void);
2720     +#else
2721     +static inline void update_function_graph_func(void) { }
2722     +#endif
2723     +
2724     static void update_ftrace_function(void)
2725     {
2726     ftrace_func_t func;
2727     @@ -296,16 +321,61 @@ static void update_ftrace_function(void)
2728     !FTRACE_FORCE_LIST_FUNC)) {
2729     /* Set the ftrace_ops that the arch callback uses */
2730     if (ftrace_ops_list == &global_ops)
2731     - function_trace_op = ftrace_global_list;
2732     + set_function_trace_op = ftrace_global_list;
2733     else
2734     - function_trace_op = ftrace_ops_list;
2735     + set_function_trace_op = ftrace_ops_list;
2736     func = ftrace_ops_list->func;
2737     } else {
2738     /* Just use the default ftrace_ops */
2739     - function_trace_op = &ftrace_list_end;
2740     + set_function_trace_op = &ftrace_list_end;
2741     func = ftrace_ops_list_func;
2742     }
2743    
2744     + /* If there's no change, then do nothing more here */
2745     + if (ftrace_trace_function == func)
2746     + return;
2747     +
2748     + update_function_graph_func();
2749     +
2750     + /*
2751     + * If we are using the list function, it doesn't care
2752     + * about the function_trace_ops.
2753     + */
2754     + if (func == ftrace_ops_list_func) {
2755     + ftrace_trace_function = func;
2756     + /*
2757     + * Don't even bother setting function_trace_ops,
2758     + * it would be racy to do so anyway.
2759     + */
2760     + return;
2761     + }
2762     +
2763     +#ifndef CONFIG_DYNAMIC_FTRACE
2764     + /*
2765     + * For static tracing, we need to be a bit more careful.
2766     + * The function change takes effect immediately. Thus,
2767     + * we need to coordinate the setting of the function_trace_ops
2768     + * with the setting of the ftrace_trace_function.
2769     + *
2770     + * Set the function to the list ops, which will call the
2771     + * function we want, albeit indirectly, but it handles the
2772     + * ftrace_ops and doesn't depend on function_trace_op.
2773     + */
2774     + ftrace_trace_function = ftrace_ops_list_func;
2775     + /*
2776     + * Make sure all CPUs see this. Yes this is slow, but static
2777     + * tracing is slow and nasty to have enabled.
2778     + */
2779     + schedule_on_each_cpu(ftrace_sync);
2780     + /* Now all cpus are using the list ops. */
2781     + function_trace_op = set_function_trace_op;
2782     + /* Make sure the function_trace_op is visible on all CPUs */
2783     + smp_wmb();
2784     + /* Nasty way to force a rmb on all cpus */
2785     + smp_call_function(ftrace_sync_ipi, NULL, 1);
2786     + /* OK, we are all set to update the ftrace_trace_function now! */
2787     +#endif /* !CONFIG_DYNAMIC_FTRACE */
2788     +
2789     ftrace_trace_function = func;
2790     }
2791    
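
The hunk above encodes a publish-then-switch ordering: first route every caller through the generic list function (which never dereferences function_trace_op), wait until all CPUs have passed through it, publish the new ops pointer behind a write barrier, force a read barrier on every CPU, and only then install the final callback. A minimal userspace analogue of that ordering, sketched with C11 atomics standing in for smp_wmb()/smp_call_function(), with every name invented for illustration:

    #include <stdatomic.h>
    #include <stdio.h>

    typedef void (*trace_fn)(void *ops);

    static void list_fn(void *ops) { (void)ops; puts("safe list handler"); }
    static void fast_fn(void *ops) { printf("fast handler, ops=%p\n", ops); }

    static _Atomic(void *) current_ops;
    static _Atomic(trace_fn) current_fn = list_fn;

    static void switch_handler(void *new_ops)
    {
        /* Step 1: route everyone through the handler that does not
         * dereference current_ops (the "list func" in the patch). */
        atomic_store(&current_fn, list_fn);
        /* In the kernel, schedule_on_each_cpu(ftrace_sync) waits here
         * until every CPU has left the old handler. */

        /* Step 2: publish the new ops; the release store pairs with the
         * acquire load below, standing in for smp_wmb() + forced rmb. */
        atomic_store_explicit(&current_ops, new_ops, memory_order_release);

        /* Step 3: only now switch to the handler that uses current_ops. */
        atomic_store(&current_fn, fast_fn);
    }

    static void call_handler(void)
    {
        trace_fn fn = atomic_load(&current_fn);
        fn(atomic_load_explicit(&current_ops, memory_order_acquire));
    }

    int main(void)
    {
        static int dummy_ops;
        call_handler();               /* old (list) handler */
        switch_handler(&dummy_ops);
        call_handler();               /* fast handler now sees new ops */
        return 0;
    }

The release/acquire pair plays the role of the wmb-plus-forced-rmb handshake; the kernel avoids acquire semantics in the hot path, which is why the patch instead pushes one read barrier to every CPU via the ftrace_sync_ipi IPI.
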
2792     @@ -428,16 +498,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
2793     } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
2794     ret = remove_ftrace_list_ops(&ftrace_control_list,
2795     &control_ops, ops);
2796     - if (!ret) {
2797     - /*
2798     - * The ftrace_ops is now removed from the list,
2799     - * so there'll be no new users. We must ensure
2800     - * all current users are done before we free
2801     - * the control data.
2802     - */
2803     - synchronize_sched();
2804     - control_ops_free(ops);
2805     - }
2806     } else
2807     ret = remove_ftrace_ops(&ftrace_ops_list, ops);
2808    
2809     @@ -447,13 +507,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
2810     if (ftrace_enabled)
2811     update_ftrace_function();
2812    
2813     - /*
2814     - * Dynamic ops may be freed, we must make sure that all
2815     - * callers are done before leaving this function.
2816     - */
2817     - if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
2818     - synchronize_sched();
2819     -
2820     return 0;
2821     }
2822    
2823     @@ -1952,8 +2005,14 @@ void ftrace_modify_all_code(int command)
2824     else if (command & FTRACE_DISABLE_CALLS)
2825     ftrace_replace_code(0);
2826    
2827     - if (command & FTRACE_UPDATE_TRACE_FUNC)
2828     + if (command & FTRACE_UPDATE_TRACE_FUNC) {
2829     + function_trace_op = set_function_trace_op;
2830     + smp_wmb();
2831     + /* If irqs are disabled, we are in stop machine */
2832     + if (!irqs_disabled())
2833     + smp_call_function(ftrace_sync_ipi, NULL, 1);
2834     ftrace_update_ftrace_func(ftrace_trace_function);
2835     + }
2836    
2837     if (command & FTRACE_START_FUNC_RET)
2838     ftrace_enable_ftrace_graph_caller();
2839     @@ -2116,10 +2175,41 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2840     command |= FTRACE_UPDATE_TRACE_FUNC;
2841     }
2842    
2843     - if (!command || !ftrace_enabled)
2844     + if (!command || !ftrace_enabled) {
2845     + /*
2846     + * If these are control ops, they still need their
2847     + * per_cpu field freed. Since function tracing is
2848     + * not currently active, we can just free them
2849     + * without synchronizing all CPUs.
2850     + */
2851     + if (ops->flags & FTRACE_OPS_FL_CONTROL)
2852     + control_ops_free(ops);
2853     return 0;
2854     + }
2855    
2856     ftrace_run_update_code(command);
2857     +
2858     + /*
2859     + * Dynamic ops may be freed; we must make sure that all
2860     + * callers are done before leaving this function.
2861     + * The same goes for freeing the per_cpu data of the control
2862     + * ops.
2863     + *
2864     + * Again, normal synchronize_sched() is not good enough.
2865     + * We need to do a hard force of sched synchronization.
2866     + * This is because we use preempt_disable() to do RCU, but
2867     + * the function tracers can be called where RCU is not watching
2868     + * (like before user_exit()). We cannot rely on the RCU
2869     + * infrastructure to do the synchronization; thus we must do it
2870     + * ourselves.
2871     + */
2872     + if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
2873     + schedule_on_each_cpu(ftrace_sync);
2874     +
2875     + if (ops->flags & FTRACE_OPS_FL_CONTROL)
2876     + control_ops_free(ops);
2877     + }
2878     +
2879     return 0;
2880     }
2881    
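
The comment block above is the heart of this fix: freeing dynamic or control ops must wait on a stronger guarantee than synchronize_sched() gives, because the callbacks can run where RCU is not watching. One hedged way to picture the schedule_on_each_cpu() trick in ordinary C is a no-op job flushed through every worker before the free; the per-worker locking below is invented purely for the sketch:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define NWORKERS 4

    /* one lock per worker; a worker holds its lock around every callback */
    static pthread_mutex_t worker_lock[NWORKERS];

    static void ftrace_sync_stub(void) { /* counterpart of ftrace_sync() */ }

    /* Stand-in for schedule_on_each_cpu(): once we have taken and released
     * each worker's lock, any callback that started before this call has
     * finished, no matter where it was running. */
    static void hard_sync_all_workers(void)
    {
        for (int i = 0; i < NWORKERS; i++) {
            pthread_mutex_lock(&worker_lock[i]);
            ftrace_sync_stub();
            pthread_mutex_unlock(&worker_lock[i]);
        }
    }

    struct ops {
        int is_control;
        void *per_cpu_data;
    };

    static void shutdown_ops(struct ops *ops, int tracing_active)
    {
        if (!tracing_active) {
            /* nothing can be mid-call: free immediately, as in the patch */
            if (ops->is_control)
                free(ops->per_cpu_data);
            return;
        }
        hard_sync_all_workers();   /* every in-flight callback is done */
        if (ops->is_control)
            free(ops->per_cpu_data);
    }

    int main(void)
    {
        struct ops ops = { 1, malloc(64) };

        for (int i = 0; i < NWORKERS; i++)
            pthread_mutex_init(&worker_lock[i], NULL);
        shutdown_ops(&ops, 1);
        printf("per_cpu data freed safely\n");
        return 0;
    }
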
2882     @@ -4728,6 +4818,7 @@ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
2883     trace_func_graph_ret_t ftrace_graph_return =
2884     (trace_func_graph_ret_t)ftrace_stub;
2885     trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
2886     +static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
2887    
2888     /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
2889     static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
2890     @@ -4869,6 +4960,30 @@ static struct ftrace_ops fgraph_ops __read_mostly = {
2891     FTRACE_OPS_FL_RECURSION_SAFE,
2892     };
2893    
2894     +static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
2895     +{
2896     + if (!ftrace_ops_test(&global_ops, trace->func, NULL))
2897     + return 0;
2898     + return __ftrace_graph_entry(trace);
2899     +}
2900     +
2901     +/*
2902     + * The function graph tracer should only trace the functions defined
2903     + * by set_ftrace_filter and set_ftrace_notrace. If another function
2904     + * tracer ops is registered, the graph tracer requires testing the
2905     + * function against the global ops, rather than tracing any
2906     + * function that some other ftrace_ops has registered.
2907     + */
2908     +static void update_function_graph_func(void)
2909     +{
2910     + if (ftrace_ops_list == &ftrace_list_end ||
2911     + (ftrace_ops_list == &global_ops &&
2912     + global_ops.next == &ftrace_list_end))
2913     + ftrace_graph_entry = __ftrace_graph_entry;
2914     + else
2915     + ftrace_graph_entry = ftrace_graph_entry_test;
2916     +}
2917     +
2918     int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2919     trace_func_graph_ent_t entryfunc)
2920     {
2921     @@ -4893,7 +5008,16 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2922     }
2923    
2924     ftrace_graph_return = retfunc;
2925     - ftrace_graph_entry = entryfunc;
2926     +
2927     + /*
2928     + * Point the indirect function at the entryfunc, and the
2929     + * function that actually gets called at the entry_test first.
2930     + * Then call the fgraph entry update function to determine
2931     + * whether the entryfunc should be called directly or not.
2932     + */
2933     + __ftrace_graph_entry = entryfunc;
2934     + ftrace_graph_entry = ftrace_graph_entry_test;
2935     + update_function_graph_func();
2936    
2937     ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
2938    
2939     @@ -4912,6 +5036,7 @@ void unregister_ftrace_graph(void)
2940     ftrace_graph_active--;
2941     ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
2942     ftrace_graph_entry = ftrace_graph_entry_stub;
2943     + __ftrace_graph_entry = ftrace_graph_entry_stub;
2944     ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
2945     unregister_pm_notifier(&ftrace_suspend_notifier);
2946     unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
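
The last few hunks introduce an indirection for the graph entry callback: when only the graph tracer's own ops are registered, the real callback is called directly; otherwise every call first passes a filter test against global_ops. A small self-contained C sketch of that switch, with the filter and all names made up for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    typedef int (*entry_fn)(unsigned long func);

    static int real_entry(unsigned long func)
    {
        printf("traced %#lx\n", func);
        return 1;
    }

    /* stands in for __ftrace_graph_entry */
    static entry_fn saved_entry = real_entry;

    /* stands in for ftrace_ops_test(&global_ops, ...) */
    static bool filter_match(unsigned long func) { return (func & 1) == 0; }

    static int entry_test(unsigned long func)
    {
        if (!filter_match(func))
            return 0;              /* outside the graph tracer's filter */
        return saved_entry(func);
    }

    static entry_fn graph_entry = real_entry;

    /* counterpart of update_function_graph_func(): take the direct path
     * only when no other tracer ops is registered */
    static void update_graph_entry(bool only_graph_tracer)
    {
        graph_entry = only_graph_tracer ? saved_entry : entry_test;
    }

    int main(void)
    {
        update_graph_entry(false);
        graph_entry(0x1000);   /* passes the filter test */
        graph_entry(0x1001);   /* rejected by the filter */
        update_graph_entry(true);
        graph_entry(0x1001);   /* direct path, no filter test */
        return 0;
    }
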
2947     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2948     index 5546ae9c84f9..6dbdf277c8fe 100644
2949     --- a/kernel/trace/trace.c
2950     +++ b/kernel/trace/trace.c
2951     @@ -424,6 +424,9 @@ int __trace_puts(unsigned long ip, const char *str, int size)
2952     unsigned long irq_flags;
2953     int alloc;
2954    
2955     + if (unlikely(tracing_selftest_running || tracing_disabled))
2956     + return 0;
2957     +
2958     alloc = sizeof(*entry) + size + 2; /* possible \n added */
2959    
2960     local_save_flags(irq_flags);
2961     @@ -464,6 +467,9 @@ int __trace_bputs(unsigned long ip, const char *str)
2962     unsigned long irq_flags;
2963     int size = sizeof(struct bputs_entry);
2964    
2965     + if (unlikely(tracing_selftest_running || tracing_disabled))
2966     + return 0;
2967     +
2968     local_save_flags(irq_flags);
2969     buffer = global_trace.trace_buffer.buffer;
2970     event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
2971     @@ -5878,6 +5884,8 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
2972    
2973     rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
2974    
2975     + buf->tr = tr;
2976     +
2977     buf->buffer = ring_buffer_alloc(size, rb_flags);
2978     if (!buf->buffer)
2979     return -ENOMEM;
2980     diff --git a/mm/memory-failure.c b/mm/memory-failure.c
2981     index 7e3601ce51c6..3b4120e38d48 100644
2982     --- a/mm/memory-failure.c
2983     +++ b/mm/memory-failure.c
2984     @@ -854,14 +854,14 @@ static int page_action(struct page_state *ps, struct page *p,
2985     * the pages and send SIGBUS to the processes if the data was dirty.
2986     */
2987     static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
2988     - int trapno, int flags)
2989     + int trapno, int flags, struct page **hpagep)
2990     {
2991     enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
2992     struct address_space *mapping;
2993     LIST_HEAD(tokill);
2994     int ret;
2995     int kill = 1, forcekill;
2996     - struct page *hpage = compound_head(p);
2997     + struct page *hpage = *hpagep;
2998     struct page *ppage;
2999    
3000     if (PageReserved(p) || PageSlab(p))
3001     @@ -940,11 +940,14 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
3002     * We pinned the head page for hwpoison handling,
3003     * now we split the thp and we are interested in
3004     * the hwpoisoned raw page, so move the refcount
3005     - * to it.
3006     + * to it. Similarly, the page lock is shifted.
3007     */
3008     if (hpage != p) {
3009     put_page(hpage);
3010     get_page(p);
3011     + lock_page(p);
3012     + unlock_page(hpage);
3013     + *hpagep = p;
3014     }
3015     /* THP is split, so ppage should be the real poisoned page. */
3016     ppage = p;
3017     @@ -962,17 +965,11 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
3018     if (kill)
3019     collect_procs(ppage, &tokill);
3020    
3021     - if (hpage != ppage)
3022     - lock_page(ppage);
3023     -
3024     ret = try_to_unmap(ppage, ttu);
3025     if (ret != SWAP_SUCCESS)
3026     printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
3027     pfn, page_mapcount(ppage));
3028    
3029     - if (hpage != ppage)
3030     - unlock_page(ppage);
3031     -
3032     /*
3033     * Now that the dirty bit has been propagated to the
3034     * struct page and all unmaps done we can decide if
3035     @@ -1189,8 +1186,12 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
3036     /*
3037     * Now take care of user space mappings.
3038     * Abort on fail: __delete_from_page_cache() assumes unmapped page.
3039     + *
3040     + * When the raw error page is a thp tail page, hpage points to the raw
3041     + * page after thp split.
3042     */
3043     - if (hwpoison_user_mappings(p, pfn, trapno, flags) != SWAP_SUCCESS) {
3044     + if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
3045     + != SWAP_SUCCESS) {
3046     printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
3047     res = -EBUSY;
3048     goto out;
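
The memory-failure change hands both the reference and the page lock from the THP head page to the raw poisoned page, and reports the new page back through an in/out parameter so the caller's final unlock targets the page that was actually worked on. A hedged pthread analogue of that lock handoff, with all names invented:

    #include <pthread.h>
    #include <stdio.h>

    struct page {
        pthread_mutex_t lock;
        const char *name;
    };

    static void handoff(struct page **lockedp, struct page *raw)
    {
        struct page *head = *lockedp;

        if (head == raw)
            return;                        /* nothing to shift */
        pthread_mutex_lock(&raw->lock);    /* lock_page(p)       */
        pthread_mutex_unlock(&head->lock); /* unlock_page(hpage) */
        *lockedp = raw;                    /* *hpagep = p: caller
                                            * now unlocks this one */
    }

    int main(void)
    {
        struct page head = { PTHREAD_MUTEX_INITIALIZER, "head" };
        struct page raw  = { PTHREAD_MUTEX_INITIALIZER, "raw"  };
        struct page *locked = &head;

        pthread_mutex_lock(&locked->lock);
        handoff(&locked, &raw);
        printf("caller now unlocks %s\n", locked->name);
        pthread_mutex_unlock(&locked->lock);
        return 0;
    }
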
3049     diff --git a/mm/oom_kill.c b/mm/oom_kill.c
3050     index 79e451a78c9e..dfa94ed3c7fa 100644
3051     --- a/mm/oom_kill.c
3052     +++ b/mm/oom_kill.c
3053     @@ -170,7 +170,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
3054     * implementation used by LSMs.
3055     */
3056     if (has_capability_noaudit(p, CAP_SYS_ADMIN))
3057     - adj -= 30;
3058     + points -= (points * 3) / 100;
3059    
3060     /* Normalize to oom_score_adj units */
3061     adj *= totalpages / 1000;
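
The oom_kill change replaces the old flat bonus with a proportional one: root tasks get 3% of their current badness points discounted, so the privilege bonus scales with actual usage instead of being a constant 30 adjustment units. A quick standalone check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        /* points range from 0 to 1000 per the proc.txt text above */
        for (long points = 1000; points >= 250; points -= 250) {
            long discounted = points - (points * 3) / 100;
            printf("points %4ld -> %4ld after root discount\n",
                   points, discounted);
        }
        return 0;   /* prints 1000->970, 750->728, 500->485, 250->243 */
    }
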
3062     diff --git a/mm/page-writeback.c b/mm/page-writeback.c
3063     index aca4364275b5..5a06d4cb9a3d 100644
3064     --- a/mm/page-writeback.c
3065     +++ b/mm/page-writeback.c
3066     @@ -188,6 +188,26 @@ static unsigned long writeout_period_time = 0;
3067     * global dirtyable memory first.
3068     */
3069    
3070     +/**
3071     + * zone_dirtyable_memory - number of dirtyable pages in a zone
3072     + * @zone: the zone
3073     + *
3074     + * Returns the zone's number of pages potentially available for dirty
3075     + * page cache. This is the base value for the per-zone dirty limits.
3076     + */
3077     +static unsigned long zone_dirtyable_memory(struct zone *zone)
3078     +{
3079     + unsigned long nr_pages;
3080     +
3081     + nr_pages = zone_page_state(zone, NR_FREE_PAGES);
3082     + nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
3083     +
3084     + nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
3085     + nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
3086     +
3087     + return nr_pages;
3088     +}
3089     +
3090     static unsigned long highmem_dirtyable_memory(unsigned long total)
3091     {
3092     #ifdef CONFIG_HIGHMEM
3093     @@ -195,11 +215,9 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
3094     unsigned long x = 0;
3095    
3096     for_each_node_state(node, N_HIGH_MEMORY) {
3097     - struct zone *z =
3098     - &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
3099     + struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
3100    
3101     - x += zone_page_state(z, NR_FREE_PAGES) +
3102     - zone_reclaimable_pages(z) - z->dirty_balance_reserve;
3103     + x += zone_dirtyable_memory(z);
3104     }
3105     /*
3106     * Unreclaimable memory (kernel memory or anonymous memory
3107     @@ -235,9 +253,12 @@ static unsigned long global_dirtyable_memory(void)
3108     {
3109     unsigned long x;
3110    
3111     - x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
3112     + x = global_page_state(NR_FREE_PAGES);
3113     x -= min(x, dirty_balance_reserve);
3114    
3115     + x += global_page_state(NR_INACTIVE_FILE);
3116     + x += global_page_state(NR_ACTIVE_FILE);
3117     +
3118     if (!vm_highmem_is_dirtyable)
3119     x -= highmem_dirtyable_memory(x);
3120    
3121     @@ -289,32 +310,6 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
3122     }
3123    
3124     /**
3125     - * zone_dirtyable_memory - number of dirtyable pages in a zone
3126     - * @zone: the zone
3127     - *
3128     - * Returns the zone's number of pages potentially available for dirty
3129     - * page cache. This is the base value for the per-zone dirty limits.
3130     - */
3131     -static unsigned long zone_dirtyable_memory(struct zone *zone)
3132     -{
3133     - /*
3134     - * The effective global number of dirtyable pages may exclude
3135     - * highmem as a big-picture measure to keep the ratio between
3136     - * dirty memory and lowmem reasonable.
3137     - *
3138     - * But this function is purely about the individual zone and a
3139     - * highmem zone can hold its share of dirty pages, so we don't
3140     - * care about vm_highmem_is_dirtyable here.
3141     - */
3142     - unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
3143     - zone_reclaimable_pages(zone);
3144     -
3145     - /* don't allow this to underflow */
3146     - nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
3147     - return nr_pages;
3148     -}
3149     -
3150     -/**
3151     * zone_dirty_limit - maximum number of dirty pages allowed in a zone
3152     * @zone: the zone
3153     *
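
zone_dirtyable_memory() as moved above computes: free pages, minus the zone's dirty_balance_reserve clamped so the subtraction cannot underflow, plus the inactive and active file LRU pages. A small sketch mirroring that formula, with made-up numbers:

    #include <stdio.h>

    static unsigned long dirtyable(unsigned long nr_free,
                                   unsigned long reserve,
                                   unsigned long inactive_file,
                                   unsigned long active_file)
    {
        unsigned long nr = nr_free;

        nr -= (nr < reserve) ? nr : reserve;  /* min() keeps it unsigned-safe */
        return nr + inactive_file + active_file;
    }

    int main(void)
    {
        /* e.g. 10000 free, 1200 reserved, 3000 + 2000 file pages */
        printf("%lu dirtyable pages\n", dirtyable(10000, 1200, 3000, 2000));
        /* reserve larger than free: clamps to 0 instead of underflowing */
        printf("%lu dirtyable pages\n", dirtyable(500, 1200, 3000, 2000));
        return 0;   /* prints 13800, then 5000 */
    }
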
3154     diff --git a/mm/slub.c b/mm/slub.c
3155     index c34bd44e8be9..deaed7b47213 100644
3156     --- a/mm/slub.c
3157     +++ b/mm/slub.c
3158     @@ -4285,7 +4285,13 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
3159    
3160     page = ACCESS_ONCE(c->partial);
3161     if (page) {
3162     - x = page->pobjects;
3163     + node = page_to_nid(page);
3164     + if (flags & SO_TOTAL)
3165     + WARN_ON_ONCE(1);
3166     + else if (flags & SO_OBJECTS)
3167     + WARN_ON_ONCE(1);
3168     + else
3169     + x = page->pages;
3170     total += x;
3171     nodes[node] += x;
3172     }
3173     diff --git a/mm/vmscan.c b/mm/vmscan.c
3174     index 7dbdb6afd101..43ddef3cf44f 100644
3175     --- a/mm/vmscan.c
3176     +++ b/mm/vmscan.c
3177     @@ -2117,6 +2117,20 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
3178     return aborted_reclaim;
3179     }
3180    
3181     +static unsigned long zone_reclaimable_pages(struct zone *zone)
3182     +{
3183     + int nr;
3184     +
3185     + nr = zone_page_state(zone, NR_ACTIVE_FILE) +
3186     + zone_page_state(zone, NR_INACTIVE_FILE);
3187     +
3188     + if (get_nr_swap_pages() > 0)
3189     + nr += zone_page_state(zone, NR_ACTIVE_ANON) +
3190     + zone_page_state(zone, NR_INACTIVE_ANON);
3191     +
3192     + return nr;
3193     +}
3194     +
3195     static bool zone_reclaimable(struct zone *zone)
3196     {
3197     return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
3198     @@ -3075,41 +3089,6 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
3199     wake_up_interruptible(&pgdat->kswapd_wait);
3200     }
3201    
3202     -/*
3203     - * The reclaimable count would be mostly accurate.
3204     - * The less reclaimable pages may be
3205     - * - mlocked pages, which will be moved to unevictable list when encountered
3206     - * - mapped pages, which may require several travels to be reclaimed
3207     - * - dirty pages, which is not "instantly" reclaimable
3208     - */
3209     -unsigned long global_reclaimable_pages(void)
3210     -{
3211     - int nr;
3212     -
3213     - nr = global_page_state(NR_ACTIVE_FILE) +
3214     - global_page_state(NR_INACTIVE_FILE);
3215     -
3216     - if (get_nr_swap_pages() > 0)
3217     - nr += global_page_state(NR_ACTIVE_ANON) +
3218     - global_page_state(NR_INACTIVE_ANON);
3219     -
3220     - return nr;
3221     -}
3222     -
3223     -unsigned long zone_reclaimable_pages(struct zone *zone)
3224     -{
3225     - int nr;
3226     -
3227     - nr = zone_page_state(zone, NR_ACTIVE_FILE) +
3228     - zone_page_state(zone, NR_INACTIVE_FILE);
3229     -
3230     - if (get_nr_swap_pages() > 0)
3231     - nr += zone_page_state(zone, NR_ACTIVE_ANON) +
3232     - zone_page_state(zone, NR_INACTIVE_ANON);
3233     -
3234     - return nr;
3235     -}
3236     -
3237     #ifdef CONFIG_HIBERNATION
3238     /*
3239     * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
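
The now zone-local zone_reclaimable_pages() counts file LRU pages plus, only when swap is available, anon LRU pages; zone_reclaimable() then treats a zone as exhausted once it has been scanned six times that count. A toy illustration of the heuristic with invented numbers:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long reclaimable_pages(unsigned long file_lru,
                                           unsigned long anon_lru,
                                           bool swap_available)
    {
        /* anon pages only count if there is somewhere to swap them */
        return file_lru + (swap_available ? anon_lru : 0);
    }

    int main(void)
    {
        unsigned long nr = reclaimable_pages(4000, 6000, false); /* 4000 */
        unsigned long scanned = 30000;

        /* 30000 < 4000 * 6 is false: the zone is considered dead */
        printf("zone_reclaimable: %s\n",
               scanned < nr * 6 ? "yes" : "no");
        return 0;
    }
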
3240     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
3241     index c35b81b80fe2..20ee14d0a8a9 100644
3242     --- a/net/core/skbuff.c
3243     +++ b/net/core/skbuff.c
3244     @@ -74,36 +74,6 @@
3245     struct kmem_cache *skbuff_head_cache __read_mostly;
3246     static struct kmem_cache *skbuff_fclone_cache __read_mostly;
3247    
3248     -static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
3249     - struct pipe_buffer *buf)
3250     -{
3251     - put_page(buf->page);
3252     -}
3253     -
3254     -static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
3255     - struct pipe_buffer *buf)
3256     -{
3257     - get_page(buf->page);
3258     -}
3259     -
3260     -static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
3261     - struct pipe_buffer *buf)
3262     -{
3263     - return 1;
3264     -}
3265     -
3266     -
3267     -/* Pipe buffer operations for a socket. */
3268     -static const struct pipe_buf_operations sock_pipe_buf_ops = {
3269     - .can_merge = 0,
3270     - .map = generic_pipe_buf_map,
3271     - .unmap = generic_pipe_buf_unmap,
3272     - .confirm = generic_pipe_buf_confirm,
3273     - .release = sock_pipe_buf_release,
3274     - .steal = sock_pipe_buf_steal,
3275     - .get = sock_pipe_buf_get,
3276     -};
3277     -
3278     /**
3279     * skb_panic - private function for out-of-line support
3280     * @skb: buffer
3281     @@ -1811,7 +1781,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
3282     .partial = partial,
3283     .nr_pages_max = MAX_SKB_FRAGS,
3284     .flags = flags,
3285     - .ops = &sock_pipe_buf_ops,
3286     + .ops = &nosteal_pipe_buf_ops,
3287     .spd_release = sock_spd_release,
3288     };
3289     struct sk_buff *frag_iter;
3290     diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
3291     index 5b1bf7b530f1..3524a8b50046 100644
3292     --- a/net/sunrpc/clnt.c
3293     +++ b/net/sunrpc/clnt.c
3294     @@ -1402,9 +1402,13 @@ call_refreshresult(struct rpc_task *task)
3295     task->tk_action = call_refresh;
3296     switch (status) {
3297     case 0:
3298     - if (rpcauth_uptodatecred(task))
3299     + if (rpcauth_uptodatecred(task)) {
3300     task->tk_action = call_allocate;
3301     - return;
3302     + return;
3303     + }
3304     + /* Use rate-limiting and a max number of retries if refresh
3305     + * had status 0 but failed to update the cred.
3306     + */
3307     case -ETIMEDOUT:
3308     rpc_delay(task, 3*HZ);
3309     case -EAGAIN:
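
The sunrpc change makes a zero status with a still-stale credential fall through to the -ETIMEDOUT branch, so it picks up the same delayed, rate-limited retry instead of looping at full speed. A compact sketch of that switch flow, with error codes hard-coded and names invented:

    #include <stdio.h>

    enum action { ALLOCATE, DELAYED_RETRY, RETRY, FAIL };

    static enum action refresh_result(int status, int cred_up_to_date)
    {
        switch (status) {
        case 0:
            if (cred_up_to_date)
                return ALLOCATE;        /* refreshed successfully */
            /* fall through: refresh "succeeded" but cred is still stale */
        case -110 /* ETIMEDOUT */:
            return DELAYED_RETRY;       /* rpc_delay(task, 3*HZ) first */
        case -11 /* EAGAIN */:
            return RETRY;
        default:
            return FAIL;
        }
    }

    int main(void)
    {
        printf("%d\n", refresh_result(0, 0));   /* DELAYED_RETRY (1) */
        printf("%d\n", refresh_result(0, 1));   /* ALLOCATE (0) */
        return 0;
    }
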
3310     diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
3311     index 9cd9b7c661ec..142a59f39796 100644
3312     --- a/security/selinux/ss/policydb.c
3313     +++ b/security/selinux/ss/policydb.c
3314     @@ -1941,7 +1941,19 @@ static int filename_trans_read(struct policydb *p, void *fp)
3315     if (rc)
3316     goto out;
3317    
3318     - hashtab_insert(p->filename_trans, ft, otype);
3319     + rc = hashtab_insert(p->filename_trans, ft, otype);
3320     + if (rc) {
3321     + /*
3322     + * Do not return -EEXIST to the caller, or the system
3323     + * will not boot.
3324     + */
3325     + if (rc != -EEXIST)
3326     + goto out;
3327     + /* But free the memory to avoid a memory leak. */
3328     + kfree(ft);
3329     + kfree(name);
3330     + kfree(otype);
3331     + }
3332     }
3333     hash_eval(p->filename_trans, "filenametr");
3334     return 0;
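
The policydb fix checks hashtab_insert()'s return value: a real error aborts the load, but a duplicate filename transition (-EEXIST) is tolerated so the system still boots, and the never-inserted entry is freed rather than leaked. A toy hash table demonstrating the same error-handling shape, with all names invented:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static const char *table[8];

    static int insert(const char *key)
    {
        unsigned slot = strlen(key) % 8;   /* toy hash */
        if (table[slot])
            return -EEXIST;
        table[slot] = key;
        return 0;
    }

    int main(void)
    {
        char *dup;
        int rc;

        insert("tmpfile");                 /* first entry wins */
        dup = strdup("tmpfile");
        rc = insert(dup);
        if (rc) {
            if (rc != -EEXIST)
                return 1;                  /* real error: abort the load */
            free(dup);                     /* duplicate: tolerate, don't leak */
        }
        return 0;
    }
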
3335     diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
3336     index f09641da40d4..d1b3a361e526 100644
3337     --- a/tools/power/x86/turbostat/Makefile
3338     +++ b/tools/power/x86/turbostat/Makefile
3339     @@ -5,7 +5,7 @@ DESTDIR :=
3340    
3341     turbostat : turbostat.c
3342     CFLAGS += -Wall
3343     -CFLAGS += -I../../../../arch/x86/include/uapi/
3344     +CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"'
3345    
3346     %: %.c
3347     @mkdir -p $(BUILD_OUTPUT)
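
The Makefile change stops pointing -I at the kernel uapi tree and instead passes the one header turbostat needs as a quoted-string macro on the command line, which the .c file then pulls in via '#include MSRHEADER'. A minimal standalone demonstration of the technique; the fallback define exists only so this sketch compiles without the -D flag:

    /* build as: cc -DMSRHEADER='"path/to/msr-index.h"' demo.c */
    #ifndef MSRHEADER
    #define MSRHEADER <stdint.h>   /* stand-in so the sketch compiles alone */
    #endif
    #include MSRHEADER             /* macro-expanded (computed) include */
    #include <stdio.h>

    int main(void)
    {
        printf("included MSRHEADER successfully\n");
        return 0;
    }
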
3348     diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
3349     index fe702076ca46..6a7ee5f21c9b 100644
3350     --- a/tools/power/x86/turbostat/turbostat.c
3351     +++ b/tools/power/x86/turbostat/turbostat.c
3352     @@ -20,7 +20,7 @@
3353     */
3354    
3355     #define _GNU_SOURCE
3356     -#include <asm/msr.h>
3357     +#include MSRHEADER
3358     #include <stdio.h>
3359     #include <unistd.h>
3360     #include <sys/types.h>
3361     @@ -35,6 +35,7 @@
3362     #include <string.h>
3363     #include <ctype.h>
3364     #include <sched.h>
3365     +#include <cpuid.h>
3366    
3367     char *proc_stat = "/proc/stat";
3368     unsigned int interval_sec = 5; /* set with -i interval_sec */
3369     @@ -1894,7 +1895,7 @@ void check_cpuid()
3370    
3371     eax = ebx = ecx = edx = 0;
3372    
3373     - asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0));
3374     + __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
3375    
3376     if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
3377     genuine_intel = 1;
3378     @@ -1903,7 +1904,7 @@ void check_cpuid()
3379     fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
3380     (char *)&ebx, (char *)&edx, (char *)&ecx);
3381    
3382     - asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
3383     + __get_cpuid(1, &fms, &ebx, &ecx, &edx);
3384     family = (fms >> 8) & 0xf;
3385     model = (fms >> 4) & 0xf;
3386     stepping = fms & 0xf;
3387     @@ -1925,7 +1926,7 @@ void check_cpuid()
3388     * This check is valid for both Intel and AMD.
3389     */
3390     ebx = ecx = edx = 0;
3391     - asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000000));
3392     + __get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
3393    
3394     if (max_level < 0x80000007) {
3395     fprintf(stderr, "CPUID: no invariant TSC (max_level 0x%x)\n", max_level);
3396     @@ -1936,7 +1937,7 @@ void check_cpuid()
3397     * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
3398     * this check is valid for both Intel and AMD
3399     */
3400     - asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007));
3401     + __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
3402     has_invariant_tsc = edx & (1 << 8);
3403    
3404     if (!has_invariant_tsc) {
3405     @@ -1949,7 +1950,7 @@ void check_cpuid()
3406     * this check is valid for both Intel and AMD
3407     */
3408    
3409     - asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6));
3410     + __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
3411     has_aperf = ecx & (1 << 0);
3412     do_dts = eax & (1 << 0);
3413     do_ptm = eax & (1 << 6);
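
The turbostat conversions swap hand-written cpuid asm for gcc's <cpuid.h> helper, which handles the clobbered registers (notably %ebx under PIC) and fills all four outputs. A hedged minimal example of the same call pattern, x86-only by nature:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

        if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
            return 1;                       /* leaf not supported */

        /* the vendor string lives in ebx, edx, ecx, in that order */
        printf("max basic leaf %u, vendor %.4s%.4s%.4s\n",
               eax, (char *)&ebx, (char *)&edx, (char *)&ecx);
        return 0;
    }
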