Magellan Linux

Contents of /trunk/kernel-alx/patches-3.12/0110-3.12.11-all-fixes.patch

Parent Directory | Revision Log


Revision 2423
Tue Mar 25 12:29:50 2014 UTC by niro
File size: 153425 bytes
-added 3.12 branch
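
The patch below takes the tree from 3.12.10 to 3.12.11 (see the Makefile SUBLEVEL bump) and bundles that release's stable fixes, touching documentation, ARM/SH/Tile/x86/Xtensa arch code, and a range of drivers (ACPI, Xen frontends, DRM, InfiniBand, IOMMU, and device-mapper).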
1 diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
2 index 823c95faebd2..c4564e1b42e6 100644
3 --- a/Documentation/filesystems/proc.txt
4 +++ b/Documentation/filesystems/proc.txt
5 @@ -1376,8 +1376,8 @@ may allocate from based on an estimation of its current memory and swap use.
6 For example, if a task is using all allowed memory, its badness score will be
7 1000. If it is using half of its allowed memory, its score will be 500.
8
9 -There is an additional factor included in the badness score: root
10 -processes are given 3% extra memory over other tasks.
11 +There is an additional factor included in the badness score: the current memory
12 +and swap usage is discounted by 3% for root processes.
13
14 The amount of "allowed" memory depends on the context in which the oom killer
15 was called. If it is due to the memory assigned to the allocating task's cpuset
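
To make the reworded rule concrete, a sketch of the arithmetic on the 0..1000 badness scale described above: a root-owned task using all of its allowed memory has 3% of that usage discounted, so it scores roughly 1000 - 30 = 970, whereas the old wording described root tasks as simply getting "3% extra memory".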
16 diff --git a/Makefile b/Makefile
17 index 49b64402f947..b9e092666bf9 100644
18 --- a/Makefile
19 +++ b/Makefile
20 @@ -1,6 +1,6 @@
21 VERSION = 3
22 PATCHLEVEL = 12
23 -SUBLEVEL = 10
24 +SUBLEVEL = 11
25 EXTRAVERSION =
26 NAME = One Giant Leap for Frogkind
27
28 diff --git a/arch/arm/mach-mvebu/mvebu-soc-id.c b/arch/arm/mach-mvebu/mvebu-soc-id.c
29 index fe4fc1cbdfaf..f3b325f6cbd4 100644
30 --- a/arch/arm/mach-mvebu/mvebu-soc-id.c
31 +++ b/arch/arm/mach-mvebu/mvebu-soc-id.c
32 @@ -88,7 +88,7 @@ static int __init mvebu_soc_id_init(void)
33 }
34
35 pci_base = of_iomap(child, 0);
36 - if (IS_ERR(pci_base)) {
37 + if (pci_base == NULL) {
38 pr_err("cannot map registers\n");
39 ret = -ENOMEM;
40 goto res_ioremap;
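
The hunk above swaps an IS_ERR() test for a NULL test because of_iomap() reports failure with NULL, not an ERR_PTR-encoded value. A minimal sketch of the correct pattern:

    pci_base = of_iomap(child, 0);
    if (!pci_base) {                 /* of_iomap() returns NULL on failure */
        pr_err("cannot map registers\n");
        return -ENOMEM;              /* IS_ERR(pci_base) would never fire here */
    }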
41 diff --git a/arch/arm/plat-orion/irq.c b/arch/arm/plat-orion/irq.c
42 index c492e1b3dfdb..807df142444b 100644
43 --- a/arch/arm/plat-orion/irq.c
44 +++ b/arch/arm/plat-orion/irq.c
45 @@ -15,8 +15,51 @@
46 #include <linux/io.h>
47 #include <linux/of_address.h>
48 #include <linux/of_irq.h>
49 +#include <asm/exception.h>
50 #include <plat/irq.h>
51 #include <plat/orion-gpio.h>
52 +#include <mach/bridge-regs.h>
53 +
54 +#ifdef CONFIG_MULTI_IRQ_HANDLER
55 +/*
56 + * Compiling with both non-DT and DT support enabled, will
57 + * break asm irq handler used by non-DT boards. Therefore,
58 + * we provide a C-style irq handler even for non-DT boards,
59 + * if MULTI_IRQ_HANDLER is set.
60 + *
61 + * Notes:
62 + * - this is prepared for Kirkwood and Dove only, update
63 + * accordingly if you add Orion5x or MV78x00.
64 + * - Orion5x uses different macro names and has only one
65 + * set of CAUSE/MASK registers.
66 + * - MV78x00 uses the same macro names but has a third
67 + * set of CAUSE/MASK registers.
68 + *
69 + */
70 +
71 +static void __iomem *orion_irq_base = IRQ_VIRT_BASE;
72 +
73 +asmlinkage void
74 +__exception_irq_entry orion_legacy_handle_irq(struct pt_regs *regs)
75 +{
76 + u32 stat;
77 +
78 + stat = readl_relaxed(orion_irq_base + IRQ_CAUSE_LOW_OFF);
79 + stat &= readl_relaxed(orion_irq_base + IRQ_MASK_LOW_OFF);
80 + if (stat) {
81 + unsigned int hwirq = __fls(stat);
82 + handle_IRQ(hwirq, regs);
83 + return;
84 + }
85 + stat = readl_relaxed(orion_irq_base + IRQ_CAUSE_HIGH_OFF);
86 + stat &= readl_relaxed(orion_irq_base + IRQ_MASK_HIGH_OFF);
87 + if (stat) {
88 + unsigned int hwirq = 32 + __fls(stat);
89 + handle_IRQ(hwirq, regs);
90 + return;
91 + }
92 +}
93 +#endif
94
95 void __init orion_irq_init(unsigned int irq_start, void __iomem *maskaddr)
96 {
97 @@ -35,6 +78,10 @@ void __init orion_irq_init(unsigned int irq_start, void __iomem *maskaddr)
98 ct->chip.irq_unmask = irq_gc_mask_set_bit;
99 irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
100 IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
101 +
102 +#ifdef CONFIG_MULTI_IRQ_HANDLER
103 + set_handle_irq(orion_legacy_handle_irq);
104 +#endif
105 }
106
107 #ifdef CONFIG_OF
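
The C handler added above follows the usual two-register dispatch: AND the cause register with the mask register, then service the highest pending, unmasked source first. A condensed sketch, assuming 32-bit CAUSE/MASK pairs as in the patch:

    u32 stat = readl_relaxed(base + IRQ_CAUSE_LOW_OFF) &
               readl_relaxed(base + IRQ_MASK_LOW_OFF);
    if (stat)
        handle_IRQ(__fls(stat), regs);   /* __fls(): index of the highest set bit */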
108 diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
109 index 38b313909ac9..adad46e41a1d 100644
110 --- a/arch/sh/kernel/kgdb.c
111 +++ b/arch/sh/kernel/kgdb.c
112 @@ -13,6 +13,7 @@
113 #include <linux/kdebug.h>
114 #include <linux/irq.h>
115 #include <linux/io.h>
116 +#include <linux/sched.h>
117 #include <asm/cacheflush.h>
118 #include <asm/traps.h>
119
120 diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
121 index 78f1f2ded86c..ffd4493efc78 100644
122 --- a/arch/tile/include/asm/compat.h
123 +++ b/arch/tile/include/asm/compat.h
124 @@ -281,7 +281,6 @@ long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
125 u32 dummy, u32 low, u32 high);
126 long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,
127 u32 dummy, u32 low, u32 high);
128 -long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len);
129 long compat_sys_sync_file_range2(int fd, unsigned int flags,
130 u32 offset_lo, u32 offset_hi,
131 u32 nbytes_lo, u32 nbytes_hi);
132 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
133 index 0ecac257fb26..840c127a938e 100644
134 --- a/arch/x86/include/asm/pgtable_types.h
135 +++ b/arch/x86/include/asm/pgtable_types.h
136 @@ -121,7 +121,8 @@
137
138 /* Set of bits not changed in pte_modify */
139 #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
140 - _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
141 + _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
142 + _PAGE_SOFT_DIRTY)
143 #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
144
145 #define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
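
The reason _PAGE_SOFT_DIRTY must sit in _PAGE_CHG_MASK is pte_modify(): it keeps only the masked bits and replaces everything else from the new protection, so any bit left out of the mask is silently cleared on mprotect(). A simplified sketch of that helper, assuming its 3.12-era form:

    static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
    {
        pteval_t val = pte_val(pte);

        val &= _PAGE_CHG_MASK;                         /* bits preserved across the change */
        val |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;  /* fresh protection bits */
        return __pte(val);
    }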
146 diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
147 index 0a7852483ffe..ab84ac198a9a 100644
148 --- a/arch/x86/xen/platform-pci-unplug.c
149 +++ b/arch/x86/xen/platform-pci-unplug.c
150 @@ -69,6 +69,80 @@ static int check_platform_magic(void)
151 return 0;
152 }
153
154 +bool xen_has_pv_devices()
155 +{
156 + if (!xen_domain())
157 + return false;
158 +
159 + /* PV domains always have them. */
160 + if (xen_pv_domain())
161 + return true;
162 +
163 + /* And user has xen_platform_pci=0 set in guest config as
164 + * driver did not modify the value. */
165 + if (xen_platform_pci_unplug == 0)
166 + return false;
167 +
168 + if (xen_platform_pci_unplug & XEN_UNPLUG_NEVER)
169 + return false;
170 +
171 + if (xen_platform_pci_unplug & XEN_UNPLUG_ALL)
172 + return true;
173 +
174 + /* This is an odd one - we are going to run legacy
175 + * and PV drivers at the same time. */
176 + if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY)
177 + return true;
178 +
179 + /* And the caller has to follow with xen_pv_{disk,nic}_devices
180 + * to be certain which driver can load. */
181 + return false;
182 +}
183 +EXPORT_SYMBOL_GPL(xen_has_pv_devices);
184 +
185 +static bool __xen_has_pv_device(int state)
186 +{
187 + /* HVM domains might or might not */
188 + if (xen_hvm_domain() && (xen_platform_pci_unplug & state))
189 + return true;
190 +
191 + return xen_has_pv_devices();
192 +}
193 +
194 +bool xen_has_pv_nic_devices(void)
195 +{
196 + return __xen_has_pv_device(XEN_UNPLUG_ALL_NICS | XEN_UNPLUG_ALL);
197 +}
198 +EXPORT_SYMBOL_GPL(xen_has_pv_nic_devices);
199 +
200 +bool xen_has_pv_disk_devices(void)
201 +{
202 + return __xen_has_pv_device(XEN_UNPLUG_ALL_IDE_DISKS |
203 + XEN_UNPLUG_AUX_IDE_DISKS | XEN_UNPLUG_ALL);
204 +}
205 +EXPORT_SYMBOL_GPL(xen_has_pv_disk_devices);
206 +
207 +/*
208 + * This one is odd - it determines whether you want to run PV _and_
209 + * legacy (IDE) drivers together. This combination is only possible
210 + * under HVM.
211 + */
212 +bool xen_has_pv_and_legacy_disk_devices(void)
213 +{
214 + if (!xen_domain())
215 + return false;
216 +
217 + /* N.B. This is only ever used in HVM mode */
218 + if (xen_pv_domain())
219 + return false;
220 +
221 + if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY)
222 + return true;
223 +
224 + return false;
225 +}
226 +EXPORT_SYMBOL_GPL(xen_has_pv_and_legacy_disk_devices);
227 +
228 void xen_unplug_emulated_devices(void)
229 {
230 int r;
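
The helpers exported above let frontends replace open-coded xen_platform_pci_unplug checks; the tpmfront and kbdfront hunks later in this patch use exactly this shape. A hypothetical frontend init as a sketch (my_front_init and my_front_driver are illustrative names):

    static int __init my_front_init(void)
    {
        if (!xen_domain())
            return -ENODEV;

        if (!xen_has_pv_devices())       /* PV unavailable or unplug disabled */
            return -ENODEV;

        return xenbus_register_frontend(&my_front_driver);
    }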
231 diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
232 index 74bb74fa3f87..ea2d39dd912a 100644
233 --- a/arch/xtensa/platforms/xtfpga/setup.c
234 +++ b/arch/xtensa/platforms/xtfpga/setup.c
235 @@ -194,7 +194,7 @@ void __init platform_calibrate_ccount(void)
236 * Ethernet -- OpenCores Ethernet MAC (ethoc driver)
237 */
238
239 -static struct resource ethoc_res[] __initdata = {
240 +static struct resource ethoc_res[] = {
241 [0] = { /* register space */
242 .start = OETH_REGS_PADDR,
243 .end = OETH_REGS_PADDR + OETH_REGS_SIZE - 1,
244 @@ -212,7 +212,7 @@ static struct resource ethoc_res[] __initdata = {
245 },
246 };
247
248 -static struct ethoc_platform_data ethoc_pdata __initdata = {
249 +static struct ethoc_platform_data ethoc_pdata = {
250 /*
251 * The MAC address for these boards is 00:50:c2:13:6f:xx.
252 * The last byte (here as zero) is read from the DIP switches on the
253 @@ -222,7 +222,7 @@ static struct ethoc_platform_data ethoc_pdata __initdata = {
254 .phy_id = -1,
255 };
256
257 -static struct platform_device ethoc_device __initdata = {
258 +static struct platform_device ethoc_device = {
259 .name = "ethoc",
260 .id = -1,
261 .num_resources = ARRAY_SIZE(ethoc_res),
262 @@ -236,13 +236,13 @@ static struct platform_device ethoc_device __initdata = {
263 * UART
264 */
265
266 -static struct resource serial_resource __initdata = {
267 +static struct resource serial_resource = {
268 .start = DUART16552_PADDR,
269 .end = DUART16552_PADDR + 0x1f,
270 .flags = IORESOURCE_MEM,
271 };
272
273 -static struct plat_serial8250_port serial_platform_data[] __initdata = {
274 +static struct plat_serial8250_port serial_platform_data[] = {
275 [0] = {
276 .mapbase = DUART16552_PADDR,
277 .irq = DUART16552_INTNUM,
278 @@ -255,7 +255,7 @@ static struct plat_serial8250_port serial_platform_data[] __initdata = {
279 { },
280 };
281
282 -static struct platform_device xtavnet_uart __initdata = {
283 +static struct platform_device xtavnet_uart = {
284 .name = "serial8250",
285 .id = PLAT8250_DEV_PLATFORM,
286 .dev = {
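
Dropping __initdata above matters because the driver core keeps pointers to platform_device and resource structures for the device's whole lifetime, while __initdata memory is discarded once boot finishes; a registered device backed by it becomes a use-after-free. In short (ok_dev/bad_dev are illustrative):

    static struct platform_device ok_dev = { .name = "ethoc" };          /* stays resident */
    static struct platform_device bad_dev __initdata = { .name = "x" };  /* freed after boot,
                                                                            yet still referenced */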
287 diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
288 index 661a5b7f5104..7d83ef13186f 100644
289 --- a/drivers/acpi/bus.c
290 +++ b/drivers/acpi/bus.c
291 @@ -33,6 +33,7 @@
292 #include <linux/proc_fs.h>
293 #include <linux/acpi.h>
294 #include <linux/slab.h>
295 +#include <linux/regulator/machine.h>
296 #ifdef CONFIG_X86
297 #include <asm/mpspec.h>
298 #endif
299 @@ -575,6 +576,14 @@ void __init acpi_early_init(void)
300 goto error0;
301 }
302
303 + /*
304 + * If the system is using ACPI then we can be reasonably
305 + * confident that any regulators are managed by the firmware
306 + * so tell the regulator core it has everything it needs to
307 + * know.
308 + */
309 + regulator_has_full_constraints();
310 +
311 return;
312
313 error0:
314 diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
315 index a4660bbee8a6..ed88b3c2e8ea 100644
316 --- a/drivers/block/xen-blkfront.c
317 +++ b/drivers/block/xen-blkfront.c
318 @@ -1278,7 +1278,7 @@ static int blkfront_probe(struct xenbus_device *dev,
319 char *type;
320 int len;
321 /* no unplug has been done: do not hook devices != xen vbds */
322 - if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) {
323 + if (xen_has_pv_and_legacy_disk_devices()) {
324 int major;
325
326 if (!VDEV_IS_EXTENDED(vdevice))
327 @@ -2022,7 +2022,7 @@ static int __init xlblk_init(void)
328 if (!xen_domain())
329 return -ENODEV;
330
331 - if (xen_hvm_domain() && !xen_platform_pci_unplug)
332 + if (!xen_has_pv_disk_devices())
333 return -ENODEV;
334
335 if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
336 diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
337 index 94c280d36e8b..afa9362f4f4d 100644
338 --- a/drivers/char/tpm/xen-tpmfront.c
339 +++ b/drivers/char/tpm/xen-tpmfront.c
340 @@ -17,6 +17,7 @@
341 #include <xen/xenbus.h>
342 #include <xen/page.h>
343 #include "tpm.h"
344 +#include <xen/platform_pci.h>
345
346 struct tpm_private {
347 struct tpm_chip *chip;
348 @@ -423,6 +424,9 @@ static int __init xen_tpmfront_init(void)
349 if (!xen_domain())
350 return -ENODEV;
351
352 + if (!xen_has_pv_devices())
353 + return -ENODEV;
354 +
355 return xenbus_register_frontend(&tpmfront_driver);
356 }
357 module_init(xen_tpmfront_init);
358 diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c
359 index 272a3ec35957..0314dde18a5d 100644
360 --- a/drivers/eisa/eisa-bus.c
361 +++ b/drivers/eisa/eisa-bus.c
362 @@ -275,11 +275,13 @@ static int __init eisa_request_resources(struct eisa_root_device *root,
363 }
364
365 if (slot) {
366 + edev->res[i].name = NULL;
367 edev->res[i].start = SLOT_ADDRESS(root, slot)
368 + (i * 0x400);
369 edev->res[i].end = edev->res[i].start + 0xff;
370 edev->res[i].flags = IORESOURCE_IO;
371 } else {
372 + edev->res[i].name = NULL;
373 edev->res[i].start = SLOT_ADDRESS(root, slot)
374 + EISA_VENDOR_ID_OFFSET;
375 edev->res[i].end = edev->res[i].start + 3;
376 @@ -326,19 +328,20 @@ static int __init eisa_probe(struct eisa_root_device *root)
377 return -ENOMEM;
378 }
379
380 - if (eisa_init_device(root, edev, 0)) {
381 + if (eisa_request_resources(root, edev, 0)) {
382 + dev_warn(root->dev,
383 + "EISA: Cannot allocate resource for mainboard\n");
384 kfree(edev);
385 if (!root->force_probe)
386 - return -ENODEV;
387 + return -EBUSY;
388 goto force_probe;
389 }
390
391 - if (eisa_request_resources(root, edev, 0)) {
392 - dev_warn(root->dev,
393 - "EISA: Cannot allocate resource for mainboard\n");
394 + if (eisa_init_device(root, edev, 0)) {
395 + eisa_release_resources(edev);
396 kfree(edev);
397 if (!root->force_probe)
398 - return -EBUSY;
399 + return -ENODEV;
400 goto force_probe;
401 }
402
403 @@ -361,11 +364,6 @@ static int __init eisa_probe(struct eisa_root_device *root)
404 continue;
405 }
406
407 - if (eisa_init_device(root, edev, i)) {
408 - kfree(edev);
409 - continue;
410 - }
411 -
412 if (eisa_request_resources(root, edev, i)) {
413 dev_warn(root->dev,
414 "Cannot allocate resource for EISA slot %d\n",
415 @@ -374,6 +372,12 @@ static int __init eisa_probe(struct eisa_root_device *root)
416 continue;
417 }
418
419 + if (eisa_init_device(root, edev, i)) {
420 + eisa_release_resources(edev);
421 + kfree(edev);
422 + continue;
423 + }
424 +
425 if (edev->state == (EISA_CONFIG_ENABLED | EISA_CONFIG_FORCED))
426 enabled_str = " (forced enabled)";
427 else if (edev->state == EISA_CONFIG_FORCED)
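
The reordering above enforces the usual acquire-then-initialize discipline with reverse-order unwinding; in generic form (wrapper and helper names are illustrative):

    static int probe_slot(struct eisa_device *dev)
    {
        if (request_resources(dev))      /* claim the slot's window first */
            goto free_dev;
        if (init_device(dev))            /* only then initialize the device */
            goto release_res;
        return 0;

    release_res:
        release_resources(dev);          /* unwind in reverse order */
    free_dev:
        kfree(dev);
        return -ENODEV;
    }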
428 diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
429 index 7b33e14e44aa..a28640f47c27 100644
430 --- a/drivers/gpu/drm/ast/ast_fb.c
431 +++ b/drivers/gpu/drm/ast/ast_fb.c
432 @@ -65,7 +65,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
433 * then the BO is being moved and we should
434 * store up the damage until later.
435 */
436 - if (!in_interrupt())
437 + if (drm_can_sleep())
438 ret = ast_bo_reserve(bo, true);
439 if (ret) {
440 if (ret != -EBUSY)
441 diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
442 index b27e95666fab..86d779a9c245 100644
443 --- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
444 +++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
445 @@ -39,7 +39,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
446 * then the BO is being moved and we should
447 * store up the damage until later.
448 */
449 - if (!in_interrupt())
450 + if (drm_can_sleep())
451 ret = cirrus_bo_reserve(bo, true);
452 if (ret) {
453 if (ret != -EBUSY)
454 diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
455 index 60685b21cc36..379a47ea99f6 100644
456 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c
457 +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
458 @@ -273,8 +273,8 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
459 sr07 |= 0x11;
460 break;
461 case 16:
462 - sr07 |= 0xc1;
463 - hdr = 0xc0;
464 + sr07 |= 0x17;
465 + hdr = 0xc1;
466 break;
467 case 24:
468 sr07 |= 0x15;
469 diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
470 index 49293bdc972a..da0c0080ac17 100644
471 --- a/drivers/gpu/drm/drm_gem.c
472 +++ b/drivers/gpu/drm/drm_gem.c
473 @@ -129,11 +129,12 @@ int drm_gem_object_init(struct drm_device *dev,
474 {
475 struct file *filp;
476
477 + drm_gem_private_object_init(dev, obj, size);
478 +
479 filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
480 if (IS_ERR(filp))
481 return PTR_ERR(filp);
482
483 - drm_gem_private_object_init(dev, obj, size);
484 obj->filp = filp;
485
486 return 0;
487 diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
488 index 24e8af3d22bf..386de2c9dc86 100644
489 --- a/drivers/gpu/drm/gma500/gma_display.c
490 +++ b/drivers/gpu/drm/gma500/gma_display.c
491 @@ -349,6 +349,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
492 /* If we didn't get a handle then turn the cursor off */
493 if (!handle) {
494 temp = CURSOR_MODE_DISABLE;
495 + mutex_lock(&dev->struct_mutex);
496
497 if (gma_power_begin(dev, false)) {
498 REG_WRITE(control, temp);
499 @@ -365,6 +366,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
500 gma_crtc->cursor_obj = NULL;
501 }
502
503 + mutex_unlock(&dev->struct_mutex);
504 return 0;
505 }
506
507 @@ -374,9 +376,12 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
508 return -EINVAL;
509 }
510
511 + mutex_lock(&dev->struct_mutex);
512 obj = drm_gem_object_lookup(dev, file_priv, handle);
513 - if (!obj)
514 - return -ENOENT;
515 + if (!obj) {
516 + ret = -ENOENT;
517 + goto unlock;
518 + }
519
520 if (obj->size < width * height * 4) {
521 dev_dbg(dev->dev, "Buffer is too small\n");
522 @@ -440,10 +445,13 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
523 }
524
525 gma_crtc->cursor_obj = obj;
526 +unlock:
527 + mutex_unlock(&dev->struct_mutex);
528 return ret;
529
530 unref_cursor:
531 drm_gem_object_unreference(obj);
532 + mutex_unlock(&dev->struct_mutex);
533 return ret;
534 }
535
536 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
537 index 5a25f2476c3b..50d42daae15f 100644
538 --- a/drivers/gpu/drm/i915/i915_dma.c
539 +++ b/drivers/gpu/drm/i915/i915_dma.c
540 @@ -1683,6 +1683,7 @@ out_gem_unload:
541
542 intel_teardown_gmbus(dev);
543 intel_teardown_mchbar(dev);
544 + pm_qos_remove_request(&dev_priv->pm_qos);
545 destroy_workqueue(dev_priv->wq);
546 out_mtrrfree:
547 arch_phys_wc_del(dev_priv->gtt.mtrr);
548 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
549 index ab0f2c0a440c..881c9af0971d 100644
550 --- a/drivers/gpu/drm/i915/i915_drv.h
551 +++ b/drivers/gpu/drm/i915/i915_drv.h
552 @@ -296,6 +296,7 @@ struct drm_i915_error_state {
553 u64 fence[I915_MAX_NUM_FENCES];
554 struct timeval time;
555 struct drm_i915_error_ring {
556 + bool valid;
557 struct drm_i915_error_object {
558 int page_count;
559 u32 gtt_offset;
560 diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
561 index e15a1d90037d..fe4a7d16e261 100644
562 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
563 +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
564 @@ -250,7 +250,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
565 }
566
567 sg = st->sgl;
568 - sg->offset = offset;
569 + sg->offset = 0;
570 sg->length = size;
571
572 sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
573 diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
574 index dae364f0028c..354e3e32b30e 100644
575 --- a/drivers/gpu/drm/i915/i915_gpu_error.c
576 +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
577 @@ -221,6 +221,9 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
578 unsigned ring)
579 {
580 BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
581 + if (!error->ring[ring].valid)
582 + return;
583 +
584 err_printf(m, "%s command stream:\n", ring_str(ring));
585 err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
586 err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
587 @@ -272,7 +275,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
588 struct drm_device *dev = error_priv->dev;
589 drm_i915_private_t *dev_priv = dev->dev_private;
590 struct drm_i915_error_state *error = error_priv->error;
591 - struct intel_ring_buffer *ring;
592 int i, j, page, offset, elt;
593
594 if (!error) {
595 @@ -306,7 +308,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
596 if (INTEL_INFO(dev)->gen == 7)
597 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
598
599 - for_each_ring(ring, dev_priv, i)
600 + for (i = 0; i < ARRAY_SIZE(error->ring); i++)
601 i915_ring_error_state(m, dev, error, i);
602
603 if (error->active_bo)
604 @@ -363,8 +365,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
605 }
606 }
607
608 - obj = error->ring[i].ctx;
609 - if (obj) {
610 + if ((obj = error->ring[i].ctx)) {
611 err_printf(m, "%s --- HW Context = 0x%08x\n",
612 dev_priv->ring[i].name,
613 obj->gtt_offset);
614 @@ -644,7 +645,8 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
615 return NULL;
616
617 obj = ring->scratch.obj;
618 - if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
619 + if (obj != NULL &&
620 + acthd >= i915_gem_obj_ggtt_offset(obj) &&
621 acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
622 return i915_error_object_create(dev_priv, obj);
623 }
624 @@ -747,11 +749,17 @@ static void i915_gem_record_rings(struct drm_device *dev,
625 struct drm_i915_error_state *error)
626 {
627 struct drm_i915_private *dev_priv = dev->dev_private;
628 - struct intel_ring_buffer *ring;
629 struct drm_i915_gem_request *request;
630 int i, count;
631
632 - for_each_ring(ring, dev_priv, i) {
633 + for (i = 0; i < I915_NUM_RINGS; i++) {
634 + struct intel_ring_buffer *ring = &dev_priv->ring[i];
635 +
636 + if (ring->dev == NULL)
637 + continue;
638 +
639 + error->ring[i].valid = true;
640 +
641 i915_record_ring_state(dev, error, ring);
642
643 error->ring[i].batchbuffer =
644 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
645 index ef9b35479f01..375abe708268 100644
646 --- a/drivers/gpu/drm/i915/i915_reg.h
647 +++ b/drivers/gpu/drm/i915/i915_reg.h
648 @@ -1955,9 +1955,13 @@
649 * Please check the detailed lore in the commit message for for experimental
650 * evidence.
651 */
652 -#define PORTD_HOTPLUG_LIVE_STATUS (1 << 29)
653 -#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
654 -#define PORTB_HOTPLUG_LIVE_STATUS (1 << 27)
655 +#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
656 +#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
657 +#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
658 +/* VLV DP/HDMI bits again match Bspec */
659 +#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
660 +#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
661 +#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
662 #define PORTD_HOTPLUG_INT_STATUS (3 << 21)
663 #define PORTC_HOTPLUG_INT_STATUS (3 << 19)
664 #define PORTB_HOTPLUG_INT_STATUS (3 << 17)
665 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
666 index 1a431377d83b..5a97f7356843 100644
667 --- a/drivers/gpu/drm/i915/intel_dp.c
668 +++ b/drivers/gpu/drm/i915/intel_dp.c
669 @@ -2803,18 +2803,34 @@ g4x_dp_detect(struct intel_dp *intel_dp)
670 return status;
671 }
672
673 - switch (intel_dig_port->port) {
674 - case PORT_B:
675 - bit = PORTB_HOTPLUG_LIVE_STATUS;
676 - break;
677 - case PORT_C:
678 - bit = PORTC_HOTPLUG_LIVE_STATUS;
679 - break;
680 - case PORT_D:
681 - bit = PORTD_HOTPLUG_LIVE_STATUS;
682 - break;
683 - default:
684 - return connector_status_unknown;
685 + if (IS_VALLEYVIEW(dev)) {
686 + switch (intel_dig_port->port) {
687 + case PORT_B:
688 + bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
689 + break;
690 + case PORT_C:
691 + bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
692 + break;
693 + case PORT_D:
694 + bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
695 + break;
696 + default:
697 + return connector_status_unknown;
698 + }
699 + } else {
700 + switch (intel_dig_port->port) {
701 + case PORT_B:
702 + bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
703 + break;
704 + case PORT_C:
705 + bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
706 + break;
707 + case PORT_D:
708 + bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
709 + break;
710 + default:
711 + return connector_status_unknown;
712 + }
713 }
714
715 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
716 diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
717 index 460ee1026fca..43719bbb2595 100644
718 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
719 +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
720 @@ -1501,8 +1501,8 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
721 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
722 }
723
724 -static int __intel_ring_begin(struct intel_ring_buffer *ring,
725 - int bytes)
726 +static int __intel_ring_prepare(struct intel_ring_buffer *ring,
727 + int bytes)
728 {
729 int ret;
730
731 @@ -1518,7 +1518,6 @@ static int __intel_ring_begin(struct intel_ring_buffer *ring,
732 return ret;
733 }
734
735 - ring->space -= bytes;
736 return 0;
737 }
738
739 @@ -1533,12 +1532,17 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
740 if (ret)
741 return ret;
742
743 + ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
744 + if (ret)
745 + return ret;
746 +
747 /* Preallocate the olr before touching the ring */
748 ret = intel_ring_alloc_seqno(ring);
749 if (ret)
750 return ret;
751
752 - return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
753 + ring->space -= num_dwords * sizeof(uint32_t);
754 + return 0;
755 }
756
757 void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
758 diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
759 index 801731aeab61..9f9780b7ddf0 100644
760 --- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
761 +++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
762 @@ -22,8 +22,10 @@ static void mga_hide_cursor(struct mga_device *mdev)
763 {
764 WREG8(MGA_CURPOSXL, 0);
765 WREG8(MGA_CURPOSXH, 0);
766 - mgag200_bo_unpin(mdev->cursor.pixels_1);
767 - mgag200_bo_unpin(mdev->cursor.pixels_2);
768 + if (mdev->cursor.pixels_1->pin_count)
769 + mgag200_bo_unpin(mdev->cursor.pixels_1);
770 + if (mdev->cursor.pixels_2->pin_count)
771 + mgag200_bo_unpin(mdev->cursor.pixels_2);
772 }
773
774 int mga_crtc_cursor_set(struct drm_crtc *crtc,
775 @@ -32,7 +34,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
776 uint32_t width,
777 uint32_t height)
778 {
779 - struct drm_device *dev = (struct drm_device *)file_priv->minor->dev;
780 + struct drm_device *dev = crtc->dev;
781 struct mga_device *mdev = (struct mga_device *)dev->dev_private;
782 struct mgag200_bo *pixels_1 = mdev->cursor.pixels_1;
783 struct mgag200_bo *pixels_2 = mdev->cursor.pixels_2;
784 diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
785 index 964f58cee5ea..d29bb335cccc 100644
786 --- a/drivers/gpu/drm/mgag200/mgag200_fb.c
787 +++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
788 @@ -41,7 +41,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
789 * then the BO is being moved and we should
790 * store up the damage until later.
791 */
792 - if (!in_interrupt())
793 + if (drm_can_sleep())
794 ret = mgag200_bo_reserve(bo, true);
795 if (ret) {
796 if (ret != -EBUSY)
797 diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
798 index 503a414cbdad..1288cd9f67d1 100644
799 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c
800 +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
801 @@ -1521,11 +1521,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
802 (mga_vga_calculate_mode_bandwidth(mode, bpp)
803 > (32700 * 1024))) {
804 return MODE_BANDWIDTH;
805 - } else if (mode->type == G200_EH &&
806 + } else if (mdev->type == G200_EH &&
807 (mga_vga_calculate_mode_bandwidth(mode, bpp)
808 > (37500 * 1024))) {
809 return MODE_BANDWIDTH;
810 - } else if (mode->type == G200_ER &&
811 + } else if (mdev->type == G200_ER &&
812 (mga_vga_calculate_mode_bandwidth(mode,
813 bpp) > (55000 * 1024))) {
814 return MODE_BANDWIDTH;
815 diff --git a/drivers/gpu/drm/nouveau/core/engine/falcon.c b/drivers/gpu/drm/nouveau/core/engine/falcon.c
816 index e03fc8e4dc1d..5e077e4ed7f6 100644
817 --- a/drivers/gpu/drm/nouveau/core/engine/falcon.c
818 +++ b/drivers/gpu/drm/nouveau/core/engine/falcon.c
819 @@ -56,6 +56,16 @@ _nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data)
820 nv_wr32(falcon, falcon->addr + addr, data);
821 }
822
823 +static void *
824 +vmemdup(const void *src, size_t len)
825 +{
826 + void *p = vmalloc(len);
827 +
828 + if (p)
829 + memcpy(p, src, len);
830 + return p;
831 +}
832 +
833 int
834 _nouveau_falcon_init(struct nouveau_object *object)
835 {
836 @@ -111,7 +121,7 @@ _nouveau_falcon_init(struct nouveau_object *object)
837
838 ret = request_firmware(&fw, name, &device->pdev->dev);
839 if (ret == 0) {
840 - falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
841 + falcon->code.data = vmemdup(fw->data, fw->size);
842 falcon->code.size = fw->size;
843 falcon->data.data = NULL;
844 falcon->data.size = 0;
845 @@ -134,7 +144,7 @@ _nouveau_falcon_init(struct nouveau_object *object)
846 return ret;
847 }
848
849 - falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
850 + falcon->data.data = vmemdup(fw->data, fw->size);
851 falcon->data.size = fw->size;
852 release_firmware(fw);
853 if (!falcon->data.data)
854 @@ -149,7 +159,7 @@ _nouveau_falcon_init(struct nouveau_object *object)
855 return ret;
856 }
857
858 - falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
859 + falcon->code.data = vmemdup(fw->data, fw->size);
860 falcon->code.size = fw->size;
861 release_firmware(fw);
862 if (!falcon->code.data)
863 @@ -235,8 +245,8 @@ _nouveau_falcon_fini(struct nouveau_object *object, bool suspend)
864 if (!suspend) {
865 nouveau_gpuobj_ref(NULL, &falcon->core);
866 if (falcon->external) {
867 - kfree(falcon->data.data);
868 - kfree(falcon->code.data);
869 + vfree(falcon->data.data);
870 + vfree(falcon->code.data);
871 falcon->code.data = NULL;
872 }
873 }
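
kmemdup() is swapped for the vmemdup() helper added above because firmware images can exceed what kmalloc() will reliably hand out as physically contiguous memory; the only catch is that allocation and free must stay paired, vmalloc() with vfree():

    void *code = vmemdup(fw->data, fw->size);  /* vmalloc-backed duplicate */
    if (!code)
        return -ENOMEM;
    /* use the image */
    vfree(code);                               /* pair with vfree(), never kfree() */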
874 diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
875 index 755c38d06271..60a97b6b908c 100644
876 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c
877 +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
878 @@ -802,25 +802,25 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
879 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
880 {
881 struct nouveau_mem *node = old_mem->mm_node;
882 - struct nouveau_bo *nvbo = nouveau_bo(bo);
883 u64 length = (new_mem->num_pages << PAGE_SHIFT);
884 u64 src_offset = node->vma[0].offset;
885 u64 dst_offset = node->vma[1].offset;
886 + int src_tiled = !!node->memtype;
887 + int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
888 int ret;
889
890 while (length) {
891 u32 amount, stride, height;
892
893 + ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
894 + if (ret)
895 + return ret;
896 +
897 amount = min(length, (u64)(4 * 1024 * 1024));
898 stride = 16 * 4;
899 height = amount / stride;
900
901 - if (old_mem->mem_type == TTM_PL_VRAM &&
902 - nouveau_bo_tile_layout(nvbo)) {
903 - ret = RING_SPACE(chan, 8);
904 - if (ret)
905 - return ret;
906 -
907 + if (src_tiled) {
908 BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
909 OUT_RING (chan, 0);
910 OUT_RING (chan, 0);
911 @@ -830,19 +830,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
912 OUT_RING (chan, 0);
913 OUT_RING (chan, 0);
914 } else {
915 - ret = RING_SPACE(chan, 2);
916 - if (ret)
917 - return ret;
918 -
919 BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
920 OUT_RING (chan, 1);
921 }
922 - if (new_mem->mem_type == TTM_PL_VRAM &&
923 - nouveau_bo_tile_layout(nvbo)) {
924 - ret = RING_SPACE(chan, 8);
925 - if (ret)
926 - return ret;
927 -
928 + if (dst_tiled) {
929 BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
930 OUT_RING (chan, 0);
931 OUT_RING (chan, 0);
932 @@ -852,18 +843,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
933 OUT_RING (chan, 0);
934 OUT_RING (chan, 0);
935 } else {
936 - ret = RING_SPACE(chan, 2);
937 - if (ret)
938 - return ret;
939 -
940 BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
941 OUT_RING (chan, 1);
942 }
943
944 - ret = RING_SPACE(chan, 14);
945 - if (ret)
946 - return ret;
947 -
948 BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
949 OUT_RING (chan, upper_32_bits(src_offset));
950 OUT_RING (chan, upper_32_bits(dst_offset));
951 diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
952 index 368e1b84f429..0ee2cf5cf76e 100644
953 --- a/drivers/gpu/drm/radeon/atombios_crtc.c
954 +++ b/drivers/gpu/drm/radeon/atombios_crtc.c
955 @@ -209,6 +209,16 @@ static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
956 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
957 }
958
959 +static const u32 vga_control_regs[6] =
960 +{
961 + AVIVO_D1VGA_CONTROL,
962 + AVIVO_D2VGA_CONTROL,
963 + EVERGREEN_D3VGA_CONTROL,
964 + EVERGREEN_D4VGA_CONTROL,
965 + EVERGREEN_D5VGA_CONTROL,
966 + EVERGREEN_D6VGA_CONTROL,
967 +};
968 +
969 static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
970 {
971 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
972 @@ -216,13 +226,23 @@ static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
973 struct radeon_device *rdev = dev->dev_private;
974 int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
975 BLANK_CRTC_PS_ALLOCATION args;
976 + u32 vga_control = 0;
977
978 memset(&args, 0, sizeof(args));
979
980 + if (ASIC_IS_DCE8(rdev)) {
981 + vga_control = RREG32(vga_control_regs[radeon_crtc->crtc_id]);
982 + WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control | 1);
983 + }
984 +
985 args.ucCRTC = radeon_crtc->crtc_id;
986 args.ucBlanking = state;
987
988 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
989 +
990 + if (ASIC_IS_DCE8(rdev)) {
991 + WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control);
992 + }
993 }
994
995 static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
996 @@ -938,11 +958,14 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_
997 radeon_atombios_get_ppll_ss_info(rdev,
998 &radeon_crtc->ss,
999 ATOM_DP_SS_ID1);
1000 - } else
1001 + } else {
1002 radeon_crtc->ss_enabled =
1003 radeon_atombios_get_ppll_ss_info(rdev,
1004 &radeon_crtc->ss,
1005 ATOM_DP_SS_ID1);
1006 + }
1007 + /* disable spread spectrum on DCE3 DP */
1008 + radeon_crtc->ss_enabled = false;
1009 }
1010 break;
1011 case ATOM_ENCODER_MODE_LVDS:
1012 diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
1013 index b5c67a99dda9..ffb36c1ee005 100644
1014 --- a/drivers/gpu/drm/radeon/evergreen.c
1015 +++ b/drivers/gpu/drm/radeon/evergreen.c
1016 @@ -4249,8 +4249,8 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
1017 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
1018 }
1019
1020 - /* only one DAC on DCE6 */
1021 - if (!ASIC_IS_DCE6(rdev))
1022 + /* only one DAC on DCE5 */
1023 + if (!ASIC_IS_DCE5(rdev))
1024 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
1025 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
1026
1027 diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
1028 index eb8ac315f92f..c7cac07f139b 100644
1029 --- a/drivers/gpu/drm/radeon/evergreen_cs.c
1030 +++ b/drivers/gpu/drm/radeon/evergreen_cs.c
1031 @@ -967,7 +967,10 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
1032 if (track->cb_dirty) {
1033 tmp = track->cb_target_mask;
1034 for (i = 0; i < 8; i++) {
1035 - if ((tmp >> (i * 4)) & 0xF) {
1036 + u32 format = G_028C70_FORMAT(track->cb_color_info[i]);
1037 +
1038 + if (format != V_028C70_COLOR_INVALID &&
1039 + (tmp >> (i * 4)) & 0xF) {
1040 /* at least one component is enabled */
1041 if (track->cb_color_bo[i] == NULL) {
1042 dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
1043 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
1044 index 954eb9afbe71..b2dbd48f7f28 100644
1045 --- a/drivers/gpu/drm/radeon/ni.c
1046 +++ b/drivers/gpu/drm/radeon/ni.c
1047 @@ -1335,13 +1335,12 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
1048 {
1049 struct radeon_ring *ring = &rdev->ring[fence->ring];
1050 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
1051 + u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
1052 + PACKET3_SH_ACTION_ENA;
1053
1054 /* flush read cache over gart for this vmid */
1055 - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1056 - radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
1057 - radeon_ring_write(ring, 0);
1058 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
1059 - radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
1060 + radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
1061 radeon_ring_write(ring, 0xFFFFFFFF);
1062 radeon_ring_write(ring, 0);
1063 radeon_ring_write(ring, 10); /* poll interval */
1064 @@ -1357,6 +1356,8 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
1065 void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1066 {
1067 struct radeon_ring *ring = &rdev->ring[ib->ring];
1068 + u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
1069 + PACKET3_SH_ACTION_ENA;
1070
1071 /* set to DX10/11 mode */
1072 radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
1073 @@ -1381,14 +1382,11 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1074 (ib->vm ? (ib->vm->id << 24) : 0));
1075
1076 /* flush read cache over gart for this vmid */
1077 - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1078 - radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
1079 - radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
1080 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
1081 - radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
1082 + radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
1083 radeon_ring_write(ring, 0xFFFFFFFF);
1084 radeon_ring_write(ring, 0);
1085 - radeon_ring_write(ring, 10); /* poll interval */
1086 + radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */
1087 }
1088
1089 static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
1090 diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
1091 index 22421bc80c0d..d996033c243e 100644
1092 --- a/drivers/gpu/drm/radeon/nid.h
1093 +++ b/drivers/gpu/drm/radeon/nid.h
1094 @@ -1154,6 +1154,7 @@
1095 # define PACKET3_DB_ACTION_ENA (1 << 26)
1096 # define PACKET3_SH_ACTION_ENA (1 << 27)
1097 # define PACKET3_SX_ACTION_ENA (1 << 28)
1098 +# define PACKET3_ENGINE_ME (1 << 31)
1099 #define PACKET3_ME_INITIALIZE 0x44
1100 #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
1101 #define PACKET3_COND_WRITE 0x45
1102 diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
1103 index f9be22062df1..2acbf89cdfd3 100644
1104 --- a/drivers/gpu/drm/radeon/r600.c
1105 +++ b/drivers/gpu/drm/radeon/r600.c
1106 @@ -2554,14 +2554,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
1107 struct radeon_fence *fence)
1108 {
1109 struct radeon_ring *ring = &rdev->ring[fence->ring];
1110 + u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
1111 + PACKET3_SH_ACTION_ENA;
1112 +
1113 + if (rdev->family >= CHIP_RV770)
1114 + cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
1115
1116 if (rdev->wb.use_event) {
1117 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
1118 /* flush read cache over gart */
1119 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
1120 - radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
1121 - PACKET3_VC_ACTION_ENA |
1122 - PACKET3_SH_ACTION_ENA);
1123 + radeon_ring_write(ring, cp_coher_cntl);
1124 radeon_ring_write(ring, 0xFFFFFFFF);
1125 radeon_ring_write(ring, 0);
1126 radeon_ring_write(ring, 10); /* poll interval */
1127 @@ -2575,9 +2578,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
1128 } else {
1129 /* flush read cache over gart */
1130 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
1131 - radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
1132 - PACKET3_VC_ACTION_ENA |
1133 - PACKET3_SH_ACTION_ENA);
1134 + radeon_ring_write(ring, cp_coher_cntl);
1135 radeon_ring_write(ring, 0xFFFFFFFF);
1136 radeon_ring_write(ring, 0);
1137 radeon_ring_write(ring, 10); /* poll interval */
1138 diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
1139 index 01a3ec83f284..745e66eacd47 100644
1140 --- a/drivers/gpu/drm/radeon/r600_cs.c
1141 +++ b/drivers/gpu/drm/radeon/r600_cs.c
1142 @@ -749,7 +749,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
1143 }
1144
1145 for (i = 0; i < 8; i++) {
1146 - if ((tmp >> (i * 4)) & 0xF) {
1147 + u32 format = G_0280A0_FORMAT(track->cb_color_info[i]);
1148 +
1149 + if (format != V_0280A0_COLOR_INVALID &&
1150 + (tmp >> (i * 4)) & 0xF) {
1151 /* at least one component is enabled */
1152 if (track->cb_color_bo[i] == NULL) {
1153 dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
1154 diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
1155 index 7b3c7b5932c5..72484b4b679e 100644
1156 --- a/drivers/gpu/drm/radeon/r600d.h
1157 +++ b/drivers/gpu/drm/radeon/r600d.h
1158 @@ -1547,6 +1547,7 @@
1159 # define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
1160 #define PACKET3_SURFACE_SYNC 0x43
1161 # define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
1162 +# define PACKET3_FULL_CACHE_ENA (1 << 20) /* r7xx+ only */
1163 # define PACKET3_TC_ACTION_ENA (1 << 23)
1164 # define PACKET3_VC_ACTION_ENA (1 << 24)
1165 # define PACKET3_CB_ACTION_ENA (1 << 25)
1166 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
1167 index 5c39bf7c3d88..dfa641277175 100644
1168 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
1169 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
1170 @@ -3944,6 +3944,10 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
1171 /* tell the bios not to handle mode switching */
1172 bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
1173
1174 + /* clear the vbios dpms state */
1175 + if (ASIC_IS_DCE4(rdev))
1176 + bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;
1177 +
1178 if (rdev->family >= CHIP_R600) {
1179 WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
1180 WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
1181 diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
1182 index fc60b74ee304..e24ca6ab96de 100644
1183 --- a/drivers/gpu/drm/radeon/radeon_i2c.c
1184 +++ b/drivers/gpu/drm/radeon/radeon_i2c.c
1185 @@ -1020,6 +1020,9 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
1186 /* Add the default buses */
1187 void radeon_i2c_init(struct radeon_device *rdev)
1188 {
1189 + if (radeon_hw_i2c)
1190 + DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
1191 +
1192 if (rdev->is_atom_bios)
1193 radeon_atombios_i2c_init(rdev);
1194 else
1195 diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
1196 index 4f6b7fc7ad3c..a0ec4bb9d896 100644
1197 --- a/drivers/gpu/drm/radeon/radeon_pm.c
1198 +++ b/drivers/gpu/drm/radeon/radeon_pm.c
1199 @@ -1024,8 +1024,10 @@ static void radeon_pm_resume_old(struct radeon_device *rdev)
1200 rdev->pm.current_clock_mode_index = 0;
1201 rdev->pm.current_sclk = rdev->pm.default_sclk;
1202 rdev->pm.current_mclk = rdev->pm.default_mclk;
1203 - rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
1204 - rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
1205 + if (rdev->pm.power_state) {
1206 + rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
1207 + rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
1208 + }
1209 if (rdev->pm.pm_method == PM_METHOD_DYNPM
1210 && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
1211 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1212 diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
1213 index 1d029ccf428b..6d916fc93116 100644
1214 --- a/drivers/gpu/drm/radeon/radeon_uvd.c
1215 +++ b/drivers/gpu/drm/radeon/radeon_uvd.c
1216 @@ -91,6 +91,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
1217 case CHIP_VERDE:
1218 case CHIP_PITCAIRN:
1219 case CHIP_ARUBA:
1220 + case CHIP_OLAND:
1221 fw_name = FIRMWARE_TAHITI;
1222 break;
1223
1224 diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
1225 index 374499db20c7..a239b30aaf9d 100644
1226 --- a/drivers/gpu/drm/radeon/rv770_dpm.c
1227 +++ b/drivers/gpu/drm/radeon/rv770_dpm.c
1228 @@ -2531,6 +2531,12 @@ bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
1229 (rdev->pdev->subsystem_device == 0x1c42))
1230 switch_limit = 200;
1231
1232 + /* RV770 */
1233 + /* mclk switching doesn't seem to work reliably on desktop RV770s */
1234 + if ((rdev->family == CHIP_RV770) &&
1235 + !(rdev->flags & RADEON_IS_MOBILITY))
1236 + switch_limit = 0xffffffff; /* disable mclk switching */
1237 +
1238 if (vblank_time < switch_limit)
1239 return true;
1240 else
1241 diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
1242 index 37acf938b779..3f39f15d48a6 100644
1243 --- a/drivers/gpu/drm/radeon/si.c
1244 +++ b/drivers/gpu/drm/radeon/si.c
1245 @@ -5625,7 +5625,7 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
1246 }
1247
1248 if (!ASIC_IS_NODCE(rdev)) {
1249 - WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
1250 + WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
1251
1252 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
1253 WREG32(DC_HPD1_INT_CONTROL, tmp);
1254 diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
1255 index 6e23019cd110..205a96177f95 100644
1256 --- a/drivers/gpu/drm/radeon/sid.h
1257 +++ b/drivers/gpu/drm/radeon/sid.h
1258 @@ -770,7 +770,7 @@
1259 # define GRPH_PFLIP_INT_MASK (1 << 0)
1260 # define GRPH_PFLIP_INT_TYPE (1 << 8)
1261
1262 -#define DACA_AUTODETECT_INT_CONTROL 0x66c8
1263 +#define DAC_AUTODETECT_INT_CONTROL 0x67c8
1264
1265 #define DC_HPD1_INT_STATUS 0x601c
1266 #define DC_HPD2_INT_STATUS 0x6028
1267 diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
1268 index b19ef4951085..824550db3fed 100644
1269 --- a/drivers/gpu/drm/radeon/uvd_v2_2.c
1270 +++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
1271 @@ -153,6 +153,7 @@ int uvd_v2_2_resume(struct radeon_device *rdev)
1272 chip_id = 0x01000015;
1273 break;
1274 case CHIP_PITCAIRN:
1275 + case CHIP_OLAND:
1276 chip_id = 0x01000016;
1277 break;
1278 case CHIP_ARUBA:
1279 diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
1280 index a9d24e4bf792..c9511fd2f501 100644
1281 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
1282 +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
1283 @@ -371,7 +371,6 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
1284 goto error;
1285
1286 rcrtc->plane->format = format;
1287 - rcrtc->plane->pitch = crtc->fb->pitches[0];
1288
1289 rcrtc->plane->src_x = x;
1290 rcrtc->plane->src_y = y;
1291 diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
1292 index 53000644733f..3fb69d9ae61b 100644
1293 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
1294 +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
1295 @@ -104,6 +104,15 @@ void rcar_du_plane_update_base(struct rcar_du_plane *plane)
1296 {
1297 struct rcar_du_group *rgrp = plane->group;
1298 unsigned int index = plane->hwindex;
1299 + u32 mwr;
1300 +
1301 + /* Memory pitch (expressed in pixels) */
1302 + if (plane->format->planes == 2)
1303 + mwr = plane->pitch;
1304 + else
1305 + mwr = plane->pitch * 8 / plane->format->bpp;
1306 +
1307 + rcar_du_plane_write(rgrp, index, PnMWR, mwr);
1308
1309 /* The Y position is expressed in raster line units and must be doubled
1310 * for 32bpp formats, according to the R8A7790 datasheet. No mention of
1311 @@ -133,6 +142,8 @@ void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
1312 {
1313 struct drm_gem_cma_object *gem;
1314
1315 + plane->pitch = fb->pitches[0];
1316 +
1317 gem = drm_fb_cma_get_gem_obj(fb, 0);
1318 plane->dma[0] = gem->paddr + fb->offsets[0];
1319
1320 @@ -209,7 +220,6 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
1321 struct rcar_du_group *rgrp = plane->group;
1322 u32 ddcr2 = PnDDCR2_CODE;
1323 u32 ddcr4;
1324 - u32 mwr;
1325
1326 /* Data format
1327 *
1328 @@ -240,14 +250,6 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
1329 rcar_du_plane_write(rgrp, index, PnDDCR2, ddcr2);
1330 rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4);
1331
1332 - /* Memory pitch (expressed in pixels) */
1333 - if (plane->format->planes == 2)
1334 - mwr = plane->pitch;
1335 - else
1336 - mwr = plane->pitch * 8 / plane->format->bpp;
1337 -
1338 - rcar_du_plane_write(rgrp, index, PnMWR, mwr);
1339 -
1340 /* Destination position and size */
1341 rcar_du_plane_write(rgrp, index, PnDSXR, plane->width);
1342 rcar_du_plane_write(rgrp, index, PnDSYR, plane->height);
1343 @@ -309,7 +311,6 @@ rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
1344
1345 rplane->crtc = crtc;
1346 rplane->format = format;
1347 - rplane->pitch = fb->pitches[0];
1348
1349 rplane->src_x = src_x >> 16;
1350 rplane->src_y = src_y >> 16;
1351 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1352 index 599f6469a1eb..8b059eb09d9b 100644
1353 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1354 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1355 @@ -1483,11 +1483,11 @@ int vmw_execbuf_process(struct drm_file *file_priv,
1356 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
1357 command_size);
1358 if (unlikely(ret != 0))
1359 - goto out_err;
1360 + goto out_err_nores;
1361
1362 ret = vmw_resources_reserve(sw_context);
1363 if (unlikely(ret != 0))
1364 - goto out_err;
1365 + goto out_err_nores;
1366
1367 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
1368 if (unlikely(ret != 0))
1369 @@ -1569,10 +1569,11 @@ int vmw_execbuf_process(struct drm_file *file_priv,
1370 return 0;
1371
1372 out_err:
1373 - vmw_resource_relocations_free(&sw_context->res_relocations);
1374 - vmw_free_relocations(sw_context);
1375 ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
1376 +out_err_nores:
1377 vmw_resource_list_unreserve(&sw_context->resource_list, true);
1378 + vmw_resource_relocations_free(&sw_context->res_relocations);
1379 + vmw_free_relocations(sw_context);
1380 vmw_clear_validations(sw_context);
1381 if (unlikely(dev_priv->pinned_bo != NULL &&
1382 !dev_priv->query_cid_valid))
1383 diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
1384 index d6c7fe7f88d5..3ad651c3356c 100644
1385 --- a/drivers/infiniband/hw/qib/qib_ud.c
1386 +++ b/drivers/infiniband/hw/qib/qib_ud.c
1387 @@ -57,13 +57,20 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
1388 struct qib_sge *sge;
1389 struct ib_wc wc;
1390 u32 length;
1391 + enum ib_qp_type sqptype, dqptype;
1392
1393 qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
1394 if (!qp) {
1395 ibp->n_pkt_drops++;
1396 return;
1397 }
1398 - if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
1399 +
1400 + sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
1401 + IB_QPT_UD : sqp->ibqp.qp_type;
1402 + dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
1403 + IB_QPT_UD : qp->ibqp.qp_type;
1404 +
1405 + if (dqptype != sqptype ||
1406 !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
1407 ibp->n_pkt_drops++;
1408 goto drop;
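
The loopback fix above treats the GSI QP (QP1) as an ordinary UD QP for type-compatibility, since GSI traffic is UD-format on the wire; the normalization reduces to a tiny mapping (illustrative helper):

    static enum ib_qp_type ud_compat_type(enum ib_qp_type t)
    {
        return (t == IB_QPT_GSI) ? IB_QPT_UD : t;  /* GSI is UD on the wire */
    }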
1409 diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
1410 index e21c1816a8f9..fbfdc10573be 100644
1411 --- a/drivers/input/misc/xen-kbdfront.c
1412 +++ b/drivers/input/misc/xen-kbdfront.c
1413 @@ -29,6 +29,7 @@
1414 #include <xen/interface/io/fbif.h>
1415 #include <xen/interface/io/kbdif.h>
1416 #include <xen/xenbus.h>
1417 +#include <xen/platform_pci.h>
1418
1419 struct xenkbd_info {
1420 struct input_dev *kbd;
1421 @@ -380,6 +381,9 @@ static int __init xenkbd_init(void)
1422 if (xen_initial_domain())
1423 return -ENODEV;
1424
1425 + if (!xen_has_pv_devices())
1426 + return -ENODEV;
1427 +
1428 return xenbus_register_frontend(&xenkbd_driver);
1429 }
1430
1431 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1432 index 40203ada635e..cae5a0866046 100644
1433 --- a/drivers/iommu/intel-iommu.c
1434 +++ b/drivers/iommu/intel-iommu.c
1435 @@ -917,7 +917,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
1436
1437 /* If range covers entire pagetable, free it */
1438 if (!(start_pfn > level_pfn ||
1439 - last_pfn < level_pfn + level_size(level))) {
1440 + last_pfn < level_pfn + level_size(level) - 1)) {
1441 dma_clear_pte(pte);
1442 domain_flush_cache(domain, pte, sizeof(*pte));
1443 free_pgtable_page(level_pte);
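
The intel-iommu change is a classic inclusive-bound fix: a table whose first pfn is level_pfn and whose size is level_size(level) covers pfns up to level_pfn + level_size(level) - 1. A worked case, assuming level_size = 512 and level_pfn = 0: the table spans pfns 0..511, so freeing 0..511 covers it entirely, yet the old test (511 < 512, "range ends early") wrongly kept the table; the corrected test (511 < 511 is false) lets it be freed.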
1444 diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
1445 index 30b426ed744b..34d009728d81 100644
1446 --- a/drivers/md/Kconfig
1447 +++ b/drivers/md/Kconfig
1448 @@ -176,8 +176,12 @@ config MD_FAULTY
1449
1450 source "drivers/md/bcache/Kconfig"
1451
1452 +config BLK_DEV_DM_BUILTIN
1453 + boolean
1454 +
1455 config BLK_DEV_DM
1456 tristate "Device mapper support"
1457 + select BLK_DEV_DM_BUILTIN
1458 ---help---
1459 Device-mapper is a low level volume manager. It works by allowing
1460 people to specify mappings for ranges of logical sectors. Various
1461 diff --git a/drivers/md/Makefile b/drivers/md/Makefile
1462 index 2acc43fe0229..f26d83292579 100644
1463 --- a/drivers/md/Makefile
1464 +++ b/drivers/md/Makefile
1465 @@ -32,6 +32,7 @@ obj-$(CONFIG_MD_FAULTY) += faulty.o
1466 obj-$(CONFIG_BCACHE) += bcache/
1467 obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
1468 obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
1469 +obj-$(CONFIG_BLK_DEV_DM_BUILTIN) += dm-builtin.o
1470 obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
1471 obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o
1472 obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
1473 diff --git a/drivers/md/dm-builtin.c b/drivers/md/dm-builtin.c
1474 new file mode 100644
1475 index 000000000000..6c9049c51b2b
1476 --- /dev/null
1477 +++ b/drivers/md/dm-builtin.c
1478 @@ -0,0 +1,48 @@
1479 +#include "dm.h"
1480 +
1481 +/*
1482 + * The kobject release method must not be placed in the module itself,
1483 + * otherwise we are subject to module unload races.
1484 + *
1485 + * The release method is called when the last reference to the kobject is
1486 + * dropped. It may be called by any other kernel code that drops the last
1487 + * reference.
1488 + *
1489 + * The release method suffers from module unload race. We may prevent the
1490 + * module from being unloaded at the start of the release method (using
1491 + * increased module reference count or synchronizing against the release
1492 + * method), however there is no way to prevent the module from being
1493 + * unloaded at the end of the release method.
1494 + *
1495 + * If this code were placed in the dm module, the following race may
1496 + * happen:
1497 + * 1. Some other process takes a reference to dm kobject
1498 + * 2. The user issues ioctl function to unload the dm device
1499 + * 3. dm_sysfs_exit calls kobject_put, however the object is not released
1500 + * because of the other reference taken at step 1
1501 + * 4. dm_sysfs_exit waits on the completion
1502 + * 5. The other process that took the reference in step 1 drops it,
1503 + * dm_kobject_release is called from this process
1504 + * 6. dm_kobject_release calls complete()
1505 + * 7. a reschedule happens before dm_kobject_release returns
1506 + * 8. dm_sysfs_exit continues, the dm device is unloaded, module reference
1507 + * count is decremented
1508 + * 9. The user unloads the dm module
1509 + * 10. The other process that was rescheduled in step 7 continues to run,
1510 + * it is now executing code in unloaded module, so it crashes
1511 + *
1512 + * Note that if the process that takes the foreign reference to dm kobject
1513 + * has a low priority and the system is sufficiently loaded with
1514 + * higher-priority processes that prevent the low-priority process from
1515 + * being scheduled long enough, this bug may really happen.
1516 + *
1517 + * In order to fix this module unload race, we place the release method
1518 + * into helper code that is compiled directly into the kernel.
1519 + */
1520 +
1521 +void dm_kobject_release(struct kobject *kobj)
1522 +{
1523 + complete(dm_get_completion_from_kobject(kobj));
1524 +}
1525 +
1526 +EXPORT_SYMBOL(dm_kobject_release);
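The pattern the new file introduces can be modelled outside the kernel: the object embeds a completion, the release callback lives in code that cannot be unloaded and signals it, and teardown waits on that signal after dropping its own reference. A rough pthread-based analogue, with the mapping to the kernel primitives noted in comments (a sketch, not the dm implementation):

#include <pthread.h>
#include <stdio.h>

struct holder {
        int refcount;                /* stands in for the kobject refcount */
        pthread_mutex_t lock;
        pthread_cond_t released;     /* stands in for struct completion */
        int done;
};

static void holder_put(struct holder *h)
{
        pthread_mutex_lock(&h->lock);
        if (--h->refcount == 0) {
                h->done = 1;                     /* complete() */
                pthread_cond_signal(&h->released);
        }
        pthread_mutex_unlock(&h->lock);
}

static void teardown(struct holder *h)
{
        holder_put(h);                           /* kobject_put() */
        pthread_mutex_lock(&h->lock);
        while (!h->done)                         /* wait_for_completion() */
                pthread_cond_wait(&h->released, &h->lock);
        pthread_mutex_unlock(&h->lock);
        puts("all references gone; safe to unload");
}

int main(void)
{
        struct holder h = { .refcount = 2,
                            .lock = PTHREAD_MUTEX_INITIALIZER,
                            .released = PTHREAD_COND_INITIALIZER };
        holder_put(&h);      /* the "foreign" reference from step 1 drops */
        teardown(&h);        /* the dm_sysfs_exit() path */
        return 0;
}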
1527 diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
1528 index 84d2b91e4efb..c62c5ab6aed5 100644
1529 --- a/drivers/md/dm-sysfs.c
1530 +++ b/drivers/md/dm-sysfs.c
1531 @@ -86,6 +86,7 @@ static const struct sysfs_ops dm_sysfs_ops = {
1532 static struct kobj_type dm_ktype = {
1533 .sysfs_ops = &dm_sysfs_ops,
1534 .default_attrs = dm_attrs,
1535 + .release = dm_kobject_release,
1536 };
1537
1538 /*
1539 @@ -104,5 +105,7 @@ int dm_sysfs_init(struct mapped_device *md)
1540 */
1541 void dm_sysfs_exit(struct mapped_device *md)
1542 {
1543 - kobject_put(dm_kobject(md));
1544 + struct kobject *kobj = dm_kobject(md);
1545 + kobject_put(kobj);
1546 + wait_for_completion(dm_get_completion_from_kobject(kobj));
1547 }
1548 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
1549 index 8a30ad54bd46..7da347665552 100644
1550 --- a/drivers/md/dm-thin-metadata.c
1551 +++ b/drivers/md/dm-thin-metadata.c
1552 @@ -1349,6 +1349,12 @@ dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)
1553 return td->id;
1554 }
1555
1556 +/*
1557 + * Check whether @time (of block creation) is older than @td's last snapshot.
1558 + * If so then the associated block is shared with the last snapshot device.
1559 + * Any block on a device created *after* the device last got snapshotted is
1560 + * necessarily not shared.
1561 + */
1562 static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)
1563 {
1564 return td->snapshotted_time > time;
1565 @@ -1458,6 +1464,20 @@ int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
1566 return r;
1567 }
1568
1569 +int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
1570 +{
1571 + int r;
1572 + uint32_t ref_count;
1573 +
1574 + down_read(&pmd->root_lock);
1575 + r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
1576 + if (!r)
1577 + *result = (ref_count != 0);
1578 + up_read(&pmd->root_lock);
1579 +
1580 + return r;
1581 +}
1582 +
1583 bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
1584 {
1585 int r;
1586 diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
1587 index 7bcc0e1d6238..2edf5dbac76a 100644
1588 --- a/drivers/md/dm-thin-metadata.h
1589 +++ b/drivers/md/dm-thin-metadata.h
1590 @@ -181,6 +181,8 @@ int dm_pool_get_data_block_size(struct dm_pool_metadata *pmd, sector_t *result);
1591
1592 int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
1593
1594 +int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
1595 +
1596 /*
1597 * Returns -ENOSPC if the new size is too small and already allocated
1598 * blocks would be lost.
1599 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
1600 index ee29037ffc2e..bc0c97d7921e 100644
1601 --- a/drivers/md/dm-thin.c
1602 +++ b/drivers/md/dm-thin.c
1603 @@ -512,6 +512,7 @@ struct dm_thin_new_mapping {
1604 unsigned quiesced:1;
1605 unsigned prepared:1;
1606 unsigned pass_discard:1;
1607 + unsigned definitely_not_shared:1;
1608
1609 struct thin_c *tc;
1610 dm_block_t virt_block;
1611 @@ -683,7 +684,15 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
1612 cell_defer_no_holder(tc, m->cell2);
1613
1614 if (m->pass_discard)
1615 - remap_and_issue(tc, m->bio, m->data_block);
1616 + if (m->definitely_not_shared)
1617 + remap_and_issue(tc, m->bio, m->data_block);
1618 + else {
1619 + bool used = false;
1620 + if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
1621 + bio_endio(m->bio, 0);
1622 + else
1623 + remap_and_issue(tc, m->bio, m->data_block);
1624 + }
1625 else
1626 bio_endio(m->bio, 0);
1627
1628 @@ -751,13 +760,17 @@ static int ensure_next_mapping(struct pool *pool)
1629
1630 static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
1631 {
1632 - struct dm_thin_new_mapping *r = pool->next_mapping;
1633 + struct dm_thin_new_mapping *m = pool->next_mapping;
1634
1635 BUG_ON(!pool->next_mapping);
1636
1637 + memset(m, 0, sizeof(struct dm_thin_new_mapping));
1638 + INIT_LIST_HEAD(&m->list);
1639 + m->bio = NULL;
1640 +
1641 pool->next_mapping = NULL;
1642
1643 - return r;
1644 + return m;
1645 }
1646
1647 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
1648 @@ -769,15 +782,10 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
1649 struct pool *pool = tc->pool;
1650 struct dm_thin_new_mapping *m = get_next_mapping(pool);
1651
1652 - INIT_LIST_HEAD(&m->list);
1653 - m->quiesced = 0;
1654 - m->prepared = 0;
1655 m->tc = tc;
1656 m->virt_block = virt_block;
1657 m->data_block = data_dest;
1658 m->cell = cell;
1659 - m->err = 0;
1660 - m->bio = NULL;
1661
1662 if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
1663 m->quiesced = 1;
1664 @@ -840,15 +848,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
1665 struct pool *pool = tc->pool;
1666 struct dm_thin_new_mapping *m = get_next_mapping(pool);
1667
1668 - INIT_LIST_HEAD(&m->list);
1669 m->quiesced = 1;
1670 m->prepared = 0;
1671 m->tc = tc;
1672 m->virt_block = virt_block;
1673 m->data_block = data_block;
1674 m->cell = cell;
1675 - m->err = 0;
1676 - m->bio = NULL;
1677
1678 /*
1679 * If the whole block of data is being overwritten or we are not
1680 @@ -1040,12 +1045,12 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
1681 */
1682 m = get_next_mapping(pool);
1683 m->tc = tc;
1684 - m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown;
1685 + m->pass_discard = pool->pf.discard_passdown;
1686 + m->definitely_not_shared = !lookup_result.shared;
1687 m->virt_block = block;
1688 m->data_block = lookup_result.block;
1689 m->cell = cell;
1690 m->cell2 = cell2;
1691 - m->err = 0;
1692 m->bio = bio;
1693
1694 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
1695 @@ -1390,16 +1395,16 @@ static enum pool_mode get_pool_mode(struct pool *pool)
1696 return pool->pf.mode;
1697 }
1698
1699 -static void set_pool_mode(struct pool *pool, enum pool_mode mode)
1700 +static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1701 {
1702 int r;
1703 + enum pool_mode old_mode = pool->pf.mode;
1704
1705 - pool->pf.mode = mode;
1706 -
1707 - switch (mode) {
1708 + switch (new_mode) {
1709 case PM_FAIL:
1710 - DMERR("%s: switching pool to failure mode",
1711 - dm_device_name(pool->pool_md));
1712 + if (old_mode != new_mode)
1713 + DMERR("%s: switching pool to failure mode",
1714 + dm_device_name(pool->pool_md));
1715 dm_pool_metadata_read_only(pool->pmd);
1716 pool->process_bio = process_bio_fail;
1717 pool->process_discard = process_bio_fail;
1718 @@ -1408,13 +1413,15 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
1719 break;
1720
1721 case PM_READ_ONLY:
1722 - DMERR("%s: switching pool to read-only mode",
1723 - dm_device_name(pool->pool_md));
1724 + if (old_mode != new_mode)
1725 + DMERR("%s: switching pool to read-only mode",
1726 + dm_device_name(pool->pool_md));
1727 r = dm_pool_abort_metadata(pool->pmd);
1728 if (r) {
1729 DMERR("%s: aborting transaction failed",
1730 dm_device_name(pool->pool_md));
1731 - set_pool_mode(pool, PM_FAIL);
1732 + new_mode = PM_FAIL;
1733 + set_pool_mode(pool, new_mode);
1734 } else {
1735 dm_pool_metadata_read_only(pool->pmd);
1736 pool->process_bio = process_bio_read_only;
1737 @@ -1425,6 +1432,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
1738 break;
1739
1740 case PM_WRITE:
1741 + if (old_mode != new_mode)
1742 + DMINFO("%s: switching pool to write mode",
1743 + dm_device_name(pool->pool_md));
1744 dm_pool_metadata_read_write(pool->pmd);
1745 pool->process_bio = process_bio;
1746 pool->process_discard = process_discard;
1747 @@ -1432,6 +1442,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
1748 pool->process_prepared_discard = process_prepared_discard;
1749 break;
1750 }
1751 +
1752 + pool->pf.mode = new_mode;
1753 }
1754
1755 /*----------------------------------------------------------------*/
1756 @@ -1648,6 +1660,17 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
1757 enum pool_mode new_mode = pt->adjusted_pf.mode;
1758
1759 /*
1760 + * Don't change the pool's mode until set_pool_mode() below.
1761 + * Otherwise the pool's process_* function pointers may
1762 + * not match the desired pool mode.
1763 + */
1764 + pt->adjusted_pf.mode = old_mode;
1765 +
1766 + pool->ti = ti;
1767 + pool->pf = pt->adjusted_pf;
1768 + pool->low_water_blocks = pt->low_water_blocks;
1769 +
1770 + /*
1771 * If we were in PM_FAIL mode, rollback of metadata failed. We're
1772 * not going to recover without a thin_repair. So we never let the
1773 * pool move out of the old mode. On the other hand a PM_READ_ONLY
1774 @@ -1657,10 +1680,6 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
1775 if (old_mode == PM_FAIL)
1776 new_mode = old_mode;
1777
1778 - pool->ti = ti;
1779 - pool->low_water_blocks = pt->low_water_blocks;
1780 - pool->pf = pt->adjusted_pf;
1781 -
1782 set_pool_mode(pool, new_mode);
1783
1784 return 0;
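The reworked passdown path above boils down to a small decision: pass the discard to the data device only when passdown is enabled and the block is provably unshared, either known at mapping time (definitely_not_shared) or confirmed by a successful, zero-refcount space-map lookup; any lookup error conservatively completes the bio without passdown. A standalone model of that decision (illustrative only):

#include <stdbool.h>
#include <stdio.h>

static bool should_pass_down(bool pass_discard, bool definitely_not_shared,
                             int lookup_err, bool still_used)
{
        if (!pass_discard)
                return false;
        if (definitely_not_shared)
                return true;
        /* a failed or non-zero lookup means the block may be shared */
        return !lookup_err && !still_used;
}

int main(void)
{
        printf("%d\n", should_pass_down(true, false, 0, false)); /* 1 */
        printf("%d\n", should_pass_down(true, false, 0, true));  /* 0: shared */
        return 0;
}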
1785 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
1786 index b3e26c7d1417..a562d5a4fa9d 100644
1787 --- a/drivers/md/dm.c
1788 +++ b/drivers/md/dm.c
1789 @@ -194,8 +194,8 @@ struct mapped_device {
1790 /* forced geometry settings */
1791 struct hd_geometry geometry;
1792
1793 - /* sysfs handle */
1794 - struct kobject kobj;
1795 + /* kobject and completion */
1796 + struct dm_kobject_holder kobj_holder;
1797
1798 /* zero-length flush that will be cloned and submitted to targets */
1799 struct bio flush_bio;
1800 @@ -2005,6 +2005,7 @@ static struct mapped_device *alloc_dev(int minor)
1801 init_waitqueue_head(&md->wait);
1802 INIT_WORK(&md->work, dm_wq_work);
1803 init_waitqueue_head(&md->eventq);
1804 + init_completion(&md->kobj_holder.completion);
1805
1806 md->disk->major = _major;
1807 md->disk->first_minor = minor;
1808 @@ -2866,20 +2867,14 @@ struct gendisk *dm_disk(struct mapped_device *md)
1809
1810 struct kobject *dm_kobject(struct mapped_device *md)
1811 {
1812 - return &md->kobj;
1813 + return &md->kobj_holder.kobj;
1814 }
1815
1816 -/*
1817 - * struct mapped_device should not be exported outside of dm.c
1818 - * so use this check to verify that kobj is part of md structure
1819 - */
1820 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
1821 {
1822 struct mapped_device *md;
1823
1824 - md = container_of(kobj, struct mapped_device, kobj);
1825 - if (&md->kobj != kobj)
1826 - return NULL;
1827 + md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
1828
1829 if (test_bit(DMF_FREEING, &md->flags) ||
1830 dm_deleting_md(md))
1831 diff --git a/drivers/md/dm.h b/drivers/md/dm.h
1832 index 1d1ad7b7e527..a8db73cc708f 100644
1833 --- a/drivers/md/dm.h
1834 +++ b/drivers/md/dm.h
1835 @@ -15,6 +15,8 @@
1836 #include <linux/list.h>
1837 #include <linux/blkdev.h>
1838 #include <linux/hdreg.h>
1839 +#include <linux/completion.h>
1840 +#include <linux/kobject.h>
1841
1842 #include "dm-stats.h"
1843
1844 @@ -138,12 +140,27 @@ void dm_interface_exit(void);
1845 /*
1846 * sysfs interface
1847 */
1848 +struct dm_kobject_holder {
1849 + struct kobject kobj;
1850 + struct completion completion;
1851 +};
1852 +
1853 +static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
1854 +{
1855 + return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
1856 +}
1857 +
1858 int dm_sysfs_init(struct mapped_device *md);
1859 void dm_sysfs_exit(struct mapped_device *md);
1860 struct kobject *dm_kobject(struct mapped_device *md);
1861 struct mapped_device *dm_get_from_kobject(struct kobject *kobj);
1862
1863 /*
1864 + * The kobject helper
1865 + */
1866 +void dm_kobject_release(struct kobject *kobj);
1867 +
1868 +/*
1869 * Targets for linear and striped mappings
1870 */
1871 int dm_linear_init(void);
1872 diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
1873 index 466a60bbd716..aacbe70c2c2e 100644
1874 --- a/drivers/md/persistent-data/dm-space-map-common.c
1875 +++ b/drivers/md/persistent-data/dm-space-map-common.c
1876 @@ -245,6 +245,10 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
1877 return -EINVAL;
1878 }
1879
1880 + /*
1881 + * We need to set this before the dm_tm_new_block() call below.
1882 + */
1883 + ll->nr_blocks = nr_blocks;
1884 for (i = old_blocks; i < blocks; i++) {
1885 struct dm_block *b;
1886 struct disk_index_entry idx;
1887 @@ -252,6 +256,7 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
1888 r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b);
1889 if (r < 0)
1890 return r;
1891 +
1892 idx.blocknr = cpu_to_le64(dm_block_location(b));
1893
1894 r = dm_tm_unlock(ll->tm, b);
1895 @@ -266,7 +271,6 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
1896 return r;
1897 }
1898
1899 - ll->nr_blocks = nr_blocks;
1900 return 0;
1901 }
1902
1903 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
1904 index 58fc1eef7499..afb419e514bf 100644
1905 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
1906 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
1907 @@ -608,20 +608,38 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
1908 * Flick into a mode where all blocks get allocated in the new area.
1909 */
1910 smm->begin = old_len;
1911 - memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
1912 + memcpy(sm, &bootstrap_ops, sizeof(*sm));
1913
1914 /*
1915 * Extend.
1916 */
1917 r = sm_ll_extend(&smm->ll, extra_blocks);
1918 + if (r)
1919 + goto out;
1920
1921 /*
1922 - * Switch back to normal behaviour.
1923 + * We repeatedly increment then commit until the commit doesn't
1924 + * allocate any new blocks.
1925 */
1926 - memcpy(&smm->sm, &ops, sizeof(smm->sm));
1927 - for (i = old_len; !r && i < smm->begin; i++)
1928 - r = sm_ll_inc(&smm->ll, i, &ev);
1929 + do {
1930 + for (i = old_len; !r && i < smm->begin; i++) {
1931 + r = sm_ll_inc(&smm->ll, i, &ev);
1932 + if (r)
1933 + goto out;
1934 + }
1935 + old_len = smm->begin;
1936 +
1937 + r = sm_ll_commit(&smm->ll);
1938 + if (r)
1939 + goto out;
1940 +
1941 + } while (old_len != smm->begin);
1942
1943 +out:
1944 + /*
1945 + * Switch back to normal behaviour.
1946 + */
1947 + memcpy(sm, &ops, sizeof(*sm));
1948 return r;
1949 }
1950
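The loop added above iterates to a fixed point because committing can itself allocate metadata blocks, whose refcounts then also need incrementing. A toy model of the increment-then-commit loop, where commit() stands in for sm_ll_commit() and grows the block count for the first couple of rounds (all values are made up):

#include <stdio.h>

static unsigned end = 10;
static unsigned commit_round;

static unsigned commit(void)
{
        if (commit_round++ < 2)
                end += 3;       /* commit consumed 3 new metadata blocks */
        return end;
}

int main(void)
{
        unsigned old_len = 0, begin = end;

        do {
                for (unsigned i = old_len; i < begin; i++)
                        ;       /* sm_ll_inc(i): take a ref on block i */
                old_len = begin;
                begin = commit();
        } while (old_len != begin);

        printf("stable at %u blocks\n", begin);
        return 0;
}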
1951 diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
1952 index 419a2d6b4349..7e0f61930a12 100644
1953 --- a/drivers/media/dvb-core/dvb-usb-ids.h
1954 +++ b/drivers/media/dvb-core/dvb-usb-ids.h
1955 @@ -239,6 +239,7 @@
1956 #define USB_PID_AVERMEDIA_A835B_4835 0x4835
1957 #define USB_PID_AVERMEDIA_1867 0x1867
1958 #define USB_PID_AVERMEDIA_A867 0xa867
1959 +#define USB_PID_AVERMEDIA_H335 0x0335
1960 #define USB_PID_AVERMEDIA_TWINSTAR 0x0825
1961 #define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
1962 #define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009
1963 diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
1964 index 90536147bf04..ccac8467a28b 100644
1965 --- a/drivers/media/dvb-frontends/dib8000.c
1966 +++ b/drivers/media/dvb-frontends/dib8000.c
1967 @@ -157,15 +157,10 @@ static u16 dib8000_i2c_read16(struct i2c_device *i2c, u16 reg)
1968 return ret;
1969 }
1970
1971 -static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
1972 +static u16 __dib8000_read_word(struct dib8000_state *state, u16 reg)
1973 {
1974 u16 ret;
1975
1976 - if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
1977 - dprintk("could not acquire lock");
1978 - return 0;
1979 - }
1980 -
1981 state->i2c_write_buffer[0] = reg >> 8;
1982 state->i2c_write_buffer[1] = reg & 0xff;
1983
1984 @@ -183,6 +178,21 @@ static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
1985 dprintk("i2c read error on %d", reg);
1986
1987 ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
1988 +
1989 + return ret;
1990 +}
1991 +
1992 +static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
1993 +{
1994 + u16 ret;
1995 +
1996 + if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
1997 + dprintk("could not acquire lock");
1998 + return 0;
1999 + }
2000 +
2001 + ret = __dib8000_read_word(state, reg);
2002 +
2003 mutex_unlock(&state->i2c_buffer_lock);
2004
2005 return ret;
2006 @@ -192,8 +202,15 @@ static u32 dib8000_read32(struct dib8000_state *state, u16 reg)
2007 {
2008 u16 rw[2];
2009
2010 - rw[0] = dib8000_read_word(state, reg + 0);
2011 - rw[1] = dib8000_read_word(state, reg + 1);
2012 + if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
2013 + dprintk("could not acquire lock");
2014 + return 0;
2015 + }
2016 +
2017 + rw[0] = __dib8000_read_word(state, reg + 0);
2018 + rw[1] = __dib8000_read_word(state, reg + 1);
2019 +
2020 + mutex_unlock(&state->i2c_buffer_lock);
2021
2022 return ((rw[0] << 16) | (rw[1]));
2023 }
2024 @@ -2445,7 +2462,8 @@ static int dib8000_autosearch_start(struct dvb_frontend *fe)
2025 if (state->revision == 0x8090)
2026 internal = dib8000_read32(state, 23) / 1000;
2027
2028 - if (state->autosearch_state == AS_SEARCHING_FFT) {
2029 + if ((state->revision >= 0x8002) &&
2030 + (state->autosearch_state == AS_SEARCHING_FFT)) {
2031 dib8000_write_word(state, 37, 0x0065); /* P_ctrl_pha_off_max default values */
2032 dib8000_write_word(state, 116, 0x0000); /* P_ana_gain to 0 */
2033
2034 @@ -2481,7 +2499,8 @@ static int dib8000_autosearch_start(struct dvb_frontend *fe)
2035 dib8000_write_word(state, 770, (dib8000_read_word(state, 770) & 0xdfff) | (1 << 13)); /* P_restart_ccg = 1 */
2036 dib8000_write_word(state, 770, (dib8000_read_word(state, 770) & 0xdfff) | (0 << 13)); /* P_restart_ccg = 0 */
2037 dib8000_write_word(state, 0, (dib8000_read_word(state, 0) & 0x7ff) | (0 << 15) | (1 << 13)); /* P_restart_search = 0; */
2038 - } else if (state->autosearch_state == AS_SEARCHING_GUARD) {
2039 + } else if ((state->revision >= 0x8002) &&
2040 + (state->autosearch_state == AS_SEARCHING_GUARD)) {
2041 c->transmission_mode = TRANSMISSION_MODE_8K;
2042 c->guard_interval = GUARD_INTERVAL_1_8;
2043 c->inversion = 0;
2044 @@ -2583,7 +2602,8 @@ static int dib8000_autosearch_irq(struct dvb_frontend *fe)
2045 struct dib8000_state *state = fe->demodulator_priv;
2046 u16 irq_pending = dib8000_read_word(state, 1284);
2047
2048 - if (state->autosearch_state == AS_SEARCHING_FFT) {
2049 + if ((state->revision >= 0x8002) &&
2050 + (state->autosearch_state == AS_SEARCHING_FFT)) {
2051 if (irq_pending & 0x1) {
2052 dprintk("dib8000_autosearch_irq: max correlation result available");
2053 return 3;
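The dib8000 refactor splits each register read into a locked wrapper and an unlocked __-prefixed helper that assumes the caller holds the i2c buffer lock, so dib8000_read32() can hold the lock across both 16-bit halves instead of letting another thread interleave between them. A pthread sketch of that locked/unlocked split (simplified, not the driver code):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
static uint16_t regs[2] = { 0x1234, 0x5678 };

static uint16_t __read_word(unsigned reg)   /* caller holds buf_lock */
{
        return regs[reg];
}

static uint32_t read32(unsigned reg)
{
        uint32_t hi, lo;

        pthread_mutex_lock(&buf_lock);
        hi = __read_word(reg + 0);
        lo = __read_word(reg + 1);
        pthread_mutex_unlock(&buf_lock);

        return (hi << 16) | lo;
}

int main(void)
{
        printf("0x%08x\n", read32(0)); /* 0x12345678 */
        return 0;
}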
2054 diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
2055 index 4da5272075cb..02699c111019 100644
2056 --- a/drivers/media/dvb-frontends/m88rs2000.c
2057 +++ b/drivers/media/dvb-frontends/m88rs2000.c
2058 @@ -110,28 +110,94 @@ static u8 m88rs2000_readreg(struct m88rs2000_state *state, u8 reg)
2059 return b1[0];
2060 }
2061
2062 +static u32 m88rs2000_get_mclk(struct dvb_frontend *fe)
2063 +{
2064 + struct m88rs2000_state *state = fe->demodulator_priv;
2065 + u32 mclk;
2066 + u8 reg;
2067 + /* Must not be 0x00 or 0xff */
2068 + reg = m88rs2000_readreg(state, 0x86);
2069 + if (!reg || reg == 0xff)
2070 + return 0;
2071 +
2072 + reg /= 2;
2073 + reg += 1;
2074 +
2075 + mclk = (u32)(reg * RS2000_FE_CRYSTAL_KHZ + 28 / 2) / 28;
2076 +
2077 + return mclk;
2078 +}
2079 +
2080 +static int m88rs2000_set_carrieroffset(struct dvb_frontend *fe, s16 offset)
2081 +{
2082 + struct m88rs2000_state *state = fe->demodulator_priv;
2083 + u32 mclk;
2084 + s32 tmp;
2085 + u8 reg;
2086 + int ret;
2087 +
2088 + mclk = m88rs2000_get_mclk(fe);
2089 + if (!mclk)
2090 + return -EINVAL;
2091 +
2092 + tmp = (offset * 4096 + (s32)mclk / 2) / (s32)mclk;
2093 + if (tmp < 0)
2094 + tmp += 4096;
2095 +
2096 + /* Carrier Offset */
2097 + ret = m88rs2000_writereg(state, 0x9c, (u8)(tmp >> 4));
2098 +
2099 + reg = m88rs2000_readreg(state, 0x9d);
2100 + reg &= 0xf;
2101 + reg |= (u8)(tmp & 0xf) << 4;
2102 +
2103 + ret |= m88rs2000_writereg(state, 0x9d, reg);
2104 +
2105 + return ret;
2106 +}
2107 +
2108 static int m88rs2000_set_symbolrate(struct dvb_frontend *fe, u32 srate)
2109 {
2110 struct m88rs2000_state *state = fe->demodulator_priv;
2111 int ret;
2112 - u32 temp;
2113 + u64 temp;
2114 + u32 mclk;
2115 u8 b[3];
2116
2117 if ((srate < 1000000) || (srate > 45000000))
2118 return -EINVAL;
2119
2120 + mclk = m88rs2000_get_mclk(fe);
2121 + if (!mclk)
2122 + return -EINVAL;
2123 +
2124 temp = srate / 1000;
2125 - temp *= 11831;
2126 - temp /= 68;
2127 - temp -= 3;
2128 + temp *= 1 << 24;
2129 +
2130 + do_div(temp, mclk);
2131
2132 b[0] = (u8) (temp >> 16) & 0xff;
2133 b[1] = (u8) (temp >> 8) & 0xff;
2134 b[2] = (u8) temp & 0xff;
2135 +
2136 ret = m88rs2000_writereg(state, 0x93, b[2]);
2137 ret |= m88rs2000_writereg(state, 0x94, b[1]);
2138 ret |= m88rs2000_writereg(state, 0x95, b[0]);
2139
2140 + if (srate > 10000000)
2141 + ret |= m88rs2000_writereg(state, 0xa0, 0x20);
2142 + else
2143 + ret |= m88rs2000_writereg(state, 0xa0, 0x60);
2144 +
2145 + ret |= m88rs2000_writereg(state, 0xa1, 0xe0);
2146 +
2147 + if (srate > 12000000)
2148 + ret |= m88rs2000_writereg(state, 0xa3, 0x20);
2149 + else if (srate > 2800000)
2150 + ret |= m88rs2000_writereg(state, 0xa3, 0x98);
2151 + else
2152 + ret |= m88rs2000_writereg(state, 0xa3, 0x90);
2153 +
2154 deb_info("m88rs2000: m88rs2000_set_symbolrate\n");
2155 return ret;
2156 }
2157 @@ -261,8 +327,6 @@ struct inittab m88rs2000_shutdown[] = {
2158
2159 struct inittab fe_reset[] = {
2160 {DEMOD_WRITE, 0x00, 0x01},
2161 - {DEMOD_WRITE, 0xf1, 0xbf},
2162 - {DEMOD_WRITE, 0x00, 0x01},
2163 {DEMOD_WRITE, 0x20, 0x81},
2164 {DEMOD_WRITE, 0x21, 0x80},
2165 {DEMOD_WRITE, 0x10, 0x33},
2166 @@ -305,9 +369,6 @@ struct inittab fe_trigger[] = {
2167 {DEMOD_WRITE, 0x9b, 0x64},
2168 {DEMOD_WRITE, 0x9e, 0x00},
2169 {DEMOD_WRITE, 0x9f, 0xf8},
2170 - {DEMOD_WRITE, 0xa0, 0x20},
2171 - {DEMOD_WRITE, 0xa1, 0xe0},
2172 - {DEMOD_WRITE, 0xa3, 0x38},
2173 {DEMOD_WRITE, 0x98, 0xff},
2174 {DEMOD_WRITE, 0xc0, 0x0f},
2175 {DEMOD_WRITE, 0x89, 0x01},
2176 @@ -540,9 +601,8 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
2177 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
2178 fe_status_t status;
2179 int i, ret = 0;
2180 - s32 tmp;
2181 u32 tuner_freq;
2182 - u16 offset = 0;
2183 + s16 offset = 0;
2184 u8 reg;
2185
2186 state->no_lock_count = 0;
2187 @@ -567,29 +627,26 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
2188 if (ret < 0)
2189 return -ENODEV;
2190
2191 - offset = tuner_freq - c->frequency;
2192 -
2193 - /* calculate offset assuming 96000kHz*/
2194 - tmp = offset;
2195 - tmp *= 65536;
2196 -
2197 - tmp = (2 * tmp + 96000) / (2 * 96000);
2198 - if (tmp < 0)
2199 - tmp += 65536;
2200 + offset = (s16)((s32)tuner_freq - c->frequency);
2201
2202 - offset = tmp & 0xffff;
2203 + /* default mclk value 96.4285 * 2 * 1000 = 192857 */
2204 + if (((c->frequency % 192857) >= (192857 - 3000)) ||
2205 + (c->frequency % 192857) <= 3000)
2206 + ret = m88rs2000_writereg(state, 0x86, 0xc2);
2207 + else
2208 + ret = m88rs2000_writereg(state, 0x86, 0xc6);
2209
2210 - ret = m88rs2000_writereg(state, 0x9a, 0x30);
2211 - /* Unknown usually 0xc6 sometimes 0xc1 */
2212 - reg = m88rs2000_readreg(state, 0x86);
2213 - ret |= m88rs2000_writereg(state, 0x86, reg);
2214 - /* Offset lower nibble always 0 */
2215 - ret |= m88rs2000_writereg(state, 0x9c, (offset >> 8));
2216 - ret |= m88rs2000_writereg(state, 0x9d, offset & 0xf0);
2217 + ret |= m88rs2000_set_carrieroffset(fe, offset);
2218 + if (ret < 0)
2219 + return -ENODEV;
2220
2221 + /* Reset demod by symbol rate */
2222 + if (c->symbol_rate > 27500000)
2223 + ret = m88rs2000_writereg(state, 0xf1, 0xa4);
2224 + else
2225 + ret = m88rs2000_writereg(state, 0xf1, 0xbf);
2226
2227 - /* Reset Demod */
2228 - ret = m88rs2000_tab_set(state, fe_reset);
2229 + ret |= m88rs2000_tab_set(state, fe_reset);
2230 if (ret < 0)
2231 return -ENODEV;
2232
2233 diff --git a/drivers/media/dvb-frontends/m88rs2000.h b/drivers/media/dvb-frontends/m88rs2000.h
2234 index 14ce31e76ae6..0a50ea90736b 100644
2235 --- a/drivers/media/dvb-frontends/m88rs2000.h
2236 +++ b/drivers/media/dvb-frontends/m88rs2000.h
2237 @@ -53,6 +53,8 @@ static inline struct dvb_frontend *m88rs2000_attach(
2238 }
2239 #endif /* CONFIG_DVB_M88RS2000 */
2240
2241 +#define RS2000_FE_CRYSTAL_KHZ 27000
2242 +
2243 enum {
2244 DEMOD_WRITE = 0x1,
2245 WRITE_DELAY = 0x10,
2246 diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
2247 index fbca9856313a..4bf057544607 100644
2248 --- a/drivers/media/dvb-frontends/nxt200x.c
2249 +++ b/drivers/media/dvb-frontends/nxt200x.c
2250 @@ -40,7 +40,7 @@
2251 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2252
2253 /* Max transfer size done by I2C transfer functions */
2254 -#define MAX_XFER_SIZE 64
2255 +#define MAX_XFER_SIZE 256
2256
2257 #define NXT2002_DEFAULT_FIRMWARE "dvb-fe-nxt2002.fw"
2258 #define NXT2004_DEFAULT_FIRMWARE "dvb-fe-nxt2004.fw"
2259 diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
2260 index 084263dd126f..4a521a9a6e9d 100644
2261 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
2262 +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
2263 @@ -177,21 +177,6 @@ unlock:
2264 mutex_unlock(&dev->mfc_mutex);
2265 }
2266
2267 -static enum s5p_mfc_node_type s5p_mfc_get_node_type(struct file *file)
2268 -{
2269 - struct video_device *vdev = video_devdata(file);
2270 -
2271 - if (!vdev) {
2272 - mfc_err("failed to get video_device");
2273 - return MFCNODE_INVALID;
2274 - }
2275 - if (vdev->index == 0)
2276 - return MFCNODE_DECODER;
2277 - else if (vdev->index == 1)
2278 - return MFCNODE_ENCODER;
2279 - return MFCNODE_INVALID;
2280 -}
2281 -
2282 static void s5p_mfc_clear_int_flags(struct s5p_mfc_dev *dev)
2283 {
2284 mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
2285 @@ -701,6 +686,7 @@ irq_cleanup_hw:
2286 /* Open an MFC node */
2287 static int s5p_mfc_open(struct file *file)
2288 {
2289 + struct video_device *vdev = video_devdata(file);
2290 struct s5p_mfc_dev *dev = video_drvdata(file);
2291 struct s5p_mfc_ctx *ctx = NULL;
2292 struct vb2_queue *q;
2293 @@ -738,7 +724,7 @@ static int s5p_mfc_open(struct file *file)
2294 /* Mark context as idle */
2295 clear_work_bit_irqsave(ctx);
2296 dev->ctx[ctx->num] = ctx;
2297 - if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
2298 + if (vdev == dev->vfd_dec) {
2299 ctx->type = MFCINST_DECODER;
2300 ctx->c_ops = get_dec_codec_ops();
2301 s5p_mfc_dec_init(ctx);
2302 @@ -748,7 +734,7 @@ static int s5p_mfc_open(struct file *file)
2303 mfc_err("Failed to setup mfc controls\n");
2304 goto err_ctrls_setup;
2305 }
2306 - } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
2307 + } else if (vdev == dev->vfd_enc) {
2308 ctx->type = MFCINST_ENCODER;
2309 ctx->c_ops = get_enc_codec_ops();
2310 /* only for encoder */
2311 @@ -793,10 +779,10 @@ static int s5p_mfc_open(struct file *file)
2312 q = &ctx->vq_dst;
2313 q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
2314 q->drv_priv = &ctx->fh;
2315 - if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
2316 + if (vdev == dev->vfd_dec) {
2317 q->io_modes = VB2_MMAP;
2318 q->ops = get_dec_queue_ops();
2319 - } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
2320 + } else if (vdev == dev->vfd_enc) {
2321 q->io_modes = VB2_MMAP | VB2_USERPTR;
2322 q->ops = get_enc_queue_ops();
2323 } else {
2324 @@ -815,10 +801,10 @@ static int s5p_mfc_open(struct file *file)
2325 q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
2326 q->io_modes = VB2_MMAP;
2327 q->drv_priv = &ctx->fh;
2328 - if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
2329 + if (vdev == dev->vfd_dec) {
2330 q->io_modes = VB2_MMAP;
2331 q->ops = get_dec_queue_ops();
2332 - } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
2333 + } else if (vdev == dev->vfd_enc) {
2334 q->io_modes = VB2_MMAP | VB2_USERPTR;
2335 q->ops = get_enc_queue_ops();
2336 } else {
2337 diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
2338 index 6920b546181a..823812c6b9b0 100644
2339 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
2340 +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
2341 @@ -115,15 +115,6 @@ enum s5p_mfc_fmt_type {
2342 };
2343
2344 /**
2345 - * enum s5p_mfc_node_type - The type of an MFC device node.
2346 - */
2347 -enum s5p_mfc_node_type {
2348 - MFCNODE_INVALID = -1,
2349 - MFCNODE_DECODER = 0,
2350 - MFCNODE_ENCODER = 1,
2351 -};
2352 -
2353 -/**
2354 * enum s5p_mfc_inst_type - The type of an MFC instance.
2355 */
2356 enum s5p_mfc_inst_type {
2357 diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
2358 index 90cfa35ef6e6..eeab79bdd2aa 100644
2359 --- a/drivers/media/usb/dvb-usb-v2/anysee.c
2360 +++ b/drivers/media/usb/dvb-usb-v2/anysee.c
2361 @@ -442,6 +442,7 @@ static struct cxd2820r_config anysee_cxd2820r_config = {
2362 * IOD[0] ZL10353 1=enabled
2363 * IOE[0] tuner 0=enabled
2364 * tuner is behind ZL10353 I2C-gate
2365 + * tuner is behind TDA10023 I2C-gate
2366 *
2367 * E7 TC VID=1c73 PID=861f HW=18 FW=0.7 AMTCI=0.5 "anysee-E7TC(LP)"
2368 * PCB: 508TC (rev0.6)
2369 @@ -956,7 +957,7 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
2370
2371 if (fe && adap->fe[1]) {
2372 /* attach tuner for 2nd FE */
2373 - fe = dvb_attach(dvb_pll_attach, adap->fe[0],
2374 + fe = dvb_attach(dvb_pll_attach, adap->fe[1],
2375 (0xc0 >> 1), &d->i2c_adap,
2376 DVB_PLL_SAMSUNG_DTOS403IH102A);
2377 }
2378 diff --git a/drivers/media/usb/dvb-usb-v2/it913x.c b/drivers/media/usb/dvb-usb-v2/it913x.c
2379 index 1cb6899cf797..fe95a586dd5d 100644
2380 --- a/drivers/media/usb/dvb-usb-v2/it913x.c
2381 +++ b/drivers/media/usb/dvb-usb-v2/it913x.c
2382 @@ -799,6 +799,9 @@ static const struct usb_device_id it913x_id_table[] = {
2383 { DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CTVDIGDUAL_V2,
2384 &it913x_properties, "Digital Dual TV Receiver CTVDIGDUAL_V2",
2385 RC_MAP_IT913X_V1) },
2386 + { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_H335,
2387 + &it913x_properties, "Avermedia H335",
2388 + RC_MAP_IT913X_V2) },
2389 {} /* Terminating entry */
2390 };
2391
2392 diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
2393 index b5aaaac427ad..0a30dbf3d05c 100644
2394 --- a/drivers/media/v4l2-core/v4l2-dev.c
2395 +++ b/drivers/media/v4l2-core/v4l2-dev.c
2396 @@ -872,8 +872,8 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
2397
2398 /* Should not happen since we thought this minor was free */
2399 WARN_ON(video_device[vdev->minor] != NULL);
2400 - video_device[vdev->minor] = vdev;
2401 vdev->index = get_index(vdev);
2402 + video_device[vdev->minor] = vdev;
2403 mutex_unlock(&videodev_lock);
2404
2405 if (vdev->ioctl_ops)
2406 diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
2407 index 98f95614b5b1..8d39a1221438 100644
2408 --- a/drivers/misc/mei/hbm.c
2409 +++ b/drivers/misc/mei/hbm.c
2410 @@ -593,7 +593,7 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
2411 */
2412 if (dev->hbm_state == MEI_HBM_IDLE) {
2413 dev_dbg(&dev->pdev->dev, "hbm: state is idle ignore spurious messages\n");
2414 - return 0;
2415 + return;
2416 }
2417
2418 switch (mei_msg->hbm_cmd) {
2419 diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
2420 index 1a3163f1407e..4e8212c714b1 100644
2421 --- a/drivers/mmc/card/block.c
2422 +++ b/drivers/mmc/card/block.c
2423 @@ -1959,6 +1959,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
2424 struct mmc_card *card = md->queue.card;
2425 struct mmc_host *host = card->host;
2426 unsigned long flags;
2427 + unsigned int cmd_flags = req ? req->cmd_flags : 0;
2428
2429 if (req && !mq->mqrq_prev->req)
2430 /* claim host only for the first request */
2431 @@ -1974,7 +1975,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
2432 }
2433
2434 mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
2435 - if (req && req->cmd_flags & REQ_DISCARD) {
2436 + if (cmd_flags & REQ_DISCARD) {
2437 /* complete ongoing async transfer before issuing discard */
2438 if (card->host->areq)
2439 mmc_blk_issue_rw_rq(mq, NULL);
2440 @@ -1983,7 +1984,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
2441 ret = mmc_blk_issue_secdiscard_rq(mq, req);
2442 else
2443 ret = mmc_blk_issue_discard_rq(mq, req);
2444 - } else if (req && req->cmd_flags & REQ_FLUSH) {
2445 + } else if (cmd_flags & REQ_FLUSH) {
2446 /* complete ongoing async transfer before issuing flush */
2447 if (card->host->areq)
2448 mmc_blk_issue_rw_rq(mq, NULL);
2449 @@ -1999,7 +2000,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
2450
2451 out:
2452 if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
2453 - (req && (req->cmd_flags & MMC_REQ_SPECIAL_MASK)))
2454 + (cmd_flags & MMC_REQ_SPECIAL_MASK))
2455 /*
2456 * Release host when there are no more requests
2457 * and after special request(discard, flush) is done.
2458 diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
2459 index 5e8823dc3ef6..06da0608283a 100644
2460 --- a/drivers/mmc/core/sd.c
2461 +++ b/drivers/mmc/core/sd.c
2462 @@ -11,6 +11,7 @@
2463 */
2464
2465 #include <linux/err.h>
2466 +#include <linux/sizes.h>
2467 #include <linux/slab.h>
2468 #include <linux/stat.h>
2469
2470 @@ -44,6 +45,13 @@ static const unsigned int tacc_mant[] = {
2471 35, 40, 45, 50, 55, 60, 70, 80,
2472 };
2473
2474 +static const unsigned int sd_au_size[] = {
2475 + 0, SZ_16K / 512, SZ_32K / 512, SZ_64K / 512,
2476 + SZ_128K / 512, SZ_256K / 512, SZ_512K / 512, SZ_1M / 512,
2477 + SZ_2M / 512, SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
2478 + SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
2479 +};
2480 +
2481 #define UNSTUFF_BITS(resp,start,size) \
2482 ({ \
2483 const int __size = size; \
2484 @@ -215,7 +223,7 @@ static int mmc_decode_scr(struct mmc_card *card)
2485 static int mmc_read_ssr(struct mmc_card *card)
2486 {
2487 unsigned int au, es, et, eo;
2488 - int err, i, max_au;
2489 + int err, i;
2490 u32 *ssr;
2491
2492 if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
2493 @@ -239,26 +247,25 @@ static int mmc_read_ssr(struct mmc_card *card)
2494 for (i = 0; i < 16; i++)
2495 ssr[i] = be32_to_cpu(ssr[i]);
2496
2497 - /* SD3.0 increases max AU size to 64MB (0xF) from 4MB (0x9) */
2498 - max_au = card->scr.sda_spec3 ? 0xF : 0x9;
2499 -
2500 /*
2501 * UNSTUFF_BITS only works with four u32s so we have to offset the
2502 * bitfield positions accordingly.
2503 */
2504 au = UNSTUFF_BITS(ssr, 428 - 384, 4);
2505 - if (au > 0 && au <= max_au) {
2506 - card->ssr.au = 1 << (au + 4);
2507 - es = UNSTUFF_BITS(ssr, 408 - 384, 16);
2508 - et = UNSTUFF_BITS(ssr, 402 - 384, 6);
2509 - eo = UNSTUFF_BITS(ssr, 400 - 384, 2);
2510 - if (es && et) {
2511 - card->ssr.erase_timeout = (et * 1000) / es;
2512 - card->ssr.erase_offset = eo * 1000;
2513 + if (au) {
2514 + if (au <= 9 || card->scr.sda_spec3) {
2515 + card->ssr.au = sd_au_size[au];
2516 + es = UNSTUFF_BITS(ssr, 408 - 384, 16);
2517 + et = UNSTUFF_BITS(ssr, 402 - 384, 6);
2518 + if (es && et) {
2519 + eo = UNSTUFF_BITS(ssr, 400 - 384, 2);
2520 + card->ssr.erase_timeout = (et * 1000) / es;
2521 + card->ssr.erase_offset = eo * 1000;
2522 + }
2523 + } else {
2524 + pr_warning("%s: SD Status: Invalid Allocation Unit size.\n",
2525 + mmc_hostname(card->host));
2526 }
2527 - } else {
2528 - pr_warning("%s: SD Status: Invalid Allocation Unit "
2529 - "size.\n", mmc_hostname(card->host));
2530 }
2531 out:
2532 kfree(ssr);
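The rewritten SSR parsing decodes the 4-bit AU field through the new sd_au_size[] table (sizes in 512-byte sectors) and only accepts entries above 9 on SD3.0 (sda_spec3) cards. A compressed sketch of that decode, with only a few table entries filled in and the size constants expanded by hand:

#include <stdbool.h>
#include <stdio.h>

#define SZ_16K (16u << 10)
#define SZ_4M  (4u << 20)
#define SZ_64M (64u << 20)

static const unsigned int sd_au_size[] = {
        0, SZ_16K / 512,
        /* entries 2..8 and 10..14 follow the same progression; omitted */
        [9]  = SZ_4M / 512,
        [15] = SZ_64M / 512,
};

static unsigned decode_au(unsigned au, bool sda_spec3)
{
        if (!au || (au > 9 && !sda_spec3))
                return 0;               /* invalid for this card */
        return sd_au_size[au];
}

int main(void)
{
        printf("au=9  -> %u sectors (4 MiB)\n", decode_au(9, false));
        printf("au=15 -> %u sectors (64 MiB, SD3.0 only)\n",
               decode_au(15, true));
        return 0;
}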
2533 diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
2534 index 92c18779d47e..a0752e9ce977 100644
2535 --- a/drivers/mmc/host/atmel-mci.c
2536 +++ b/drivers/mmc/host/atmel-mci.c
2537 @@ -1193,11 +1193,22 @@ static void atmci_start_request(struct atmel_mci *host,
2538 iflags |= ATMCI_CMDRDY;
2539 cmd = mrq->cmd;
2540 cmdflags = atmci_prepare_command(slot->mmc, cmd);
2541 - atmci_send_command(host, cmd, cmdflags);
2542 +
2543 + /*
2544 + * DMA transfer should be started before sending the command to avoid
2545 + * unexpected errors, especially for read operations in SDIO mode.
2546 + * Unfortunately, in PDC mode, the command has to be sent before starting
2547 + * the transfer.
2548 + */
2549 + if (host->submit_data != &atmci_submit_data_dma)
2550 + atmci_send_command(host, cmd, cmdflags);
2551
2552 if (data)
2553 host->submit_data(host, data);
2554
2555 + if (host->submit_data == &atmci_submit_data_dma)
2556 + atmci_send_command(host, cmd, cmdflags);
2557 +
2558 if (mrq->stop) {
2559 host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
2560 host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
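The atmel-mci hunk encodes an ordering rule: with DMA the data transfer is armed before the command goes out, while the PDC path keeps the old command-first order. A trivial standalone model of that branching (print statements stand in for the hardware operations):

#include <stdbool.h>
#include <stdio.h>

static void send_command(void) { puts("command"); }
static void submit_data(void)  { puts("data"); }

static void start_request(bool using_dma, bool has_data)
{
        if (!using_dma)
                send_command();       /* PDC: command before transfer */
        if (has_data)
                submit_data();
        if (using_dma)
                send_command();       /* DMA: transfer armed first */
}

int main(void)
{
        start_request(true, true);    /* data, then command */
        start_request(false, true);   /* command, then data */
        return 0;
}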
2561 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
2562 index d7d6bc8968d2..27ae563d0caa 100644
2563 --- a/drivers/mmc/host/sdhci-pci.c
2564 +++ b/drivers/mmc/host/sdhci-pci.c
2565 @@ -59,6 +59,7 @@ struct sdhci_pci_fixes {
2566 unsigned int quirks;
2567 unsigned int quirks2;
2568 bool allow_runtime_pm;
2569 + bool own_cd_for_runtime_pm;
2570
2571 int (*probe) (struct sdhci_pci_chip *);
2572
2573 @@ -290,6 +291,7 @@ static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
2574 static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
2575 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
2576 .allow_runtime_pm = true,
2577 + .own_cd_for_runtime_pm = true,
2578 };
2579
2580 static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
2581 @@ -354,6 +356,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
2582 static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
2583 .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON,
2584 .allow_runtime_pm = true,
2585 + .own_cd_for_runtime_pm = true,
2586 };
2587
2588 /* O2Micro extra registers */
2589 @@ -1381,6 +1384,15 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
2590
2591 sdhci_pci_add_own_cd(slot);
2592
2593 + /*
2594 + * Check if the chip needs a separate GPIO for card detect to wake up
2595 + * from runtime suspend. If it is not there, don't allow runtime PM.
2596 + * Note sdhci_pci_add_own_cd() sets slot->cd_gpio to -EINVAL on failure.
2597 + */
2598 + if (chip->fixes && chip->fixes->own_cd_for_runtime_pm &&
2599 + !gpio_is_valid(slot->cd_gpio))
2600 + chip->allow_runtime_pm = false;
2601 +
2602 return slot;
2603
2604 remove:
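The sdhci-pci check gates runtime PM on having a usable wake source: chips flagged with own_cd_for_runtime_pm lose runtime PM when no valid card-detect GPIO was set up. A simplified sketch of the gating logic (gpio_is_valid() is reduced to a sign check, roughly as in the GPIO framework of this era):

#include <stdbool.h>
#include <stdio.h>

static bool gpio_is_valid(int gpio) { return gpio >= 0; } /* simplified */

static bool allow_runtime_pm(bool own_cd_required, int cd_gpio)
{
        return !(own_cd_required && !gpio_is_valid(cd_gpio));
}

int main(void)
{
        printf("%d\n", allow_runtime_pm(true, 42));   /* 1: GPIO present */
        printf("%d\n", allow_runtime_pm(true, -22));  /* 0: no wake source */
        return 0;
}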
2605 diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
2606 index ce8242b6c3e7..e5c0e593ed1e 100644
2607 --- a/drivers/mtd/nand/mxc_nand.c
2608 +++ b/drivers/mtd/nand/mxc_nand.c
2609 @@ -676,7 +676,6 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
2610 ecc_stat >>= 4;
2611 } while (--no_subpages);
2612
2613 - mtd->ecc_stats.corrected += ret;
2614 pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
2615
2616 return ret;
2617 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
2618 index 3f0f20081979..7c541dc1647e 100644
2619 --- a/drivers/net/xen-netfront.c
2620 +++ b/drivers/net/xen-netfront.c
2621 @@ -2070,7 +2070,7 @@ static int __init netif_init(void)
2622 if (!xen_domain())
2623 return -ENODEV;
2624
2625 - if (xen_hvm_domain() && !xen_platform_pci_unplug)
2626 + if (!xen_has_pv_nic_devices())
2627 return -ENODEV;
2628
2629 pr_info("Initialising Xen virtual ethernet driver\n");
2630 diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
2631 index f7197a790341..eae7cd9fde7b 100644
2632 --- a/drivers/pci/xen-pcifront.c
2633 +++ b/drivers/pci/xen-pcifront.c
2634 @@ -20,6 +20,7 @@
2635 #include <linux/workqueue.h>
2636 #include <linux/bitops.h>
2637 #include <linux/time.h>
2638 +#include <xen/platform_pci.h>
2639
2640 #include <asm/xen/swiotlb-xen.h>
2641 #define INVALID_GRANT_REF (0)
2642 @@ -1138,6 +1139,9 @@ static int __init pcifront_init(void)
2643 if (!xen_pv_domain() || xen_initial_domain())
2644 return -ENODEV;
2645
2646 + if (!xen_has_pv_devices())
2647 + return -ENODEV;
2648 +
2649 pci_frontend_registrar(1 /* enable */);
2650
2651 return xenbus_register_frontend(&xenpci_driver);
2652 diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
2653 index 24e733c98f8b..371a7e91dbf6 100644
2654 --- a/drivers/rtc/rtc-cmos.c
2655 +++ b/drivers/rtc/rtc-cmos.c
2656 @@ -34,11 +34,11 @@
2657 #include <linux/interrupt.h>
2658 #include <linux/spinlock.h>
2659 #include <linux/platform_device.h>
2660 -#include <linux/mod_devicetable.h>
2661 #include <linux/log2.h>
2662 #include <linux/pm.h>
2663 #include <linux/of.h>
2664 #include <linux/of_platform.h>
2665 +#include <linux/dmi.h>
2666
2667 /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
2668 #include <asm-generic/rtc.h>
2669 @@ -377,6 +377,51 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
2670 return 0;
2671 }
2672
2673 +/*
2674 + * Do not disable RTC alarm on shutdown - workaround for b0rked BIOSes.
2675 + */
2676 +static bool alarm_disable_quirk;
2677 +
2678 +static int __init set_alarm_disable_quirk(const struct dmi_system_id *id)
2679 +{
2680 + alarm_disable_quirk = true;
2681 + pr_info("rtc-cmos: BIOS has alarm-disable quirk. ");
2682 + pr_info("RTC alarms disabled\n");
2683 + return 0;
2684 +}
2685 +
2686 +static const struct dmi_system_id rtc_quirks[] __initconst = {
2687 + /* https://bugzilla.novell.com/show_bug.cgi?id=805740 */
2688 + {
2689 + .callback = set_alarm_disable_quirk,
2690 + .ident = "IBM Truman",
2691 + .matches = {
2692 + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
2693 + DMI_MATCH(DMI_PRODUCT_NAME, "4852570"),
2694 + },
2695 + },
2696 + /* https://bugzilla.novell.com/show_bug.cgi?id=812592 */
2697 + {
2698 + .callback = set_alarm_disable_quirk,
2699 + .ident = "Gigabyte GA-990XA-UD3",
2700 + .matches = {
2701 + DMI_MATCH(DMI_SYS_VENDOR,
2702 + "Gigabyte Technology Co., Ltd."),
2703 + DMI_MATCH(DMI_PRODUCT_NAME, "GA-990XA-UD3"),
2704 + },
2705 + },
2706 + /* http://permalink.gmane.org/gmane.linux.kernel/1604474 */
2707 + {
2708 + .callback = set_alarm_disable_quirk,
2709 + .ident = "Toshiba Satellite L300",
2710 + .matches = {
2711 + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
2712 + DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
2713 + },
2714 + },
2715 + {}
2716 +};
2717 +
2718 static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
2719 {
2720 struct cmos_rtc *cmos = dev_get_drvdata(dev);
2721 @@ -385,6 +430,9 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
2722 if (!is_valid_irq(cmos->irq))
2723 return -EINVAL;
2724
2725 + if (alarm_disable_quirk)
2726 + return 0;
2727 +
2728 spin_lock_irqsave(&rtc_lock, flags);
2729
2730 if (enabled)
2731 @@ -1158,6 +1206,8 @@ static int __init cmos_init(void)
2732 platform_driver_registered = true;
2733 }
2734
2735 + dmi_check_system(rtc_quirks);
2736 +
2737 if (retval == 0)
2738 return 0;
2739
2740 diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
2741 index 536b0e363826..fa905c9d772a 100644
2742 --- a/drivers/spi/spi-bcm63xx.c
2743 +++ b/drivers/spi/spi-bcm63xx.c
2744 @@ -169,8 +169,6 @@ static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
2745 transfer_list);
2746 }
2747
2748 - len -= prepend_len;
2749 -
2750 init_completion(&bs->done);
2751
2752 /* Fill in the Message control register */
2753 diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
2754 index 28361f7783cd..7b69e93d8448 100644
2755 --- a/drivers/spi/spi-pxa2xx.c
2756 +++ b/drivers/spi/spi-pxa2xx.c
2757 @@ -1070,6 +1070,8 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
2758
2759 pdata->num_chipselect = 1;
2760 pdata->enable_dma = true;
2761 + pdata->tx_chan_id = -1;
2762 + pdata->rx_chan_id = -1;
2763
2764 return pdata;
2765 }
2766 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
2767 index 9e039c60c068..30b1229f6406 100644
2768 --- a/drivers/spi/spi.c
2769 +++ b/drivers/spi/spi.c
2770 @@ -600,7 +600,9 @@ static void spi_pump_messages(struct kthread_work *work)
2771 ret = master->transfer_one_message(master, master->cur_msg);
2772 if (ret) {
2773 dev_err(&master->dev,
2774 - "failed to transfer one message from queue\n");
2775 + "failed to transfer one message from queue: %d\n", ret);
2776 + master->cur_msg->status = ret;
2777 + spi_finalize_current_message(master);
2778 return;
2779 }
2780 }
2781 diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
2782 index b0cac0c342e1..1039de499bc6 100644
2783 --- a/drivers/target/iscsi/iscsi_target_util.c
2784 +++ b/drivers/target/iscsi/iscsi_target_util.c
2785 @@ -156,9 +156,13 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
2786 {
2787 struct iscsi_cmd *cmd;
2788 struct se_session *se_sess = conn->sess->se_sess;
2789 - int size, tag;
2790 + int size, tag, state = (gfp_mask & __GFP_WAIT) ? TASK_INTERRUPTIBLE :
2791 + TASK_RUNNING;
2792 +
2793 + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
2794 + if (tag < 0)
2795 + return NULL;
2796
2797 - tag = percpu_ida_alloc(&se_sess->sess_tag_pool, gfp_mask);
2798 size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
2799 cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
2800 memset(cmd, 0, size);
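The iscsit change maps the old gfp semantics onto percpu_ida_alloc()'s new task-state argument: sleepable callers pass TASK_INTERRUPTIBLE, while atomic callers pass TASK_RUNNING and must treat a negative tag as a fail-fast allocation failure. A tiny model of the translation, with illustrative stand-in constants:

#include <stdio.h>

#define __GFP_WAIT         0x10u  /* illustrative stand-in values */
#define TASK_RUNNING       0
#define TASK_INTERRUPTIBLE 1

static int gfp_to_state(unsigned gfp_mask)
{
        return (gfp_mask & __GFP_WAIT) ? TASK_INTERRUPTIBLE : TASK_RUNNING;
}

int main(void)
{
        printf("GFP_KERNEL-like -> %d (may sleep)\n", gfp_to_state(__GFP_WAIT));
        printf("GFP_ATOMIC-like -> %d (fail fast)\n", gfp_to_state(0));
        return 0;
}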
2801 diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
2802 index e663921eebb6..d300fd99a2b8 100644
2803 --- a/drivers/vhost/scsi.c
2804 +++ b/drivers/vhost/scsi.c
2805 @@ -728,7 +728,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq,
2806 }
2807 se_sess = tv_nexus->tvn_se_sess;
2808
2809 - tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
2810 + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
2811 if (tag < 0) {
2812 pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
2813 return ERR_PTR(-ENOMEM);
2814 diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
2815 index cd005c227a23..4b2d3ab870f3 100644
2816 --- a/drivers/video/xen-fbfront.c
2817 +++ b/drivers/video/xen-fbfront.c
2818 @@ -35,6 +35,7 @@
2819 #include <xen/interface/io/fbif.h>
2820 #include <xen/interface/io/protocols.h>
2821 #include <xen/xenbus.h>
2822 +#include <xen/platform_pci.h>
2823
2824 struct xenfb_info {
2825 unsigned char *fb;
2826 @@ -699,6 +700,9 @@ static int __init xenfb_init(void)
2827 if (xen_initial_domain())
2828 return -ENODEV;
2829
2830 + if (!xen_has_pv_devices())
2831 + return -ENODEV;
2832 +
2833 return xenbus_register_frontend(&xenfb_driver);
2834 }
2835
2836 diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
2837 index 34b20bfa4e8c..6244f9c8cfb8 100644
2838 --- a/drivers/xen/xenbus/xenbus_probe_frontend.c
2839 +++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
2840 @@ -496,7 +496,7 @@ subsys_initcall(xenbus_probe_frontend_init);
2841 #ifndef MODULE
2842 static int __init boot_wait_for_devices(void)
2843 {
2844 - if (xen_hvm_domain() && !xen_platform_pci_unplug)
2845 + if (!xen_has_pv_devices())
2846 return -ENODEV;
2847
2848 ready_to_wait_for_devices = 1;
2849 diff --git a/fs/dcookies.c b/fs/dcookies.c
2850 index ab5954b50267..ac44a69fbea9 100644
2851 --- a/fs/dcookies.c
2852 +++ b/fs/dcookies.c
2853 @@ -204,7 +204,7 @@ out:
2854 }
2855
2856 #ifdef CONFIG_COMPAT
2857 -COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, w0, u32, w1, char __user *, buf, size_t, len)
2858 +COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, w0, u32, w1, char __user *, buf, compat_size_t, len)
2859 {
2860 #ifdef __BIG_ENDIAN
2861 return sys_lookup_dcookie(((u64)w0 << 32) | w1, buf, len);
2862 diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
2863 index b74422888604..85cde3e76290 100644
2864 --- a/fs/exofs/ore.c
2865 +++ b/fs/exofs/ore.c
2866 @@ -103,7 +103,7 @@ int ore_verify_layout(unsigned total_comps, struct ore_layout *layout)
2867
2868 layout->max_io_length =
2869 (BIO_MAX_PAGES_KMALLOC * PAGE_SIZE - layout->stripe_unit) *
2870 - layout->group_width;
2871 + (layout->group_width - layout->parity);
2872 if (layout->parity) {
2873 unsigned stripe_length =
2874 (layout->group_width - layout->parity) *
2875 @@ -286,7 +286,8 @@ int ore_get_rw_state(struct ore_layout *layout, struct ore_components *oc,
2876 if (length) {
2877 ore_calc_stripe_info(layout, offset, length, &ios->si);
2878 ios->length = ios->si.length;
2879 - ios->nr_pages = (ios->length + PAGE_SIZE - 1) / PAGE_SIZE;
2880 + ios->nr_pages = ((ios->offset & (PAGE_SIZE - 1)) +
2881 + ios->length + PAGE_SIZE - 1) / PAGE_SIZE;
2882 if (layout->parity)
2883 _ore_post_alloc_raid_stuff(ios);
2884 }
2885 @@ -536,6 +537,7 @@ void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
2886 u64 H = LmodS - G * T;
2887
2888 u32 N = div_u64(H, U);
2889 + u32 Nlast;
2890
2891 /* "H - (N * U)" is just "H % U" so it's bound to u32 */
2892 u32 C = (u32)(H - (N * U)) / stripe_unit + G * group_width;
2893 @@ -568,6 +570,10 @@ void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
2894 si->length = T - H;
2895 if (si->length > length)
2896 si->length = length;
2897 +
2898 + Nlast = div_u64(H + si->length + U - 1, U);
2899 + si->maxdevUnits = Nlast - N;
2900 +
2901 si->M = M;
2902 }
2903 EXPORT_SYMBOL(ore_calc_stripe_info);
2904 @@ -583,13 +589,16 @@ int _ore_add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
2905 int ret;
2906
2907 if (per_dev->bio == NULL) {
2908 - unsigned pages_in_stripe = ios->layout->group_width *
2909 - (ios->layout->stripe_unit / PAGE_SIZE);
2910 - unsigned nr_pages = ios->nr_pages * ios->layout->group_width /
2911 - (ios->layout->group_width -
2912 - ios->layout->parity);
2913 - unsigned bio_size = (nr_pages + pages_in_stripe) /
2914 - ios->layout->group_width;
2915 + unsigned bio_size;
2916 +
2917 + if (!ios->reading) {
2918 + bio_size = ios->si.maxdevUnits;
2919 + } else {
2920 + bio_size = (ios->si.maxdevUnits + 1) *
2921 + (ios->layout->group_width - ios->layout->parity) /
2922 + ios->layout->group_width;
2923 + }
2924 + bio_size *= (ios->layout->stripe_unit / PAGE_SIZE);
2925
2926 per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
2927 if (unlikely(!per_dev->bio)) {
2928 @@ -609,8 +618,12 @@ int _ore_add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
2929 added_len = bio_add_pc_page(q, per_dev->bio, pages[pg],
2930 pglen, pgbase);
2931 if (unlikely(pglen != added_len)) {
2932 - ORE_DBGMSG("Failed bio_add_pc_page bi_vcnt=%u\n",
2933 - per_dev->bio->bi_vcnt);
2934 + /* If bi_vcnt == bi_max then this is a SW BUG */
2935 + ORE_DBGMSG("Failed bio_add_pc_page bi_vcnt=0x%x "
2936 + "bi_max=0x%x BIO_MAX=0x%x cur_len=0x%x\n",
2937 + per_dev->bio->bi_vcnt,
2938 + per_dev->bio->bi_max_vecs,
2939 + BIO_MAX_PAGES_KMALLOC, cur_len);
2940 ret = -ENOMEM;
2941 goto out;
2942 }
2943 @@ -1098,7 +1111,7 @@ int ore_truncate(struct ore_layout *layout, struct ore_components *oc,
2944 size_attr->attr = g_attr_logical_length;
2945 size_attr->attr.val_ptr = &size_attr->newsize;
2946
2947 - ORE_DBGMSG("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
2948 + ORE_DBGMSG2("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
2949 _LLU(oc->comps->obj.id), _LLU(obj_size), i);
2950 ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
2951 &size_attr->attr);
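Two sizing computations change in ore.c: nr_pages now accounts for an I/O that starts mid-page, and per-device bios are sized from maxdevUnits = Nlast - N, the count of stripe units the request touches on the busiest device. A worked example with made-up geometry:

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
        unsigned long offset = 4096 + 512;       /* starts mid-page */
        unsigned long length = 8192;
        unsigned long nr_pages =
                ((offset & (PAGE_SIZE - 1)) + length + PAGE_SIZE - 1)
                        / PAGE_SIZE;
        printf("nr_pages=%lu\n", nr_pages);      /* 3, not 2 */

        /* stripe units touched on one device: H..H+length in units of U */
        unsigned long U = 65536, H = 60000;
        unsigned long N = H / U;
        unsigned long Nlast = (H + length + U - 1) / U;
        printf("maxdevUnits=%lu\n", Nlast - N);  /* 2 */
        return 0;
}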
2952 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
2953 index ef74ad5fd362..fa8cb4b7b8fe 100644
2954 --- a/fs/fuse/dev.c
2955 +++ b/fs/fuse/dev.c
2956 @@ -1296,22 +1296,6 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
2957 return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
2958 }
2959
2960 -static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
2961 - struct pipe_buffer *buf)
2962 -{
2963 - return 1;
2964 -}
2965 -
2966 -static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
2967 - .can_merge = 0,
2968 - .map = generic_pipe_buf_map,
2969 - .unmap = generic_pipe_buf_unmap,
2970 - .confirm = generic_pipe_buf_confirm,
2971 - .release = generic_pipe_buf_release,
2972 - .steal = fuse_dev_pipe_buf_steal,
2973 - .get = generic_pipe_buf_get,
2974 -};
2975 -
2976 static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
2977 struct pipe_inode_info *pipe,
2978 size_t len, unsigned int flags)
2979 @@ -1358,7 +1342,11 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
2980 buf->page = bufs[page_nr].page;
2981 buf->offset = bufs[page_nr].offset;
2982 buf->len = bufs[page_nr].len;
2983 - buf->ops = &fuse_dev_pipe_buf_ops;
2984 + /*
2985 + * Need to be careful about this. Having buf->ops in module
2986 + * code can Oops if the buffer persists after module unload.
2987 + */
2988 + buf->ops = &nosteal_pipe_buf_ops;
2989
2990 pipe->nrbufs++;
2991 page_nr++;
2992 diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
2993 index a860ab566d6e..8a572ddde55b 100644
2994 --- a/fs/nfs/nfs4client.c
2995 +++ b/fs/nfs/nfs4client.c
2996 @@ -407,13 +407,11 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
2997 error = nfs4_discover_server_trunking(clp, &old);
2998 if (error < 0)
2999 goto error;
3000 - nfs_put_client(clp);
3001 - if (clp != old) {
3002 - clp->cl_preserve_clid = true;
3003 - clp = old;
3004 - }
3005
3006 - return clp;
3007 + if (clp != old)
3008 + clp->cl_preserve_clid = true;
3009 + nfs_put_client(clp);
3010 + return old;
3011
3012 error:
3013 nfs_mark_client_ready(clp, error);
3014 @@ -491,9 +489,10 @@ int nfs40_walk_client_list(struct nfs_client *new,
3015 prev = pos;
3016
3017 status = nfs_wait_client_init_complete(pos);
3018 - spin_lock(&nn->nfs_client_lock);
3019 if (status < 0)
3020 - continue;
3021 + goto out;
3022 + status = -NFS4ERR_STALE_CLIENTID;
3023 + spin_lock(&nn->nfs_client_lock);
3024 }
3025 if (pos->cl_cons_state != NFS_CS_READY)
3026 continue;
3027 @@ -631,7 +630,8 @@ int nfs41_walk_client_list(struct nfs_client *new,
3028 }
3029 spin_lock(&nn->nfs_client_lock);
3030 if (status < 0)
3031 - continue;
3032 + break;
3033 + status = -NFS4ERR_STALE_CLIENTID;
3034 }
3035 if (pos->cl_cons_state != NFS_CS_READY)
3036 continue;
3037 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3038 index dacb2979e8ac..29c5a2c08f02 100644
3039 --- a/fs/nfs/nfs4proc.c
3040 +++ b/fs/nfs/nfs4proc.c
3041 @@ -532,7 +532,7 @@ static int nfs40_sequence_done(struct rpc_task *task,
3042 struct nfs4_slot *slot = res->sr_slot;
3043 struct nfs4_slot_table *tbl;
3044
3045 - if (!RPC_WAS_SENT(task))
3046 + if (slot == NULL)
3047 goto out;
3048
3049 tbl = slot->table;
3050 @@ -7057,9 +7057,9 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
3051 struct nfs_server *server = NFS_SERVER(inode);
3052 struct pnfs_layout_hdr *lo;
3053 struct nfs4_state *state = NULL;
3054 - unsigned long timeo, giveup;
3055 + unsigned long timeo, now, giveup;
3056
3057 - dprintk("--> %s\n", __func__);
3058 + dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
3059
3060 if (!nfs41_sequence_done(task, &lgp->res.seq_res))
3061 goto out;
3062 @@ -7067,12 +7067,38 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
3063 switch (task->tk_status) {
3064 case 0:
3065 goto out;
3066 + /*
3067 + * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
3068 + * (or clients) writing to the same RAID stripe
3069 + */
3070 case -NFS4ERR_LAYOUTTRYLATER:
3071 + /*
3072 + * NFS4ERR_RECALLCONFLICT is when conflict with self (must recall
3073 + * existing layout before getting a new one).
3074 + */
3075 case -NFS4ERR_RECALLCONFLICT:
3076 timeo = rpc_get_timeout(task->tk_client);
3077 giveup = lgp->args.timestamp + timeo;
3078 - if (time_after(giveup, jiffies))
3079 - task->tk_status = -NFS4ERR_DELAY;
3080 + now = jiffies;
3081 + if (time_after(giveup, now)) {
3082 + unsigned long delay;
3083 +
3084 + /* Delay for:
3085 + * - Not less than NFS4_POLL_RETRY_MIN.
3086 + * - One last try a jiffy before we give up
3087 + * - exponential backoff (time_now minus start_attempt)
3088 + */
3089 + delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN,
3090 + min((giveup - now - 1),
3091 + now - lgp->args.timestamp));
3092 +
3093 + dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n",
3094 + __func__, delay);
3095 + rpc_delay(task, delay);
3096 + task->tk_status = 0;
3097 + rpc_restart_call_prepare(task);
3098 + goto out; /* Do not call nfs4_async_handle_error() */
3099 + }
3100 break;
3101 case -NFS4ERR_EXPIRED:
3102 case -NFS4ERR_BAD_STATEID:
3103 @@ -7561,7 +7587,7 @@ nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
3104 switch (err) {
3105 case 0:
3106 case -NFS4ERR_WRONGSEC:
3107 - case -NFS4ERR_NOTSUPP:
3108 + case -ENOTSUPP:
3109 goto out;
3110 default:
3111 err = nfs4_handle_exception(server, err, &exception);
3112 @@ -7595,7 +7621,7 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3113 * Fall back on "guess and check" method if
3114 * the server doesn't support SECINFO_NO_NAME
3115 */
3116 - if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
3117 + if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
3118 err = nfs4_find_root_sec(server, fhandle, info);
3119 goto out_freepage;
3120 }
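
The LAYOUTTRYLATER/RECALLCONFLICT hunk above clamps the retry delay between NFS4_POLL_RETRY_MIN and one jiffy short of the give-up deadline, backing off by the time already spent. A minimal userspace sketch of that clamp follows; the HZ value and NFS4_POLL_RETRY_MIN = HZ/10 are assumptions for illustration, and the real code compares jiffies with the wrap-safe time_after() rather than plain <.

    #include <stdio.h>

    #define HZ                  100UL          /* assumed tick rate */
    #define NFS4_POLL_RETRY_MIN (HZ / 10)      /* assumed, as in mainline */

    static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }
    static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }

    /* start <= now < giveup, all in jiffies */
    static unsigned long retry_delay(unsigned long now, unsigned long start,
                                     unsigned long giveup)
    {
            /* never below the poll minimum, never past the deadline,
             * otherwise back off by the time already spent waiting */
            return max_ul(NFS4_POLL_RETRY_MIN,
                          min_ul(giveup - now - 1, now - start));
    }

    int main(void)
    {
            unsigned long start = 1000, giveup = start + 60 * HZ;
            unsigned long now = start + 1;

            while (now < giveup) {
                    unsigned long d = retry_delay(now, start, giveup);

                    printf("now=%5lu delay=%4lu\n", now, d);
                    now += d;
            }
            return 0;
    }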
3121 diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
3122 index 79210d23f607..b2f842d0901b 100644
3123 --- a/fs/nfs/nfs4xdr.c
3124 +++ b/fs/nfs/nfs4xdr.c
3125 @@ -3053,7 +3053,8 @@ out_overflow:
3126 return -EIO;
3127 }
3128
3129 -static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
3130 +static bool __decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected,
3131 + int *nfs_retval)
3132 {
3133 __be32 *p;
3134 uint32_t opnum;
3135 @@ -3063,19 +3064,32 @@ static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
3136 if (unlikely(!p))
3137 goto out_overflow;
3138 opnum = be32_to_cpup(p++);
3139 - if (opnum != expected) {
3140 - dprintk("nfs: Server returned operation"
3141 - " %d but we issued a request for %d\n",
3142 - opnum, expected);
3143 - return -EIO;
3144 - }
3145 + if (unlikely(opnum != expected))
3146 + goto out_bad_operation;
3147 nfserr = be32_to_cpup(p);
3148 - if (nfserr != NFS_OK)
3149 - return nfs4_stat_to_errno(nfserr);
3150 - return 0;
3151 + if (nfserr == NFS_OK)
3152 + *nfs_retval = 0;
3153 + else
3154 + *nfs_retval = nfs4_stat_to_errno(nfserr);
3155 + return true;
3156 +out_bad_operation:
3157 + dprintk("nfs: Server returned operation"
3158 + " %d but we issued a request for %d\n",
3159 + opnum, expected);
3160 + *nfs_retval = -EREMOTEIO;
3161 + return false;
3162 out_overflow:
3163 print_overflow_msg(__func__, xdr);
3164 - return -EIO;
3165 + *nfs_retval = -EIO;
3166 + return false;
3167 +}
3168 +
3169 +static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
3170 +{
3171 + int retval;
3172 +
3173 + __decode_op_hdr(xdr, expected, &retval);
3174 + return retval;
3175 }
3176
3177 /* Dummy routine */
3178 @@ -4957,11 +4971,12 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
3179 uint32_t savewords, bmlen, i;
3180 int status;
3181
3182 - status = decode_op_hdr(xdr, OP_OPEN);
3183 - if (status != -EIO)
3184 - nfs_increment_open_seqid(status, res->seqid);
3185 - if (!status)
3186 - status = decode_stateid(xdr, &res->stateid);
3187 + if (!__decode_op_hdr(xdr, OP_OPEN, &status))
3188 + return status;
3189 + nfs_increment_open_seqid(status, res->seqid);
3190 + if (status)
3191 + return status;
3192 + status = decode_stateid(xdr, &res->stateid);
3193 if (unlikely(status))
3194 return status;
3195
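The __decode_op_hdr() change above separates "was the op header well-formed" from "what status did the server return", so decode_open() can bump the open seqid on genuine server errors while still bailing out on corrupt or mismatched replies. A standalone sketch of that two-channel pattern; the stream type and the negated status values are stand-ins, not the kernel's xdr_stream API.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct stream { const uint32_t *p; size_t left; };

    /* Returns true when the op header itself was parseable; the server's
     * status is reported separately through *status. */
    static bool decode_hdr(struct stream *s, uint32_t expected, int *status)
    {
            if (s->left < 2) {              /* short buffer: header unusable */
                    *status = -5;           /* stand-in for -EIO */
                    return false;
            }
            if (s->p[0] != expected) {      /* server answered a different op */
                    *status = -121;         /* stand-in for -EREMOTEIO */
                    return false;
            }
            *status = s->p[1] ? -(int)s->p[1] : 0;  /* server's NFS status */
            s->p += 2;
            s->left -= 2;
            return true;
    }

    int main(void)
    {
            uint32_t ok[]  = { 18, 0 };     /* OP_OPEN, NFS_OK */
            uint32_t err[] = { 18, 10011 }; /* OP_OPEN, an NFS error */
            uint32_t bad[] = { 3, 0 };      /* wrong opcode entirely */
            struct stream a = { ok, 2 }, b = { err, 2 }, c = { bad, 2 };
            int st;

            printf("ok:  hdr=%d st=%d\n", decode_hdr(&a, 18, &st), st);
            printf("err: hdr=%d st=%d (still bump seqid)\n",
                   decode_hdr(&b, 18, &st), st);
            printf("bad: hdr=%d st=%d (bail out)\n",
                   decode_hdr(&c, 18, &st), st);
            return 0;
    }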
3196 diff --git a/fs/nfs/write.c b/fs/nfs/write.c
3197 index ac1dc331ba31..28466be64eeb 100644
3198 --- a/fs/nfs/write.c
3199 +++ b/fs/nfs/write.c
3200 @@ -922,19 +922,20 @@ out:
3201 * extend the write to cover the entire page in order to avoid fragmentation
3202 * inefficiencies.
3203 *
3204 - * If the file is opened for synchronous writes or if we have a write delegation
3205 - * from the server then we can just skip the rest of the checks.
3206 + * If the file is opened for synchronous writes then we can just skip the rest
3207 + * of the checks.
3208 */
3209 static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
3210 {
3211 if (file->f_flags & O_DSYNC)
3212 return 0;
3213 + if (!nfs_write_pageuptodate(page, inode))
3214 + return 0;
3215 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
3216 return 1;
3217 - if (nfs_write_pageuptodate(page, inode) && (inode->i_flock == NULL ||
3218 - (inode->i_flock->fl_start == 0 &&
3219 + if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 &&
3220 inode->i_flock->fl_end == OFFSET_MAX &&
3221 - inode->i_flock->fl_type != F_RDLCK)))
3222 + inode->i_flock->fl_type != F_RDLCK))
3223 return 1;
3224 return 0;
3225 }
3226 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
3227 index e44cb6427df3..6663511ab33a 100644
3228 --- a/fs/notify/fanotify/fanotify_user.c
3229 +++ b/fs/notify/fanotify/fanotify_user.c
3230 @@ -888,9 +888,9 @@ COMPAT_SYSCALL_DEFINE6(fanotify_mark,
3231 {
3232 return sys_fanotify_mark(fanotify_fd, flags,
3233 #ifdef __BIG_ENDIAN
3234 - ((__u64)mask1 << 32) | mask0,
3235 -#else
3236 ((__u64)mask0 << 32) | mask1,
3237 +#else
3238 + ((__u64)mask1 << 32) | mask0,
3239 #endif
3240 dfd, pathname);
3241 }
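
The fanotify hunk above swaps the two halves of the 64-bit mask: a 32-bit caller passes the u64 as two 32-bit arguments, and which argument carries the high word follows the compat ABI's byte order. A small userspace demo of the reassembly, assuming a little-endian 32-bit ABI splits the value low word first:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t join_be(uint32_t mask0, uint32_t mask1) /* big endian */
    {
            return ((uint64_t)mask0 << 32) | mask1;  /* mask0 is the high half */
    }

    static uint64_t join_le(uint32_t mask0, uint32_t mask1) /* little endian */
    {
            return ((uint64_t)mask1 << 32) | mask0;  /* mask1 is the high half */
    }

    int main(void)
    {
            uint64_t mask = 0x0000000100000002ULL;
            /* a little-endian 32-bit ABI passes the low word first */
            uint32_t mask0 = (uint32_t)mask, mask1 = (uint32_t)(mask >> 32);

            printf("le rejoin: %#llx\n", (unsigned long long)join_le(mask0, mask1));
            printf("be rejoin (wrong on LE): %#llx\n",
                   (unsigned long long)join_be(mask0, mask1));
            return 0;
    }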
3242 diff --git a/fs/read_write.c b/fs/read_write.c
3243 index e3cd280b158c..3889dcc25114 100644
3244 --- a/fs/read_write.c
3245 +++ b/fs/read_write.c
3246 @@ -977,9 +977,9 @@ out:
3247 return ret;
3248 }
3249
3250 -COMPAT_SYSCALL_DEFINE3(readv, unsigned long, fd,
3251 +COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd,
3252 const struct compat_iovec __user *,vec,
3253 - unsigned long, vlen)
3254 + compat_ulong_t, vlen)
3255 {
3256 struct fd f = fdget(fd);
3257 ssize_t ret;
3258 @@ -1014,9 +1014,9 @@ COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd,
3259 return ret;
3260 }
3261
3262 -COMPAT_SYSCALL_DEFINE5(preadv, unsigned long, fd,
3263 +COMPAT_SYSCALL_DEFINE5(preadv, compat_ulong_t, fd,
3264 const struct compat_iovec __user *,vec,
3265 - unsigned long, vlen, u32, pos_low, u32, pos_high)
3266 + compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
3267 {
3268 loff_t pos = ((loff_t)pos_high << 32) | pos_low;
3269 return compat_sys_preadv64(fd, vec, vlen, pos);
3270 @@ -1044,9 +1044,9 @@ out:
3271 return ret;
3272 }
3273
3274 -COMPAT_SYSCALL_DEFINE3(writev, unsigned long, fd,
3275 +COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd,
3276 const struct compat_iovec __user *, vec,
3277 - unsigned long, vlen)
3278 + compat_ulong_t, vlen)
3279 {
3280 struct fd f = fdget(fd);
3281 ssize_t ret;
3282 @@ -1081,9 +1081,9 @@ COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd,
3283 return ret;
3284 }
3285
3286 -COMPAT_SYSCALL_DEFINE5(pwritev, unsigned long, fd,
3287 +COMPAT_SYSCALL_DEFINE5(pwritev, compat_ulong_t, fd,
3288 const struct compat_iovec __user *,vec,
3289 - unsigned long, vlen, u32, pos_low, u32, pos_high)
3290 + compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
3291 {
3292 loff_t pos = ((loff_t)pos_high << 32) | pos_low;
3293 return compat_sys_pwritev64(fd, vec, vlen, pos);
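The compat syscall hunks above make two things explicit: register values from a 32-bit task may carry junk in the upper 32 bits on a 64-bit kernel, so the parameters are declared as 32-bit compat types, and the 64-bit file offset is rebuilt from its two halves. A userspace demo of both, not kernel code:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t reg = 0xdeadbeef00000003ULL;   /* upper half is junk */
            uint32_t vlen = (uint32_t)reg;          /* compat_ulong_t: keeps 3 */

            uint32_t pos_low = 0x00001000, pos_high = 0x00000002;
            int64_t pos = ((int64_t)pos_high << 32) | pos_low;

            printf("vlen = %u\n", vlen);               /* junk discarded */
            printf("pos  = 0x%llx\n", (long long)pos); /* 0x200001000 */
            return 0;
    }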
3294 diff --git a/fs/splice.c b/fs/splice.c
3295 index 3b7ee656f3aa..84f810d63c37 100644
3296 --- a/fs/splice.c
3297 +++ b/fs/splice.c
3298 @@ -555,6 +555,24 @@ static const struct pipe_buf_operations default_pipe_buf_ops = {
3299 .get = generic_pipe_buf_get,
3300 };
3301
3302 +static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
3303 + struct pipe_buffer *buf)
3304 +{
3305 + return 1;
3306 +}
3307 +
3308 +/* Pipe buffer operations for a socket and similar. */
3309 +const struct pipe_buf_operations nosteal_pipe_buf_ops = {
3310 + .can_merge = 0,
3311 + .map = generic_pipe_buf_map,
3312 + .unmap = generic_pipe_buf_unmap,
3313 + .confirm = generic_pipe_buf_confirm,
3314 + .release = generic_pipe_buf_release,
3315 + .steal = generic_pipe_buf_nosteal,
3316 + .get = generic_pipe_buf_get,
3317 +};
3318 +EXPORT_SYMBOL(nosteal_pipe_buf_ops);
3319 +
3320 static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
3321 unsigned long vlen, loff_t offset)
3322 {
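
The new nosteal_pipe_buf_ops above reuses the generic map/unmap/confirm/release/get helpers and only overrides .steal with a stub returning 1, meaning "this page cannot be stolen, copy it instead". A stand-in sketch of that ops-table shape; the real struct pipe_buf_operations has more members and kernel-only types:

    #include <stdio.h>

    struct buf;   /* opaque stand-in for struct pipe_buffer */

    struct buf_ops {
            int  can_merge;
            int  (*steal)(struct buf *b);
            void (*release)(struct buf *b);
    };

    static int  nosteal(struct buf *b)      { (void)b; return 1; } /* refuse */
    static void release_stub(struct buf *b) { (void)b; }

    static const struct buf_ops nosteal_ops = {
            .can_merge = 0,
            .steal     = nosteal,
            .release   = release_stub,
    };

    int main(void)
    {
            if (nosteal_ops.steal(NULL))
                    printf("steal refused: splice falls back to copying\n");
            return 0;
    }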
3323 diff --git a/include/linux/audit.h b/include/linux/audit.h
3324 index 729a4d165bcc..4fb28b23a4a4 100644
3325 --- a/include/linux/audit.h
3326 +++ b/include/linux/audit.h
3327 @@ -135,7 +135,7 @@ static inline void audit_syscall_exit(void *pt_regs)
3328 {
3329 if (unlikely(current->audit_context)) {
3330 int success = is_syscall_success(pt_regs);
3331 - int return_code = regs_return_value(pt_regs);
3332 + long return_code = regs_return_value(pt_regs);
3333
3334 __audit_syscall_exit(success, return_code);
3335 }
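
The audit.h one-liner above matters because regs_return_value() is register-sized: storing it in an int truncates 64-bit syscall returns, such as mmap() addresses, before __audit_syscall_exit() records them. A two-line demo of the truncation on a 64-bit build:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int64_t ret = 0x7f3a12345000LL;  /* plausible mmap() return */
            int truncated = (int)ret;        /* what the old audit code kept */

            printf("full: %#llx  as int: %#x\n", (long long)ret, truncated);
            return 0;
    }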
3336 diff --git a/include/linux/compat.h b/include/linux/compat.h
3337 index 345da00a86e0..0f62cb7a4ff0 100644
3338 --- a/include/linux/compat.h
3339 +++ b/include/linux/compat.h
3340 @@ -327,16 +327,16 @@ asmlinkage long compat_sys_keyctl(u32 option,
3341 u32 arg2, u32 arg3, u32 arg4, u32 arg5);
3342 asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
3343
3344 -asmlinkage ssize_t compat_sys_readv(unsigned long fd,
3345 - const struct compat_iovec __user *vec, unsigned long vlen);
3346 -asmlinkage ssize_t compat_sys_writev(unsigned long fd,
3347 - const struct compat_iovec __user *vec, unsigned long vlen);
3348 -asmlinkage ssize_t compat_sys_preadv(unsigned long fd,
3349 +asmlinkage ssize_t compat_sys_readv(compat_ulong_t fd,
3350 + const struct compat_iovec __user *vec, compat_ulong_t vlen);
3351 +asmlinkage ssize_t compat_sys_writev(compat_ulong_t fd,
3352 + const struct compat_iovec __user *vec, compat_ulong_t vlen);
3353 +asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
3354 const struct compat_iovec __user *vec,
3355 - unsigned long vlen, u32 pos_low, u32 pos_high);
3356 -asmlinkage ssize_t compat_sys_pwritev(unsigned long fd,
3357 + compat_ulong_t vlen, u32 pos_low, u32 pos_high);
3358 +asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
3359 const struct compat_iovec __user *vec,
3360 - unsigned long vlen, u32 pos_low, u32 pos_high);
3361 + compat_ulong_t vlen, u32 pos_low, u32 pos_high);
3362 asmlinkage long comat_sys_lseek(unsigned int, compat_off_t, unsigned int);
3363
3364 asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
3365 @@ -422,7 +422,7 @@ extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
3366 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
3367 compat_long_t addr, compat_long_t data);
3368
3369 -asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, size_t);
3370 +asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
3371 /*
3372 * epoll (fs/eventpoll.c) compat bits follow ...
3373 */
3374 diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h
3375 index 0b23edbee309..67de9b761374 100644
3376 --- a/include/linux/percpu_ida.h
3377 +++ b/include/linux/percpu_ida.h
3378 @@ -4,6 +4,7 @@
3379 #include <linux/types.h>
3380 #include <linux/bitops.h>
3381 #include <linux/init.h>
3382 +#include <linux/sched.h>
3383 #include <linux/spinlock_types.h>
3384 #include <linux/wait.h>
3385 #include <linux/cpumask.h>
3386 @@ -51,7 +52,7 @@ struct percpu_ida {
3387 } ____cacheline_aligned_in_smp;
3388 };
3389
3390 -int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp);
3391 +int percpu_ida_alloc(struct percpu_ida *pool, int state);
3392 void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
3393
3394 void percpu_ida_destroy(struct percpu_ida *pool);
3395 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
3396 index b8809fef61f5..ab5752692113 100644
3397 --- a/include/linux/pipe_fs_i.h
3398 +++ b/include/linux/pipe_fs_i.h
3399 @@ -157,6 +157,8 @@ int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
3400 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
3401 void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
3402
3403 +extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
3404 +
3405 /* for F_SETPIPE_SZ and F_GETPIPE_SZ */
3406 long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
3407 struct pipe_inode_info *get_pipe_info(struct file *file);
3408 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
3409 index e4b948080d20..a67b38415768 100644
3410 --- a/include/linux/vmstat.h
3411 +++ b/include/linux/vmstat.h
3412 @@ -142,8 +142,6 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
3413 return x;
3414 }
3415
3416 -extern unsigned long global_reclaimable_pages(void);
3417 -
3418 #ifdef CONFIG_NUMA
3419 /*
3420 * Determine the per node value of a stat item. This function
3421 diff --git a/include/scsi/osd_ore.h b/include/scsi/osd_ore.h
3422 index a5f9b960dfc8..6ca3265a4dca 100644
3423 --- a/include/scsi/osd_ore.h
3424 +++ b/include/scsi/osd_ore.h
3425 @@ -102,6 +102,7 @@ struct ore_striping_info {
3426 unsigned unit_off;
3427 unsigned cur_pg;
3428 unsigned cur_comp;
3429 + unsigned maxdevUnits;
3430 };
3431
3432 struct ore_io_state;
3433 diff --git a/include/xen/platform_pci.h b/include/xen/platform_pci.h
3434 index 438c256c274b..b49eeab0262e 100644
3435 --- a/include/xen/platform_pci.h
3436 +++ b/include/xen/platform_pci.h
3437 @@ -48,4 +48,27 @@ static inline int xen_must_unplug_disks(void) {
3438
3439 extern int xen_platform_pci_unplug;
3440
3441 +#if defined(CONFIG_XEN_PVHVM)
3442 +extern bool xen_has_pv_devices(void);
3443 +extern bool xen_has_pv_disk_devices(void);
3444 +extern bool xen_has_pv_nic_devices(void);
3445 +extern bool xen_has_pv_and_legacy_disk_devices(void);
3446 +#else
3447 +static inline bool xen_has_pv_devices(void)
3448 +{
3449 + return IS_ENABLED(CONFIG_XEN);
3450 +}
3451 +static inline bool xen_has_pv_disk_devices(void)
3452 +{
3453 + return IS_ENABLED(CONFIG_XEN);
3454 +}
3455 +static inline bool xen_has_pv_nic_devices(void)
3456 +{
3457 + return IS_ENABLED(CONFIG_XEN);
3458 +}
3459 +static inline bool xen_has_pv_and_legacy_disk_devices(void)
3460 +{
3461 + return false;
3462 +}
3463 +#endif
3464 #endif /* _XEN_PLATFORM_PCI_H */
3465 diff --git a/kernel/audit.c b/kernel/audit.c
3466 index 7ddfd8a00a2a..6def25f1b351 100644
3467 --- a/kernel/audit.c
3468 +++ b/kernel/audit.c
3469 @@ -103,7 +103,8 @@ static int audit_rate_limit;
3470
3471 /* Number of outstanding audit_buffers allowed. */
3472 static int audit_backlog_limit = 64;
3473 -static int audit_backlog_wait_time = 60 * HZ;
3474 +#define AUDIT_BACKLOG_WAIT_TIME (60 * HZ)
3475 +static int audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME;
3476 static int audit_backlog_wait_overflow = 0;
3477
3478 /* The identity of the user shutting down the audit system. */
3479 @@ -1135,6 +1136,8 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
3480 return NULL;
3481 }
3482
3483 + audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME;
3484 +
3485 ab = audit_buffer_alloc(ctx, gfp_mask, type);
3486 if (!ab) {
3487 audit_log_lost("out of memory in audit_log_start");
3488 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
3489 index 5cf6c7097a71..bfca770a64e0 100644
3490 --- a/kernel/time/timekeeping.c
3491 +++ b/kernel/time/timekeeping.c
3492 @@ -77,7 +77,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
3493 tk->wall_to_monotonic = wtm;
3494 set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
3495 tk->offs_real = timespec_to_ktime(tmp);
3496 - tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tk->tai_offset, 0));
3497 + tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
3498 }
3499
3500 static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
3501 @@ -595,7 +595,7 @@ s32 timekeeping_get_tai_offset(void)
3502 static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
3503 {
3504 tk->tai_offset = tai_offset;
3505 - tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tai_offset, 0));
3506 + tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
3507 }
3508
3509 /**
3510 @@ -610,6 +610,7 @@ void timekeeping_set_tai_offset(s32 tai_offset)
3511 raw_spin_lock_irqsave(&timekeeper_lock, flags);
3512 write_seqcount_begin(&timekeeper_seq);
3513 __timekeeping_set_tai_offset(tk, tai_offset);
3514 + timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
3515 write_seqcount_end(&timekeeper_seq);
3516 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
3517 clock_was_set();
3518 @@ -1023,6 +1024,8 @@ static int timekeeping_suspend(void)
3519 timekeeping_suspend_time =
3520 timespec_add(timekeeping_suspend_time, delta_delta);
3521 }
3522 +
3523 + timekeeping_update(tk, TK_MIRROR);
3524 write_seqcount_end(&timekeeper_seq);
3525 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
3526
3527 @@ -1255,7 +1258,7 @@ out_adjust:
3528 static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
3529 {
3530 u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
3531 - unsigned int action = 0;
3532 + unsigned int clock_set = 0;
3533
3534 while (tk->xtime_nsec >= nsecps) {
3535 int leap;
3536 @@ -1277,11 +1280,10 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
3537
3538 __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
3539
3540 - clock_was_set_delayed();
3541 - action = TK_CLOCK_WAS_SET;
3542 + clock_set = TK_CLOCK_WAS_SET;
3543 }
3544 }
3545 - return action;
3546 + return clock_set;
3547 }
3548
3549 /**
3550 @@ -1294,7 +1296,8 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
3551 * Returns the unconsumed cycles.
3552 */
3553 static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
3554 - u32 shift)
3555 + u32 shift,
3556 + unsigned int *clock_set)
3557 {
3558 cycle_t interval = tk->cycle_interval << shift;
3559 u64 raw_nsecs;
3560 @@ -1308,7 +1311,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
3561 tk->cycle_last += interval;
3562
3563 tk->xtime_nsec += tk->xtime_interval << shift;
3564 - accumulate_nsecs_to_secs(tk);
3565 + *clock_set |= accumulate_nsecs_to_secs(tk);
3566
3567 /* Accumulate raw time */
3568 raw_nsecs = (u64)tk->raw_interval << shift;
3569 @@ -1366,7 +1369,7 @@ static void update_wall_time(void)
3570 struct timekeeper *tk = &shadow_timekeeper;
3571 cycle_t offset;
3572 int shift = 0, maxshift;
3573 - unsigned int action;
3574 + unsigned int clock_set = 0;
3575 unsigned long flags;
3576
3577 raw_spin_lock_irqsave(&timekeeper_lock, flags);
3578 @@ -1401,7 +1404,8 @@ static void update_wall_time(void)
3579 maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
3580 shift = min(shift, maxshift);
3581 while (offset >= tk->cycle_interval) {
3582 - offset = logarithmic_accumulation(tk, offset, shift);
3583 + offset = logarithmic_accumulation(tk, offset, shift,
3584 + &clock_set);
3585 if (offset < tk->cycle_interval<<shift)
3586 shift--;
3587 }
3588 @@ -1419,7 +1423,7 @@ static void update_wall_time(void)
3589 * Finally, make sure that after the rounding
3590 * xtime_nsec isn't larger than NSEC_PER_SEC
3591 */
3592 - action = accumulate_nsecs_to_secs(tk);
3593 + clock_set |= accumulate_nsecs_to_secs(tk);
3594
3595 write_seqcount_begin(&timekeeper_seq);
3596 /* Update clock->cycle_last with the new value */
3597 @@ -1435,10 +1439,23 @@ static void update_wall_time(void)
3598 * updating.
3599 */
3600 memcpy(real_tk, tk, sizeof(*tk));
3601 - timekeeping_update(real_tk, action);
3602 + timekeeping_update(real_tk, clock_set);
3603 write_seqcount_end(&timekeeper_seq);
3604 out:
3605 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
3606 + if (clock_set) {
3607 + /*
3608 + * XXX - I'd rather we just call clock_was_set(), but
3609 + * since we're currently holding the jiffies lock, calling
3610 + * clock_was_set would trigger an IPI which would then grab
3611 + * the jiffies lock and we'd deadlock. :(
3612 + * The right solution should probably be dropping
3613 + * the jiffies lock before calling update_wall_time
3614 + * but that requires some rework of the tick sched
3615 + * code.
3616 + */
3617 + clock_was_set_delayed();
3618 + }
3619 }
3620
3621 /**
3622 @@ -1697,12 +1714,14 @@ int do_adjtimex(struct timex *txc)
3623
3624 if (tai != orig_tai) {
3625 __timekeeping_set_tai_offset(tk, tai);
3626 - update_pvclock_gtod(tk, true);
3627 - clock_was_set_delayed();
3628 + timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
3629 }
3630 write_seqcount_end(&timekeeper_seq);
3631 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
3632
3633 + if (tai != orig_tai)
3634 + clock_was_set();
3635 +
3636 ntp_notify_cmos_timer();
3637
3638 return ret;
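The timekeeping hunks above flip offs_tai from ktime_sub() to ktime_add(): both offsets are values added to the monotonic clock, and TAI runs ahead of UTC by tai_offset seconds, so the TAI offset must be offs_real plus tai_offset. A plain-integer demo with illustrative values (35 s was the UTC-TAI difference in this era):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000LL

    int main(void)
    {
            int64_t mono      = 5000 * NSEC_PER_SEC;          /* uptime-ish */
            int64_t offs_real = 1392000000LL * NSEC_PER_SEC;  /* mono -> realtime */
            int64_t tai_off   = 35;                           /* seconds */

            int64_t realtime = mono + offs_real;
            int64_t tai      = mono + offs_real + tai_off * NSEC_PER_SEC;

            printf("TAI - UTC = %lld s\n",
                   (long long)((tai - realtime) / NSEC_PER_SEC));
            return 0;
    }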
3639 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
3640 index 1345d9ff0662..e66411fb55b3 100644
3641 --- a/kernel/trace/ftrace.c
3642 +++ b/kernel/trace/ftrace.c
3643 @@ -85,6 +85,8 @@ int function_trace_stop __read_mostly;
3644
3645 /* Current function tracing op */
3646 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
3647 +/* What to set function_trace_op to */
3648 +static struct ftrace_ops *set_function_trace_op;
3649
3650 /* List for set_ftrace_pid's pids. */
3651 LIST_HEAD(ftrace_pids);
3652 @@ -278,6 +280,29 @@ static void update_global_ops(void)
3653 global_ops.func = func;
3654 }
3655
3656 +static void ftrace_sync(struct work_struct *work)
3657 +{
3658 + /*
3659 + * This function is just a stub to implement a hard force
3660 + * of synchronize_sched(). This requires synchronizing
3661 + * tasks even in userspace and idle.
3662 + *
3663 + * Yes, function tracing is rude.
3664 + */
3665 +}
3666 +
3667 +static void ftrace_sync_ipi(void *data)
3668 +{
3669 + /* Probably not needed, but do it anyway */
3670 + smp_rmb();
3671 +}
3672 +
3673 +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3674 +static void update_function_graph_func(void);
3675 +#else
3676 +static inline void update_function_graph_func(void) { }
3677 +#endif
3678 +
3679 static void update_ftrace_function(void)
3680 {
3681 ftrace_func_t func;
3682 @@ -296,16 +321,61 @@ static void update_ftrace_function(void)
3683 !FTRACE_FORCE_LIST_FUNC)) {
3684 /* Set the ftrace_ops that the arch callback uses */
3685 if (ftrace_ops_list == &global_ops)
3686 - function_trace_op = ftrace_global_list;
3687 + set_function_trace_op = ftrace_global_list;
3688 else
3689 - function_trace_op = ftrace_ops_list;
3690 + set_function_trace_op = ftrace_ops_list;
3691 func = ftrace_ops_list->func;
3692 } else {
3693 /* Just use the default ftrace_ops */
3694 - function_trace_op = &ftrace_list_end;
3695 + set_function_trace_op = &ftrace_list_end;
3696 func = ftrace_ops_list_func;
3697 }
3698
3699 + /* If there's no change, then do nothing more here */
3700 + if (ftrace_trace_function == func)
3701 + return;
3702 +
3703 + update_function_graph_func();
3704 +
3705 + /*
3706 + * If we are using the list function, it doesn't care
3707 + * about the function_trace_ops.
3708 + */
3709 + if (func == ftrace_ops_list_func) {
3710 + ftrace_trace_function = func;
3711 + /*
3712 + * Don't even bother setting function_trace_ops,
3713 + * it would be racy to do so anyway.
3714 + */
3715 + return;
3716 + }
3717 +
3718 +#ifndef CONFIG_DYNAMIC_FTRACE
3719 + /*
3720 + * For static tracing, we need to be a bit more careful.
3721 + * The function change takes effect immediately. Thus,
3722 + * we need to coordinate the setting of the function_trace_ops
3723 + * with the setting of the ftrace_trace_function.
3724 + *
3725 + * Set the function to the list ops, which will call the
3726 + * function we want, albeit indirectly, but it handles the
3727 + * ftrace_ops and doesn't depend on function_trace_op.
3728 + */
3729 + ftrace_trace_function = ftrace_ops_list_func;
3730 + /*
3731 + * Make sure all CPUs see this. Yes this is slow, but static
3732 + * tracing is slow and nasty to have enabled.
3733 + */
3734 + schedule_on_each_cpu(ftrace_sync);
3735 + /* Now all cpus are using the list ops. */
3736 + function_trace_op = set_function_trace_op;
3737 + /* Make sure the function_trace_op is visible on all CPUs */
3738 + smp_wmb();
3739 + /* Nasty way to force a rmb on all cpus */
3740 + smp_call_function(ftrace_sync_ipi, NULL, 1);
3741 + /* OK, we are all set to update the ftrace_trace_function now! */
3742 +#endif /* !CONFIG_DYNAMIC_FTRACE */
3743 +
3744 ftrace_trace_function = func;
3745 }
3746
3747 @@ -410,17 +480,6 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
3748 return 0;
3749 }
3750
3751 -static void ftrace_sync(struct work_struct *work)
3752 -{
3753 - /*
3754 - * This function is just a stub to implement a hard force
3755 - * of synchronize_sched(). This requires synchronizing
3756 - * tasks even in userspace and idle.
3757 - *
3758 - * Yes, function tracing is rude.
3759 - */
3760 -}
3761 -
3762 static int __unregister_ftrace_function(struct ftrace_ops *ops)
3763 {
3764 int ret;
3765 @@ -439,20 +498,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
3766 } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
3767 ret = remove_ftrace_list_ops(&ftrace_control_list,
3768 &control_ops, ops);
3769 - if (!ret) {
3770 - /*
3771 - * The ftrace_ops is now removed from the list,
3772 - * so there'll be no new users. We must ensure
3773 - * all current users are done before we free
3774 - * the control data.
3775 - * Note synchronize_sched() is not enough, as we
3776 - * use preempt_disable() to do RCU, but the function
3777 - * tracer can be called where RCU is not active
3778 - * (before user_exit()).
3779 - */
3780 - schedule_on_each_cpu(ftrace_sync);
3781 - control_ops_free(ops);
3782 - }
3783 } else
3784 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
3785
3786 @@ -462,17 +507,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
3787 if (ftrace_enabled)
3788 update_ftrace_function();
3789
3790 - /*
3791 - * Dynamic ops may be freed, we must make sure that all
3792 - * callers are done before leaving this function.
3793 - *
3794 - * Again, normal synchronize_sched() is not good enough.
3795 - * We need to do a hard force of sched synchronization.
3796 - */
3797 - if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
3798 - schedule_on_each_cpu(ftrace_sync);
3799 -
3800 -
3801 return 0;
3802 }
3803
3804 @@ -1992,8 +2026,14 @@ void ftrace_modify_all_code(int command)
3805 else if (command & FTRACE_DISABLE_CALLS)
3806 ftrace_replace_code(0);
3807
3808 - if (update && ftrace_trace_function != ftrace_ops_list_func)
3809 + if (update && ftrace_trace_function != ftrace_ops_list_func) {
3810 + function_trace_op = set_function_trace_op;
3811 + smp_wmb();
3812 + /* If irqs are disabled, we are in stop machine */
3813 + if (!irqs_disabled())
3814 + smp_call_function(ftrace_sync_ipi, NULL, 1);
3815 ftrace_update_ftrace_func(ftrace_trace_function);
3816 + }
3817
3818 if (command & FTRACE_START_FUNC_RET)
3819 ftrace_enable_ftrace_graph_caller();
3820 @@ -2156,10 +2196,41 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
3821 command |= FTRACE_UPDATE_TRACE_FUNC;
3822 }
3823
3824 - if (!command || !ftrace_enabled)
3825 + if (!command || !ftrace_enabled) {
3826 + /*
3827 + * If these are control ops, they still need their
3828 + * per_cpu field freed. Since, function tracing is
3829 + * not currently active, we can just free them
3830 + * without synchronizing all CPUs.
3831 + */
3832 + if (ops->flags & FTRACE_OPS_FL_CONTROL)
3833 + control_ops_free(ops);
3834 return 0;
3835 + }
3836
3837 ftrace_run_update_code(command);
3838 +
3839 + /*
3840 + * Dynamic ops may be freed, we must make sure that all
3841 + * callers are done before leaving this function.
3842 + * The same goes for freeing the per_cpu data of the control
3843 + * ops.
3844 + *
3845 + * Again, normal synchronize_sched() is not good enough.
3846 + * We need to do a hard force of sched synchronization.
3847 + * This is because we use preempt_disable() to do RCU, but
3848 + * the function tracers can be called where RCU is not watching
3849 + * (like before user_exit()). We can not rely on the RCU
3850 + * infrastructure to do the synchronization, thus we must do it
3851 + * ourselves.
3852 + */
3853 + if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
3854 + schedule_on_each_cpu(ftrace_sync);
3855 +
3856 + if (ops->flags & FTRACE_OPS_FL_CONTROL)
3857 + control_ops_free(ops);
3858 + }
3859 +
3860 return 0;
3861 }
3862
3863 @@ -4777,6 +4848,7 @@ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3864 trace_func_graph_ret_t ftrace_graph_return =
3865 (trace_func_graph_ret_t)ftrace_stub;
3866 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3867 +static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
3868
3869 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
3870 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3871 @@ -4918,6 +4990,30 @@ static struct ftrace_ops fgraph_ops __read_mostly = {
3872 FTRACE_OPS_FL_RECURSION_SAFE,
3873 };
3874
3875 +static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
3876 +{
3877 + if (!ftrace_ops_test(&global_ops, trace->func, NULL))
3878 + return 0;
3879 + return __ftrace_graph_entry(trace);
3880 +}
3881 +
3882 +/*
3883 + * The function graph tracer should only trace the functions defined
3884 + * by set_ftrace_filter and set_ftrace_notrace. If another function
3885 + * tracer ops is registered, the graph tracer requires testing the
3886 + * function against the global ops, rather than just tracing any
3887 + * function that any ftrace_ops has registered.
3888 + */
3889 +static void update_function_graph_func(void)
3890 +{
3891 + if (ftrace_ops_list == &ftrace_list_end ||
3892 + (ftrace_ops_list == &global_ops &&
3893 + global_ops.next == &ftrace_list_end))
3894 + ftrace_graph_entry = __ftrace_graph_entry;
3895 + else
3896 + ftrace_graph_entry = ftrace_graph_entry_test;
3897 +}
3898 +
3899 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
3900 trace_func_graph_ent_t entryfunc)
3901 {
3902 @@ -4942,7 +5038,16 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
3903 }
3904
3905 ftrace_graph_return = retfunc;
3906 - ftrace_graph_entry = entryfunc;
3907 +
3908 + /*
3909 + * Update the indirect function to the entryfunc, and the
3910 + * function that gets called to the entry_test first. Then
3911 + * call the update fgraph entry function to determine if
3912 + * the entryfunc should be called directly or not.
3913 + */
3914 + __ftrace_graph_entry = entryfunc;
3915 + ftrace_graph_entry = ftrace_graph_entry_test;
3916 + update_function_graph_func();
3917
3918 ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
3919
3920 @@ -4961,6 +5066,7 @@ void unregister_ftrace_graph(void)
3921 ftrace_graph_active--;
3922 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
3923 ftrace_graph_entry = ftrace_graph_entry_stub;
3924 + __ftrace_graph_entry = ftrace_graph_entry_stub;
3925 ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
3926 unregister_pm_notifier(&ftrace_suspend_notifier);
3927 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
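The static-ftrace path above switches callbacks in two steps: park every CPU on the list function, publish the new function_trace_op with smp_wmb() plus a cross-CPU rendezvous, and only then install the final callback, so no CPU can run the new function against the old ops. A loose userspace analogue of that publish-then-switch ordering using C11 release/acquire atomics; this is an analogy only, since the kernel relies on IPIs rather than acquire loads:

    #include <stdatomic.h>
    #include <stdio.h>

    struct ops { const char *name; };

    static struct ops old_ops = { "old" }, new_ops = { "new" };
    static _Atomic(struct ops *) trace_op = &old_ops;

    /* stands in for the registered callback running on another CPU */
    static void tracer(void)
    {
            struct ops *op = atomic_load_explicit(&trace_op,
                                                  memory_order_acquire);
            printf("callback sees \"%s\" ops\n", op->name);
    }

    int main(void)
    {
            tracer();  /* before the switch: old ops */

            /* step 1: publish the ops (kernel: function_trace_op write,
             * smp_wmb(), then an IPI so every CPU has observed it) */
            atomic_store_explicit(&trace_op, &new_ops, memory_order_release);

            /* step 2: only now would ftrace_trace_function be repointed;
             * every call from here on sees the new ops */
            tracer();
            return 0;
    }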
3928 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
3929 index b778e96e02a1..138077b1a607 100644
3930 --- a/kernel/trace/trace.c
3931 +++ b/kernel/trace/trace.c
3932 @@ -435,6 +435,9 @@ int __trace_puts(unsigned long ip, const char *str, int size)
3933 unsigned long irq_flags;
3934 int alloc;
3935
3936 + if (unlikely(tracing_selftest_running || tracing_disabled))
3937 + return 0;
3938 +
3939 alloc = sizeof(*entry) + size + 2; /* possible \n added */
3940
3941 local_save_flags(irq_flags);
3942 @@ -475,6 +478,9 @@ int __trace_bputs(unsigned long ip, const char *str)
3943 unsigned long irq_flags;
3944 int size = sizeof(struct bputs_entry);
3945
3946 + if (unlikely(tracing_selftest_running || tracing_disabled))
3947 + return 0;
3948 +
3949 local_save_flags(irq_flags);
3950 buffer = global_trace.trace_buffer.buffer;
3951 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
3952 @@ -5872,6 +5878,8 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
3953
3954 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
3955
3956 + buf->tr = tr;
3957 +
3958 buf->buffer = ring_buffer_alloc(size, rb_flags);
3959 if (!buf->buffer)
3960 return -ENOMEM;
3961 diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
3962 index bab1ba2a4c71..fd7d6d3d88a1 100644
3963 --- a/lib/percpu_ida.c
3964 +++ b/lib/percpu_ida.c
3965 @@ -142,22 +142,22 @@ static inline unsigned alloc_local_tag(struct percpu_ida *pool,
3966 /**
3967 * percpu_ida_alloc - allocate a tag
3968 * @pool: pool to allocate from
3969 - * @gfp: gfp flags
3970 + * @state: task state for prepare_to_wait
3971 *
3972 * Returns a tag - an integer in the range [0..nr_tags) (passed to
3973 * tag_pool_init()), or otherwise -ENOSPC on allocation failure.
3974 *
3975 * Safe to be called from interrupt context (assuming it isn't passed
3976 - * __GFP_WAIT, of course).
3977 + * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course).
3978 *
3979 * @gfp indicates whether or not to wait until a free id is available (it's not
3980 * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep
3981 * however long it takes until another thread frees an id (same semantics as a
3982 * mempool).
3983 *
3984 - * Will not fail if passed __GFP_WAIT.
3985 + * Will not fail if passed TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE.
3986 */
3987 -int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
3988 +int percpu_ida_alloc(struct percpu_ida *pool, int state)
3989 {
3990 DEFINE_WAIT(wait);
3991 struct percpu_ida_cpu *tags;
3992 @@ -184,7 +184,8 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
3993 *
3994 * global lock held and irqs disabled, don't need percpu lock
3995 */
3996 - prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
3997 + if (state != TASK_RUNNING)
3998 + prepare_to_wait(&pool->wait, &wait, state);
3999
4000 if (!tags->nr_free)
4001 alloc_global_tags(pool, tags);
4002 @@ -201,16 +202,22 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
4003 spin_unlock(&pool->lock);
4004 local_irq_restore(flags);
4005
4006 - if (tag >= 0 || !(gfp & __GFP_WAIT))
4007 + if (tag >= 0 || state == TASK_RUNNING)
4008 break;
4009
4010 + if (signal_pending_state(state, current)) {
4011 + tag = -ERESTARTSYS;
4012 + break;
4013 + }
4014 +
4015 schedule();
4016
4017 local_irq_save(flags);
4018 tags = this_cpu_ptr(pool->tag_cpu);
4019 }
4020 + if (state != TASK_RUNNING)
4021 + finish_wait(&pool->wait, &wait);
4022
4023 - finish_wait(&pool->wait, &wait);
4024 return tag;
4025 }
4026 EXPORT_SYMBOL_GPL(percpu_ida_alloc);
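The percpu_ida change above moves the sleep/no-sleep decision from gfp flags to a task state: TASK_RUNNING means fail fast, TASK_INTERRUPTIBLE waits but can be broken by a signal, TASK_UNINTERRUPTIBLE waits unconditionally. A mock single-threaded sketch of that contract; the pool, the "signal" flag, and the error values stand in for the kernel machinery:

    #include <stdio.h>

    enum task_state { TASK_RUNNING, TASK_INTERRUPTIBLE, TASK_UNINTERRUPTIBLE };

    static int pool_tags;        /* mock pool: number of free tags */
    static int signal_pending;   /* mock pending-signal flag */

    static int tag_alloc(int state)
    {
            for (;;) {
                    if (pool_tags > 0)
                            return --pool_tags;  /* got a tag */
                    if (state == TASK_RUNNING)
                            return -28;          /* -ENOSPC: no sleeping */
                    if (state == TASK_INTERRUPTIBLE && signal_pending)
                            return -512;         /* -ERESTARTSYS on signal */
                    pool_tags = 1;  /* stand-in: another thread freed one */
            }
    }

    int main(void)
    {
            printf("TASK_RUNNING        -> %d\n", tag_alloc(TASK_RUNNING));
            signal_pending = 1;
            printf("TASK_INTERRUPTIBLE  -> %d\n", tag_alloc(TASK_INTERRUPTIBLE));
            signal_pending = 0;
            printf("TASK_UNINTERRUPTIBLE-> %d\n", tag_alloc(TASK_UNINTERRUPTIBLE));
            return 0;
    }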
4027 diff --git a/mm/internal.h b/mm/internal.h
4028 index 684f7aa9692a..8b6cfd63b5a5 100644
4029 --- a/mm/internal.h
4030 +++ b/mm/internal.h
4031 @@ -85,7 +85,6 @@ extern unsigned long highest_memmap_pfn;
4032 */
4033 extern int isolate_lru_page(struct page *page);
4034 extern void putback_lru_page(struct page *page);
4035 -extern unsigned long zone_reclaimable_pages(struct zone *zone);
4036 extern bool zone_reclaimable(struct zone *zone);
4037
4038 /*
4039 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4040 index e0e979276df0..8e7adcba8176 100644
4041 --- a/mm/memcontrol.c
4042 +++ b/mm/memcontrol.c
4043 @@ -1079,16 +1079,22 @@ skip_node:
4044 * skipped and we should continue the tree walk.
4045 * last_visited css is safe to use because it is
4046 * protected by css_get and the tree walk is rcu safe.
4047 + *
4048 + * We do not take a reference on the root of the tree walk
4049 + * because we might race with the root removal when it would
4050 + * be the only node in the iterated hierarchy and mem_cgroup_iter
4051 + * would end up in an endless loop because it expects that at
4052 + * least one valid node will be returned. Root cannot disappear
4053 + * because the caller of the iterator should hold it already, so
4054 + * skipping css reference should be safe.
4055 */
4056 if (next_css) {
4057 - struct mem_cgroup *mem = mem_cgroup_from_css(next_css);
4058 + if ((next_css->flags & CSS_ONLINE) &&
4059 + (next_css == &root->css || css_tryget(next_css)))
4060 + return mem_cgroup_from_css(next_css);
4061
4062 - if (css_tryget(&mem->css))
4063 - return mem;
4064 - else {
4065 - prev_css = next_css;
4066 - goto skip_node;
4067 - }
4068 + prev_css = next_css;
4069 + goto skip_node;
4070 }
4071
4072 return NULL;
4073 @@ -1122,7 +1128,15 @@ mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter,
4074 if (iter->last_dead_count == *sequence) {
4075 smp_rmb();
4076 position = iter->last_visited;
4077 - if (position && !css_tryget(&position->css))
4078 +
4079 + /*
4080 + * We cannot take a reference to root because we might race
4081 + * with root removal and returning NULL would end up in
4082 + * an endless loop on the iterator user level when root
4083 + * would be returned all the time.
4084 + */
4085 + if (position && position != root &&
4086 + !css_tryget(&position->css))
4087 position = NULL;
4088 }
4089 return position;
4090 @@ -1131,9 +1145,11 @@ mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter,
4091 static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
4092 struct mem_cgroup *last_visited,
4093 struct mem_cgroup *new_position,
4094 + struct mem_cgroup *root,
4095 int sequence)
4096 {
4097 - if (last_visited)
4098 + /* root reference counting symmetric to mem_cgroup_iter_load */
4099 + if (last_visited && last_visited != root)
4100 css_put(&last_visited->css);
4101 /*
4102 * We store the sequence count from the time @last_visited was
4103 @@ -1208,7 +1224,8 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
4104 memcg = __mem_cgroup_iter_next(root, last_visited);
4105
4106 if (reclaim) {
4107 - mem_cgroup_iter_update(iter, last_visited, memcg, seq);
4108 + mem_cgroup_iter_update(iter, last_visited, memcg, root,
4109 + seq);
4110
4111 if (!memcg)
4112 iter->generation++;
4113 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
4114 index 9aea53f4551c..428adeedd3be 100644
4115 --- a/mm/memory-failure.c
4116 +++ b/mm/memory-failure.c
4117 @@ -856,14 +856,14 @@ static int page_action(struct page_state *ps, struct page *p,
4118 * the pages and send SIGBUS to the processes if the data was dirty.
4119 */
4120 static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
4121 - int trapno, int flags)
4122 + int trapno, int flags, struct page **hpagep)
4123 {
4124 enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
4125 struct address_space *mapping;
4126 LIST_HEAD(tokill);
4127 int ret;
4128 int kill = 1, forcekill;
4129 - struct page *hpage = compound_head(p);
4130 + struct page *hpage = *hpagep;
4131 struct page *ppage;
4132
4133 if (PageReserved(p) || PageSlab(p))
4134 @@ -942,11 +942,14 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
4135 * We pinned the head page for hwpoison handling,
4136 * now we split the thp and we are interested in
4137 * the hwpoisoned raw page, so move the refcount
4138 - * to it.
4139 + * to it. Similarly, the page lock is shifted.
4140 */
4141 if (hpage != p) {
4142 put_page(hpage);
4143 get_page(p);
4144 + lock_page(p);
4145 + unlock_page(hpage);
4146 + *hpagep = p;
4147 }
4148 /* THP is split, so ppage should be the real poisoned page. */
4149 ppage = p;
4150 @@ -964,17 +967,11 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
4151 if (kill)
4152 collect_procs(ppage, &tokill);
4153
4154 - if (hpage != ppage)
4155 - lock_page(ppage);
4156 -
4157 ret = try_to_unmap(ppage, ttu);
4158 if (ret != SWAP_SUCCESS)
4159 printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
4160 pfn, page_mapcount(ppage));
4161
4162 - if (hpage != ppage)
4163 - unlock_page(ppage);
4164 -
4165 /*
4166 * Now that the dirty bit has been propagated to the
4167 * struct page and all unmaps done we can decide if
4168 @@ -1193,8 +1190,12 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
4169 /*
4170 * Now take care of user space mappings.
4171 * Abort on fail: __delete_from_page_cache() assumes unmapped page.
4172 + *
4173 + * When the raw error page is a thp tail page, hpage points to the raw
4174 + * page after thp split.
4175 */
4176 - if (hwpoison_user_mappings(p, pfn, trapno, flags) != SWAP_SUCCESS) {
4177 + if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
4178 + != SWAP_SUCCESS) {
4179 printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
4180 res = -EBUSY;
4181 goto out;
4182 diff --git a/mm/mmap.c b/mm/mmap.c
4183 index 362e5f1327ec..af99b9ed2007 100644
4184 --- a/mm/mmap.c
4185 +++ b/mm/mmap.c
4186 @@ -895,7 +895,15 @@ again: remove_next = 1 + (end > next->vm_end);
4187 static inline int is_mergeable_vma(struct vm_area_struct *vma,
4188 struct file *file, unsigned long vm_flags)
4189 {
4190 - if (vma->vm_flags ^ vm_flags)
4191 + /*
4192 + * VM_SOFTDIRTY should not prevent VMA merging: if the flags
4193 + * match in everything but the dirty bit, the caller should mark
4194 + * the merged VMA as dirty. If the dirty bit were not excluded
4195 + * from the comparison, we would increase pressure on the memory
4196 + * system, forcing the kernel to generate new VMAs where old ones
4197 + * could be extended instead.
4198 + */
4199 + if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
4200 return 0;
4201 if (vma->vm_file != file)
4202 return 0;
4203 @@ -1084,7 +1092,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
4204 return a->vm_end == b->vm_start &&
4205 mpol_equal(vma_policy(a), vma_policy(b)) &&
4206 a->vm_file == b->vm_file &&
4207 - !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) &&
4208 + !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) &&
4209 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
4210 }
4211
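Both mmap.c hunks above use the same trick: XOR the two flag words so only the differing bits remain, then mask out the bits that should not block a merge. A tiny demo with a stand-in VM_SOFTDIRTY value (the real bit is configuration-dependent):

    #include <stdio.h>

    #define VM_READ      0x1UL
    #define VM_WRITE     0x2UL
    #define VM_SOFTDIRTY 0x8000000UL   /* stand-in value */

    static int mergeable(unsigned long a, unsigned long b)
    {
            return ((a ^ b) & ~VM_SOFTDIRTY) == 0;
    }

    int main(void)
    {
            unsigned long vma = VM_READ | VM_WRITE;

            printf("soft-dirty only differs: %d\n",
                   mergeable(vma, vma | VM_SOFTDIRTY));  /* 1: merge */
            printf("permissions differ:      %d\n",
                   mergeable(vma, VM_READ));             /* 0: no merge */
            return 0;
    }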
4212 diff --git a/mm/oom_kill.c b/mm/oom_kill.c
4213 index 6738c47f1f72..e73f01c56d10 100644
4214 --- a/mm/oom_kill.c
4215 +++ b/mm/oom_kill.c
4216 @@ -170,7 +170,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
4217 * implementation used by LSMs.
4218 */
4219 if (has_capability_noaudit(p, CAP_SYS_ADMIN))
4220 - adj -= 30;
4221 + points -= (points * 3) / 100;
4222
4223 /* Normalize to oom_score_adj units */
4224 adj *= totalpages / 1000;
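The oom_kill.c change replaces the flat "adj -= 30" (3% of the 1000-point maximum regardless of actual usage) with a 3% discount of the task's own points, matching the proc.txt wording updated at the top of this patch. The numeric difference for a small task:

    #include <stdio.h>

    int main(void)
    {
            long points = 120;  /* task's badness score */

            long flat = points - 30;                  /* old behaviour */
            long pct  = points - (points * 3) / 100;  /* new behaviour */

            printf("flat -30: %ld   minus 3%%: %ld\n", flat, pct);
            return 0;
    }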
4225 diff --git a/mm/page-writeback.c b/mm/page-writeback.c
4226 index 63807583d8e8..2d30e2cfe804 100644
4227 --- a/mm/page-writeback.c
4228 +++ b/mm/page-writeback.c
4229 @@ -191,6 +191,26 @@ static unsigned long writeout_period_time = 0;
4230 * global dirtyable memory first.
4231 */
4232
4233 +/**
4234 + * zone_dirtyable_memory - number of dirtyable pages in a zone
4235 + * @zone: the zone
4236 + *
4237 + * Returns the zone's number of pages potentially available for dirty
4238 + * page cache. This is the base value for the per-zone dirty limits.
4239 + */
4240 +static unsigned long zone_dirtyable_memory(struct zone *zone)
4241 +{
4242 + unsigned long nr_pages;
4243 +
4244 + nr_pages = zone_page_state(zone, NR_FREE_PAGES);
4245 + nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
4246 +
4247 + nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
4248 + nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
4249 +
4250 + return nr_pages;
4251 +}
4252 +
4253 static unsigned long highmem_dirtyable_memory(unsigned long total)
4254 {
4255 #ifdef CONFIG_HIGHMEM
4256 @@ -198,11 +218,9 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
4257 unsigned long x = 0;
4258
4259 for_each_node_state(node, N_HIGH_MEMORY) {
4260 - struct zone *z =
4261 - &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
4262 + struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
4263
4264 - x += zone_page_state(z, NR_FREE_PAGES) +
4265 - zone_reclaimable_pages(z) - z->dirty_balance_reserve;
4266 + x += zone_dirtyable_memory(z);
4267 }
4268 /*
4269 * Unreclaimable memory (kernel memory or anonymous memory
4270 @@ -238,9 +256,12 @@ static unsigned long global_dirtyable_memory(void)
4271 {
4272 unsigned long x;
4273
4274 - x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
4275 + x = global_page_state(NR_FREE_PAGES);
4276 x -= min(x, dirty_balance_reserve);
4277
4278 + x += global_page_state(NR_INACTIVE_FILE);
4279 + x += global_page_state(NR_ACTIVE_FILE);
4280 +
4281 if (!vm_highmem_is_dirtyable)
4282 x -= highmem_dirtyable_memory(x);
4283
4284 @@ -289,32 +310,6 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
4285 }
4286
4287 /**
4288 - * zone_dirtyable_memory - number of dirtyable pages in a zone
4289 - * @zone: the zone
4290 - *
4291 - * Returns the zone's number of pages potentially available for dirty
4292 - * page cache. This is the base value for the per-zone dirty limits.
4293 - */
4294 -static unsigned long zone_dirtyable_memory(struct zone *zone)
4295 -{
4296 - /*
4297 - * The effective global number of dirtyable pages may exclude
4298 - * highmem as a big-picture measure to keep the ratio between
4299 - * dirty memory and lowmem reasonable.
4300 - *
4301 - * But this function is purely about the individual zone and a
4302 - * highmem zone can hold its share of dirty pages, so we don't
4303 - * care about vm_highmem_is_dirtyable here.
4304 - */
4305 - unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
4306 - zone_reclaimable_pages(zone);
4307 -
4308 - /* don't allow this to underflow */
4309 - nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
4310 - return nr_pages;
4311 -}
4312 -
4313 -/**
4314 * zone_dirty_limit - maximum number of dirty pages allowed in a zone
4315 * @zone: the zone
4316 *
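The relocated zone_dirtyable_memory() above now counts free pages minus the dirty balance reserve, clamped so it cannot underflow, plus the file LRU pages, instead of leaning on the removed zone_reclaimable_pages(). A sketch of that arithmetic with made-up page counts:

    #include <stdio.h>

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
            return a < b ? a : b;
    }

    static unsigned long zone_dirtyable(unsigned long nr_free,
                                        unsigned long reserve,
                                        unsigned long inactive_file,
                                        unsigned long active_file)
    {
            unsigned long nr = nr_free;

            nr -= min_ul(nr, reserve);  /* underflow-safe subtraction */
            return nr + inactive_file + active_file;
    }

    int main(void)
    {
            printf("dirtyable: %lu pages\n", zone_dirtyable(10000, 512, 3000, 2000));
            printf("tiny zone: %lu pages\n", zone_dirtyable(100, 512, 40, 10));
            return 0;
    }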
4317 diff --git a/mm/slub.c b/mm/slub.c
4318 index 96f21691b67c..5c1343a391d0 100644
4319 --- a/mm/slub.c
4320 +++ b/mm/slub.c
4321 @@ -4272,7 +4272,13 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
4322
4323 page = ACCESS_ONCE(c->partial);
4324 if (page) {
4325 - x = page->pobjects;
4326 + node = page_to_nid(page);
4327 + if (flags & SO_TOTAL)
4328 + WARN_ON_ONCE(1);
4329 + else if (flags & SO_OBJECTS)
4330 + WARN_ON_ONCE(1);
4331 + else
4332 + x = page->pages;
4333 total += x;
4334 nodes[node] += x;
4335 }
4336 diff --git a/mm/vmscan.c b/mm/vmscan.c
4337 index eea668d9cff6..05e6095159dc 100644
4338 --- a/mm/vmscan.c
4339 +++ b/mm/vmscan.c
4340 @@ -147,7 +147,7 @@ static bool global_reclaim(struct scan_control *sc)
4341 }
4342 #endif
4343
4344 -unsigned long zone_reclaimable_pages(struct zone *zone)
4345 +static unsigned long zone_reclaimable_pages(struct zone *zone)
4346 {
4347 int nr;
4348
4349 @@ -3297,27 +3297,6 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
4350 wake_up_interruptible(&pgdat->kswapd_wait);
4351 }
4352
4353 -/*
4354 - * The reclaimable count would be mostly accurate.
4355 - * The less reclaimable pages may be
4356 - * - mlocked pages, which will be moved to unevictable list when encountered
4357 - * - mapped pages, which may require several travels to be reclaimed
4358 - * - dirty pages, which is not "instantly" reclaimable
4359 - */
4360 -unsigned long global_reclaimable_pages(void)
4361 -{
4362 - int nr;
4363 -
4364 - nr = global_page_state(NR_ACTIVE_FILE) +
4365 - global_page_state(NR_INACTIVE_FILE);
4366 -
4367 - if (get_nr_swap_pages() > 0)
4368 - nr += global_page_state(NR_ACTIVE_ANON) +
4369 - global_page_state(NR_INACTIVE_ANON);
4370 -
4371 - return nr;
4372 -}
4373 -
4374 #ifdef CONFIG_HIBERNATION
4375 /*
4376 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
4377 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
4378 index 743e6ebf5f9f..2c7baa809913 100644
4379 --- a/net/core/skbuff.c
4380 +++ b/net/core/skbuff.c
4381 @@ -74,36 +74,6 @@
4382 struct kmem_cache *skbuff_head_cache __read_mostly;
4383 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
4384
4385 -static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
4386 - struct pipe_buffer *buf)
4387 -{
4388 - put_page(buf->page);
4389 -}
4390 -
4391 -static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
4392 - struct pipe_buffer *buf)
4393 -{
4394 - get_page(buf->page);
4395 -}
4396 -
4397 -static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
4398 - struct pipe_buffer *buf)
4399 -{
4400 - return 1;
4401 -}
4402 -
4403 -
4404 -/* Pipe buffer operations for a socket. */
4405 -static const struct pipe_buf_operations sock_pipe_buf_ops = {
4406 - .can_merge = 0,
4407 - .map = generic_pipe_buf_map,
4408 - .unmap = generic_pipe_buf_unmap,
4409 - .confirm = generic_pipe_buf_confirm,
4410 - .release = sock_pipe_buf_release,
4411 - .steal = sock_pipe_buf_steal,
4412 - .get = sock_pipe_buf_get,
4413 -};
4414 -
4415 /**
4416 * skb_panic - private function for out-of-line support
4417 * @skb: buffer
4418 @@ -1800,7 +1770,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
4419 .partial = partial,
4420 .nr_pages_max = MAX_SKB_FRAGS,
4421 .flags = flags,
4422 - .ops = &sock_pipe_buf_ops,
4423 + .ops = &nosteal_pipe_buf_ops,
4424 .spd_release = sock_spd_release,
4425 };
4426 struct sk_buff *frag_iter;
4427 diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
4428 index f1eb0d16666c..23fa3c1841cd 100644
4429 --- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
4430 +++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
4431 @@ -137,7 +137,6 @@ void init_gssp_clnt(struct sunrpc_net *sn)
4432 {
4433 mutex_init(&sn->gssp_lock);
4434 sn->gssp_clnt = NULL;
4435 - init_waitqueue_head(&sn->gssp_wq);
4436 }
4437
4438 int set_gssp_clnt(struct net *net)
4439 @@ -154,7 +153,6 @@ int set_gssp_clnt(struct net *net)
4440 sn->gssp_clnt = clnt;
4441 }
4442 mutex_unlock(&sn->gssp_lock);
4443 - wake_up(&sn->gssp_wq);
4444 return ret;
4445 }
4446
4447 diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
4448 index 09fb638bcaa4..e18be86dc486 100644
4449 --- a/net/sunrpc/auth_gss/svcauth_gss.c
4450 +++ b/net/sunrpc/auth_gss/svcauth_gss.c
4451 @@ -1295,34 +1295,9 @@ static int set_gss_proxy(struct net *net, int type)
4452 else
4453 ret = -EBUSY;
4454 spin_unlock(&use_gssp_lock);
4455 - wake_up(&sn->gssp_wq);
4456 return ret;
4457 }
4458
4459 -static inline bool gssp_ready(struct sunrpc_net *sn)
4460 -{
4461 - switch (sn->use_gss_proxy) {
4462 - case -1:
4463 - return false;
4464 - case 0:
4465 - return true;
4466 - case 1:
4467 - return sn->gssp_clnt;
4468 - }
4469 - WARN_ON_ONCE(1);
4470 - return false;
4471 -}
4472 -
4473 -static int wait_for_gss_proxy(struct net *net, struct file *file)
4474 -{
4475 - struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
4476 -
4477 - if (file->f_flags & O_NONBLOCK && !gssp_ready(sn))
4478 - return -EAGAIN;
4479 - return wait_event_interruptible(sn->gssp_wq, gssp_ready(sn));
4480 -}
4481 -
4482 -
4483 static ssize_t write_gssp(struct file *file, const char __user *buf,
4484 size_t count, loff_t *ppos)
4485 {
4486 @@ -1355,16 +1330,12 @@ static ssize_t read_gssp(struct file *file, char __user *buf,
4487 size_t count, loff_t *ppos)
4488 {
4489 struct net *net = PDE_DATA(file_inode(file));
4490 + struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
4491 unsigned long p = *ppos;
4492 char tbuf[10];
4493 size_t len;
4494 - int ret;
4495 -
4496 - ret = wait_for_gss_proxy(net, file);
4497 - if (ret)
4498 - return ret;
4499
4500 - snprintf(tbuf, sizeof(tbuf), "%d\n", use_gss_proxy(net));
4501 + snprintf(tbuf, sizeof(tbuf), "%d\n", sn->use_gss_proxy);
4502 len = strlen(tbuf);
4503 if (p >= len)
4504 return 0;
4505 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
4506 index 941d19f8c999..f318a95ec64d 100644
4507 --- a/net/sunrpc/clnt.c
4508 +++ b/net/sunrpc/clnt.c
4509 @@ -1433,9 +1433,13 @@ call_refreshresult(struct rpc_task *task)
4510 task->tk_action = call_refresh;
4511 switch (status) {
4512 case 0:
4513 - if (rpcauth_uptodatecred(task))
4514 + if (rpcauth_uptodatecred(task)) {
4515 task->tk_action = call_allocate;
4516 - return;
4517 + return;
4518 + }
4519 + /* Use rate-limiting and a max number of retries if refresh
4520 + * had status 0 but failed to update the cred.
4521 + */
4522 case -ETIMEDOUT:
4523 rpc_delay(task, 3*HZ);
4524 case -EAGAIN:
4525 diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h
4526 index 779742cfc1ff..3a260e47fad2 100644
4527 --- a/net/sunrpc/netns.h
4528 +++ b/net/sunrpc/netns.h
4529 @@ -26,7 +26,6 @@ struct sunrpc_net {
4530 unsigned int rpcb_is_af_local : 1;
4531
4532 struct mutex gssp_lock;
4533 - wait_queue_head_t gssp_wq;
4534 struct rpc_clnt *gssp_clnt;
4535 int use_gss_proxy;
4536 int pipe_version;
4537 diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
4538 index c8adde3aff8f..7e5bceddc36f 100644
4539 --- a/security/selinux/ss/policydb.c
4540 +++ b/security/selinux/ss/policydb.c
4541 @@ -1941,7 +1941,19 @@ static int filename_trans_read(struct policydb *p, void *fp)
4542 if (rc)
4543 goto out;
4544
4545 - hashtab_insert(p->filename_trans, ft, otype);
4546 + rc = hashtab_insert(p->filename_trans, ft, otype);
4547 + if (rc) {
4548 + /*
4549 + * Do not return -EEXIST to the caller, or the system
4550 + * will not boot.
4551 + */
4552 + if (rc != -EEXIST)
4553 + goto out;
4554 + /* But free the memory to avoid a leak. */
4555 + kfree(ft);
4556 + kfree(name);
4557 + kfree(otype);
4558 + }
4559 }
4560 hash_eval(p->filename_trans, "filenametr");
4561 return 0;
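The policydb.c hunk above tolerates duplicate filename transitions: -EEXIST must not abort policy load (or the system fails to boot), but the objects built for the failed insert have to be freed. A mock sketch of that ownership pattern, where mock_insert() stands in for hashtab_insert():

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int mock_insert(const char *key)   /* pretend every key collides */
    {
            (void)key;
            return -EEXIST;
    }

    static int load_one(const char *name)
    {
            char *key = strdup(name);
            if (!key)
                    return -ENOMEM;

            int rc = mock_insert(key);  /* on success the table owns key */
            if (rc) {
                    if (rc != -EEXIST) {  /* real errors still fail the load */
                            free(key);
                            return rc;
                    }
                    free(key);  /* duplicate: drop our copy, keep going */
            }
            return 0;
    }

    int main(void)
    {
            printf("load: %d\n", load_one("bin_t:dir:ls"));
            return 0;
    }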
4562 diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
4563 index f09641da40d4..d1b3a361e526 100644
4564 --- a/tools/power/x86/turbostat/Makefile
4565 +++ b/tools/power/x86/turbostat/Makefile
4566 @@ -5,7 +5,7 @@ DESTDIR :=
4567
4568 turbostat : turbostat.c
4569 CFLAGS += -Wall
4570 -CFLAGS += -I../../../../arch/x86/include/uapi/
4571 +CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"'
4572
4573 %: %.c
4574 @mkdir -p $(BUILD_OUTPUT)
4575 diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
4576 index fe702076ca46..6a7ee5f21c9b 100644
4577 --- a/tools/power/x86/turbostat/turbostat.c
4578 +++ b/tools/power/x86/turbostat/turbostat.c
4579 @@ -20,7 +20,7 @@
4580 */
4581
4582 #define _GNU_SOURCE
4583 -#include <asm/msr.h>
4584 +#include MSRHEADER
4585 #include <stdio.h>
4586 #include <unistd.h>
4587 #include <sys/types.h>
4588 @@ -35,6 +35,7 @@
4589 #include <string.h>
4590 #include <ctype.h>
4591 #include <sched.h>
4592 +#include <cpuid.h>
4593
4594 char *proc_stat = "/proc/stat";
4595 unsigned int interval_sec = 5; /* set with -i interval_sec */
4596 @@ -1894,7 +1895,7 @@ void check_cpuid()
4597
4598 eax = ebx = ecx = edx = 0;
4599
4600 - asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0));
4601 + __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
4602
4603 if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
4604 genuine_intel = 1;
4605 @@ -1903,7 +1904,7 @@ void check_cpuid()
4606 fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
4607 (char *)&ebx, (char *)&edx, (char *)&ecx);
4608
4609 - asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
4610 + __get_cpuid(1, &fms, &ebx, &ecx, &edx);
4611 family = (fms >> 8) & 0xf;
4612 model = (fms >> 4) & 0xf;
4613 stepping = fms & 0xf;
4614 @@ -1925,7 +1926,7 @@ void check_cpuid()
4615 * This check is valid for both Intel and AMD.
4616 */
4617 ebx = ecx = edx = 0;
4618 - asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000000));
4619 + __get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
4620
4621 if (max_level < 0x80000007) {
4622 fprintf(stderr, "CPUID: no invariant TSC (max_level 0x%x)\n", max_level);
4623 @@ -1936,7 +1937,7 @@ void check_cpuid()
4624 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
4625 * this check is valid for both Intel and AMD
4626 */
4627 - asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007));
4628 + __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
4629 has_invariant_tsc = edx & (1 << 8);
4630
4631 if (!has_invariant_tsc) {
4632 @@ -1949,7 +1950,7 @@ void check_cpuid()
4633 * this check is valid for both Intel and AMD
4634 */
4635
4636 - asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6));
4637 + __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
4638 has_aperf = ecx & (1 << 0);
4639 do_dts = eax & (1 << 0);
4640 do_ptm = eax & (1 << 6);
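The turbostat changes above replace inline cpuid asm, which clobbers ebx and breaks PIC builds, with the compiler-provided __get_cpuid() from <cpuid.h>. A minimal standalone use of that helper; it is a real GCC/clang wrapper and builds on x86 with either compiler:

    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;
            char vendor[13];

            if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
                    return 1;               /* leaf 0 unsupported */

            memcpy(vendor + 0, &ebx, 4);    /* vendor order: ebx, edx, ecx */
            memcpy(vendor + 4, &edx, 4);
            memcpy(vendor + 8, &ecx, 4);
            vendor[12] = '\0';

            printf("max leaf %u, vendor %s\n", eax, vendor);
            return 0;
    }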