Magellan Linux

Annotation of /trunk/kernel-magellan/patches-3.1/0109-3.1.10-all-fixes.patch



Revision 1625
Fri Jan 20 11:52:15 2012 UTC by niro
File size: 64495 bytes
-updated to linux-3.1.10
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index b0e4b9c..13ab837 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1131,6 +1131,13 @@ following flags are specified:
/* Depends on KVM_CAP_IOMMU */
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)

+The KVM_DEV_ASSIGN_ENABLE_IOMMU flag is a mandatory option to ensure
+isolation of the device. Usages not specifying this flag are deprecated.
+
+Only PCI header type 0 devices with PCI BAR resources are supported by
+device assignment. The user requesting this ioctl must have read/write
+access to the PCI sysfs resource files associated with the device.
+
4.49 KVM_DEASSIGN_PCI_DEVICE

Capability: KVM_CAP_DEVICE_DEASSIGNMENT
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 67f87f2..78a1eff 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -1,6 +1,7 @@
#ifndef _ASM_X86_AMD_NB_H
#define _ASM_X86_AMD_NB_H

+#include <linux/ioport.h>
#include <linux/pci.h>

struct amd_nb_bus_dev_range {
@@ -13,6 +14,7 @@ extern const struct pci_device_id amd_nb_misc_ids[];
extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];

extern bool early_is_amd_nb(u32 value);
+extern struct resource *amd_get_mmconfig_range(struct resource *res);
extern int amd_cache_northbridges(void);
extern void amd_flush_garts(void);
extern int amd_numa_init(void);
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 4c39baa..bae1efe 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -119,6 +119,37 @@ bool __init early_is_amd_nb(u32 device)
return false;
}

+struct resource *amd_get_mmconfig_range(struct resource *res)
+{
+ u32 address;
+ u64 base, msr;
+ unsigned segn_busn_bits;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ return NULL;
+
+ /* assume all cpus from fam10h have mmconfig */
+ if (boot_cpu_data.x86 < 0x10)
+ return NULL;
+
+ address = MSR_FAM10H_MMIO_CONF_BASE;
+ rdmsrl(address, msr);
+
+ /* mmconfig is not enabled */
+ if (!(msr & FAM10H_MMIO_CONF_ENABLE))
+ return NULL;
+
+ base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
+
+ segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
+ FAM10H_MMIO_CONF_BUSRANGE_MASK;
+
+ res->flags = IORESOURCE_MEM;
+ res->start = base;
+ res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
+ return res;
+}
+
int amd_get_subcaches(int cpu)
{
struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index c1a0188..44842d7 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -74,9 +74,10 @@ static cycle_t kvm_clock_read(void)
struct pvclock_vcpu_time_info *src;
cycle_t ret;

- src = &get_cpu_var(hv_clock);
+ preempt_disable_notrace();
+ src = &__get_cpu_var(hv_clock);
ret = pvclock_clocksource_read(src);
- put_cpu_var(hv_clock);
+ preempt_enable_notrace();
return ret;
}

diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index efad723..43e04d1 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -338,11 +338,15 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
return HRTIMER_NORESTART;
}

-static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
+static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
{
+ struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
struct kvm_timer *pt = &ps->pit_timer;
s64 interval;

+ if (!irqchip_in_kernel(kvm))
+ return;
+
interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);

pr_debug("create pit timer, interval is %llu nsec\n", interval);
@@ -394,13 +398,13 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
/* FIXME: enhance mode 4 precision */
case 4:
if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) {
- create_pit_timer(ps, val, 0);
+ create_pit_timer(kvm, val, 0);
}
break;
case 2:
case 3:
if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){
- create_pit_timer(ps, val, 1);
+ create_pit_timer(kvm, val, 1);
}
break;
default:
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 1dab519..f927429 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -87,9 +87,9 @@ static unsigned long mmap_rnd(void)
*/
if (current->flags & PF_RANDOMIZE) {
if (mmap_is_ia32())
- rnd = (long)get_random_int() % (1<<8);
+ rnd = get_random_int() % (1<<8);
else
- rnd = (long)(get_random_int() % (1<<28));
+ rnd = get_random_int() % (1<<28);
}
return rnd << PAGE_SHIFT;
}
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index 6b8759f..d24d3da 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -18,8 +18,9 @@ obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
obj-$(CONFIG_X86_MRST) += mrst.o

obj-y += common.o early.o
-obj-y += amd_bus.o bus_numa.o
+obj-y += bus_numa.o

+obj-$(CONFIG_AMD_NB) += amd_bus.o
obj-$(CONFIG_PCI_CNB20LE_QUIRK) += broadcom_bus.o

ifeq ($(CONFIG_PCI_DEBUG),y)
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 404f21a..f8348ab 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -149,7 +149,7 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
struct acpi_resource_address64 addr;
acpi_status status;
unsigned long flags;
- u64 start, end;
+ u64 start, orig_end, end;

status = resource_to_addr(acpi_res, &addr);
if (!ACPI_SUCCESS(status))
@@ -165,7 +165,21 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
return AE_OK;

start = addr.minimum + addr.translation_offset;
- end = addr.maximum + addr.translation_offset;
+ orig_end = end = addr.maximum + addr.translation_offset;
+
+ /* Exclude non-addressable range or non-addressable portion of range */
+ end = min(end, (u64)iomem_resource.end);
+ if (end <= start) {
+ dev_info(&info->bridge->dev,
+ "host bridge window [%#llx-%#llx] "
+ "(ignored, not CPU addressable)\n", start, orig_end);
+ return AE_OK;
+ } else if (orig_end != end) {
+ dev_info(&info->bridge->dev,
+ "host bridge window [%#llx-%#llx] "
+ "([%#llx-%#llx] ignored, not CPU addressable)\n",
+ start, orig_end, end + 1, orig_end);
+ }

res = &info->res[info->res_num];
res->name = info->name;
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 026e493..385a940 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -30,34 +30,6 @@ static struct pci_hostbridge_probe pci_probes[] __initdata = {
{ 0, 0x18, PCI_VENDOR_ID_AMD, 0x1300 },
};

-static u64 __initdata fam10h_mmconf_start;
-static u64 __initdata fam10h_mmconf_end;
-static void __init get_pci_mmcfg_amd_fam10h_range(void)
-{
- u32 address;
- u64 base, msr;
- unsigned segn_busn_bits;
-
- /* assume all cpus from fam10h have mmconf */
- if (boot_cpu_data.x86 < 0x10)
- return;
-
- address = MSR_FAM10H_MMIO_CONF_BASE;
- rdmsrl(address, msr);
-
- /* mmconfig is not enable */
- if (!(msr & FAM10H_MMIO_CONF_ENABLE))
- return;
-
- base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
-
- segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
- FAM10H_MMIO_CONF_BUSRANGE_MASK;
-
- fam10h_mmconf_start = base;
- fam10h_mmconf_end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
-}
-
#define RANGE_NUM 16

/**
@@ -85,6 +57,9 @@ static int __init early_fill_mp_bus_info(void)
u64 val;
u32 address;
bool found;
+ struct resource fam10h_mmconf_res, *fam10h_mmconf;
+ u64 fam10h_mmconf_start;
+ u64 fam10h_mmconf_end;

if (!early_pci_allowed())
return -1;
@@ -211,12 +186,17 @@ static int __init early_fill_mp_bus_info(void)
subtract_range(range, RANGE_NUM, 0, end);

/* get mmconfig */
- get_pci_mmcfg_amd_fam10h_range();
+ fam10h_mmconf = amd_get_mmconfig_range(&fam10h_mmconf_res);
/* need to take out mmconf range */
- if (fam10h_mmconf_end) {
- printk(KERN_DEBUG "Fam 10h mmconf [%llx, %llx]\n", fam10h_mmconf_start, fam10h_mmconf_end);
+ if (fam10h_mmconf) {
+ printk(KERN_DEBUG "Fam 10h mmconf %pR\n", fam10h_mmconf);
+ fam10h_mmconf_start = fam10h_mmconf->start;
+ fam10h_mmconf_end = fam10h_mmconf->end;
subtract_range(range, RANGE_NUM, fam10h_mmconf_start,
fam10h_mmconf_end + 1);
+ } else {
+ fam10h_mmconf_start = 0;
+ fam10h_mmconf_end = 0;
}

/* mmio resource */
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index b94d871..7642495 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2069,6 +2069,7 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev)
void r100_bm_disable(struct radeon_device *rdev)
{
u32 tmp;
+ u16 tmp16;

/* disable bus mastering */
tmp = RREG32(R_000030_BUS_CNTL);
@@ -2079,8 +2080,8 @@ void r100_bm_disable(struct radeon_device *rdev)
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
tmp = RREG32(RADEON_BUS_CNTL);
mdelay(1);
- pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
- pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+ pci_read_config_word(rdev->pdev, 0x4, &tmp16);
+ pci_write_config_word(rdev->pdev, 0x4, tmp16 & 0xFFFB);
mdelay(1);
}

diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f5ac7e7..c45d921 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -196,6 +196,13 @@ static void r600_hdmi_videoinfoframe(
frame[0xD] = (right_bar >> 8);

r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
+ /* Our header values (type, version, length) should be alright, Intel
+ * is using the same. Checksum function also seems to be OK, it works
+ * fine for audio infoframe. However calculated value is always lower
+ * by 2 in comparison to fglrx. It breaks displaying anything in case
+ * of TVs that strictly check the checksum. Hack it manually here to
+ * workaround this issue. */
+ frame[0x0] += 2;

WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b51e157..50d105a 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -224,8 +224,11 @@ int radeon_wb_init(struct radeon_device *rdev)
if (radeon_no_wb == 1)
rdev->wb.enabled = false;
else {
- /* often unreliable on AGP */
if (rdev->flags & RADEON_IS_AGP) {
+ /* often unreliable on AGP */
+ rdev->wb.enabled = false;
+ } else if (rdev->family < CHIP_R300) {
+ /* often unreliable on pre-r300 */
rdev->wb.enabled = false;
} else {
rdev->wb.enabled = true;
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 0e89a9b..d9b0bc4 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -324,10 +324,10 @@ void rs600_hpd_fini(struct radeon_device *rdev)

void rs600_bm_disable(struct radeon_device *rdev)
{
- u32 tmp;
+ u16 tmp;

/* disable bus mastering */
- pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+ pci_read_config_word(rdev->pdev, 0x4, &tmp);
pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
mdelay(1);
}
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f26ae31..e9c8f80 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -361,7 +361,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)

case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
parser->global.report_size = item_udata(item);
- if (parser->global.report_size > 32) {
+ if (parser->global.report_size > 96) {
dbg_hid("invalid report_size %d\n",
parser->global.report_size);
return -1;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d9587df..606fc04 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -508,8 +508,17 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio, int *max_sectors)
if (test_bit(WriteMostly, &rdev->flags)) {
/* Don't balance among write-mostly, just
* use the first as a last resort */
- if (best_disk < 0)
+ if (best_disk < 0) {
+ if (is_badblock(rdev, this_sector, sectors,
+ &first_bad, &bad_sectors)) {
+ if (first_bad < this_sector)
+ /* Cannot use this */
+ continue;
+ best_good_sectors = first_bad - this_sector;
+ } else
+ best_good_sectors = sectors;
best_disk = disk;
+ }
continue;
}
/* This is a reasonable device to use. It might
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index ca38569..bff8d46 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -215,7 +215,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)

mutex_lock(&dev->lock);

- if (dev->open++)
+ if (dev->open)
goto unlock;

kref_get(&dev->ref);
@@ -235,6 +235,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
goto error_release;

unlock:
+ dev->open++;
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
return ret;
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index e3e40f4..43130e8 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -253,6 +253,9 @@ static void find_next_position(struct mtdoops_context *cxt)
size_t retlen;

for (page = 0; page < cxt->oops_pages; page++) {
+ if (mtd->block_isbad &&
+ mtd->block_isbad(mtd, page * record_size))
+ continue;
/* Assume the page is used */
mark_page_used(cxt, page);
ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
@@ -369,7 +372,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)

/* oops_page_used is a bit field */
cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
- BITS_PER_LONG));
+ BITS_PER_LONG) * sizeof(unsigned long));
if (!cxt->oops_page_used) {
printk(KERN_ERR "mtdoops: could not allocate page array\n");
return;
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 531625f..129bad2 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -277,6 +277,12 @@ static int __init mtd_stresstest_init(void)
(unsigned long long)mtd->size, mtd->erasesize,
pgsize, ebcnt, pgcnt, mtd->oobsize);

+ if (ebcnt < 2) {
+ printk(PRINT_PREF "error: need at least 2 eraseblocks\n");
+ err = -ENOSPC;
+ goto out_put_mtd;
+ }
+
/* Read or write up 2 eraseblocks at a time */
bufsize = mtd->erasesize * 2;

@@ -315,6 +321,7 @@ out:
kfree(bbt);
vfree(writebuf);
vfree(readbuf);
+out_put_mtd:
put_mtd_device(mtd);
if (err)
printk(PRINT_PREF "error %d occurred\n", err);
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 4be6718..c696c94 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -1028,12 +1028,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
* holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
* LEB is already locked, we just do not move it and return
- * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later.
+ * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
+ * we do not know the reasons of the contention - it may be just a
+ * normal I/O on this LEB, so we want to re-try.
*/
err = leb_write_trylock(ubi, vol_id, lnum);
if (err) {
dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
- return MOVE_CANCEL_RACE;
+ return MOVE_RETRY;
}

/*
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index dc64c76..d51d75d 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -120,6 +120,7 @@ enum {
* PEB
* MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
* target PEB
+ * MOVE_RETRY: retry scrubbing the PEB
*/
enum {
MOVE_CANCEL_RACE = 1,
@@ -127,6 +128,7 @@ enum {
MOVE_TARGET_RD_ERR,
MOVE_TARGET_WR_ERR,
MOVE_CANCEL_BITFLIPS,
+ MOVE_RETRY,
};

/**
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 42c684c..0696e36 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -795,7 +795,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
protect = 1;
goto out_not_moved;
}
-
+ if (err == MOVE_RETRY) {
+ scrubbing = 1;
+ goto out_not_moved;
+ }
if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
err == MOVE_TARGET_RD_ERR) {
/*
@@ -1049,7 +1052,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,

ubi_err("failed to erase PEB %d, error %d", pnum, err);
kfree(wl_wrk);
- kmem_cache_free(ubi_wl_entry_slab, e);

if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
err == -EBUSY) {
@@ -1062,14 +1064,16 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
goto out_ro;
}
return err;
- } else if (err != -EIO) {
+ }
+
+ kmem_cache_free(ubi_wl_entry_slab, e);
+ if (err != -EIO)
/*
* If this is not %-EIO, we have no idea what to do. Scheduling
* this physical eraseblock for erasure again would cause
* errors again and again. Well, lets switch to R/O mode.
*/
goto out_ro;
- }

/* It is %-EIO, the PEB went bad */

diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
index 6f91a14..3fda6b1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
@@ -196,6 +196,8 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw,
/* Allocate skb buffer to contain firmware */
/* info and tx descriptor info. */
skb = dev_alloc_skb(frag_length);
+ if (!skb)
+ return false;
skb_reserve(skb, extra_descoffset);
seg_ptr = (u8 *)skb_put(skb, (u32)(frag_length -
extra_descoffset));
@@ -573,6 +575,8 @@ static bool _rtl92s_firmware_set_h2c_cmd(struct ieee80211_hw *hw, u8 h2c_cmd,

len = _rtl92s_get_h2c_cmdlen(MAX_TRANSMIT_BUFFER_SIZE, 1, &cmd_len);
skb = dev_alloc_skb(len);
+ if (!skb)
+ return false;
cb_desc = (struct rtl_tcb_desc *)(skb->cb);
cb_desc->queue_index = TXCMD_QUEUE;
cb_desc->cmd_or_init = DESC_PACKET_TYPE_NORMAL;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 2f10328..e174982 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -869,5 +869,15 @@ EXPORT_SYMBOL(pci_msi_enabled);

void pci_msi_init_pci_dev(struct pci_dev *dev)
{
+ int pos;
INIT_LIST_HEAD(&dev->msi_list);
+
+ /* Disable the msi hardware to avoid screaming interrupts
+ * during boot. This is the power on reset default so
+ * usually this should be a noop.
+ */
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (pos)
+ msi_set_enable(dev, pos, 0);
+ msix_set_enable(dev, 0);
}
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 0c59541..0d94eec 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -493,6 +493,8 @@ static void ideapad_backlight_notify_power(struct ideapad_private *priv)
unsigned long power;
struct backlight_device *blightdev = priv->blightdev;

+ if (!blightdev)
+ return;
if (read_ec_data(ideapad_handle, 0x18, &power))
return;
blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index dfbd5a6..258fef2 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -295,6 +295,45 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
}
}

+#ifdef CONFIG_AMD_NB
+
+#include <asm/amd_nb.h>
+
+static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
+{
+ resource_size_t start, end;
+ struct pnp_resource *pnp_res;
+ struct resource *res;
+ struct resource mmconfig_res, *mmconfig;
+
+ mmconfig = amd_get_mmconfig_range(&mmconfig_res);
+ if (!mmconfig)
+ return;
+
+ list_for_each_entry(pnp_res, &dev->resources, list) {
+ res = &pnp_res->res;
+ if (res->end < mmconfig->start || res->start > mmconfig->end ||
+ (res->start == mmconfig->start && res->end == mmconfig->end))
+ continue;
+
+ dev_info(&dev->dev, FW_BUG
+ "%pR covers only part of AMD MMCONFIG area %pR; adding more reservations\n",
+ res, mmconfig);
+ if (mmconfig->start < res->start) {
+ start = mmconfig->start;
+ end = res->start - 1;
+ pnp_add_mem_resource(dev, start, end, 0);
+ }
+ if (mmconfig->end > res->end) {
+ start = res->end + 1;
+ end = mmconfig->end;
+ pnp_add_mem_resource(dev, start, end, 0);
+ }
+ break;
+ }
+}
+#endif
+
/*
* PnP Quirks
* Cards or devices that need some tweaking due to incomplete resource info
@@ -322,6 +361,9 @@ static struct pnp_fixup pnp_fixups[] = {
/* PnP resources that might overlap PCI BARs */
{"PNP0c01", quirk_system_pci_resources},
{"PNP0c02", quirk_system_pci_resources},
+#ifdef CONFIG_AMD_NB
+ {"PNP0c01", quirk_amd_mmconfig_area},
+#endif
{""}
};

diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 44e91e5..a86f301 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -227,11 +227,11 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
alarm->time.tm_hour = now.tm_hour;

/* For simplicity, only support date rollover for now */
- if (alarm->time.tm_mday == -1) {
+ if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) {
alarm->time.tm_mday = now.tm_mday;
missing = day;
}
- if (alarm->time.tm_mon == -1) {
+ if ((unsigned)alarm->time.tm_mon >= 12) {
alarm->time.tm_mon = now.tm_mon;
if (missing == none)
missing = month;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 39e81cd..10f16a3 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -66,6 +66,8 @@ static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];

#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

+#define MAX_HBA_QUEUE_DEPTH 30000
+#define MAX_CHAIN_DEPTH 100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
@@ -2098,8 +2100,6 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
}
if (ioc->chain_dma_pool)
pci_pool_destroy(ioc->chain_dma_pool);
- }
- if (ioc->chain_lookup) {
free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
ioc->chain_lookup = NULL;
}
@@ -2117,9 +2117,7 @@ static int
_base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
{
struct mpt2sas_facts *facts;
- u32 queue_size, queue_diff;
u16 max_sge_elements;
- u16 num_of_reply_frames;
u16 chains_needed_per_io;
u32 sz, total_sz;
u32 retry_sz;
@@ -2146,7 +2144,8 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
max_request_credit = (max_queue_depth < facts->RequestCredit)
? max_queue_depth : facts->RequestCredit;
else
- max_request_credit = facts->RequestCredit;
+ max_request_credit = min_t(u16, facts->RequestCredit,
+ MAX_HBA_QUEUE_DEPTH);

ioc->hba_queue_depth = max_request_credit;
ioc->hi_priority_depth = facts->HighPriorityCredit;
@@ -2187,50 +2186,25 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
}
ioc->chains_needed_per_io = chains_needed_per_io;

- /* reply free queue sizing - taking into account for events */
- num_of_reply_frames = ioc->hba_queue_depth + 32;
-
- /* number of replies frames can't be a multiple of 16 */
- /* decrease number of reply frames by 1 */
- if (!(num_of_reply_frames % 16))
- num_of_reply_frames--;
-
- /* calculate number of reply free queue entries
- * (must be multiple of 16)
- */
-
- /* (we know reply_free_queue_depth is not a multiple of 16) */
- queue_size = num_of_reply_frames;
- queue_size += 16 - (queue_size % 16);
- ioc->reply_free_queue_depth = queue_size;
-
- /* reply descriptor post queue sizing */
- /* this size should be the number of request frames + number of reply
- * frames
- */
-
- queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1;
- /* round up to 16 byte boundary */
- if (queue_size % 16)
- queue_size += 16 - (queue_size % 16);
-
- /* check against IOC maximum reply post queue depth */
- if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) {
- queue_diff = queue_size -
- facts->MaxReplyDescriptorPostQueueDepth;
+ /* reply free queue sizing - taking into account for 64 FW events */
+ ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;

- /* round queue_diff up to multiple of 16 */
- if (queue_diff % 16)
- queue_diff += 16 - (queue_diff % 16);
-
- /* adjust hba_queue_depth, reply_free_queue_depth,
- * and queue_size
- */
- ioc->hba_queue_depth -= (queue_diff / 2);
- ioc->reply_free_queue_depth -= (queue_diff / 2);
- queue_size = facts->MaxReplyDescriptorPostQueueDepth;
+ /* align the reply post queue on the next 16 count boundary */
+ if (!ioc->reply_free_queue_depth % 16)
+ ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16;
+ else
+ ioc->reply_post_queue_depth = ioc->reply_free_queue_depth +
+ 32 - (ioc->reply_free_queue_depth % 16);
+ if (ioc->reply_post_queue_depth >
+ facts->MaxReplyDescriptorPostQueueDepth) {
+ ioc->reply_post_queue_depth = min_t(u16,
+ (facts->MaxReplyDescriptorPostQueueDepth -
+ (facts->MaxReplyDescriptorPostQueueDepth % 16)),
+ (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16)));
+ ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16;
+ ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64;
}
- ioc->reply_post_queue_depth = queue_size;
+

dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
"sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
@@ -2316,15 +2290,12 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
"depth(%d)\n", ioc->name, ioc->request,
ioc->scsiio_depth));

- /* loop till the allocation succeeds */
- do {
- sz = ioc->chain_depth * sizeof(struct chain_tracker);
- ioc->chain_pages = get_order(sz);
- ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
- GFP_KERNEL, ioc->chain_pages);
- if (ioc->chain_lookup == NULL)
- ioc->chain_depth -= 100;
- } while (ioc->chain_lookup == NULL);
+ ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
+ sz = ioc->chain_depth * sizeof(struct chain_tracker);
+ ioc->chain_pages = get_order(sz);
+
+ ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
+ GFP_KERNEL, ioc->chain_pages);
ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
ioc->request_sz, 16, 0);
if (!ioc->chain_dma_pool) {
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 7375124..011b864 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -978,8 +978,8 @@ _scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid)
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
if (list_empty(&ioc->free_chain_list)) {
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- printk(MPT2SAS_WARN_FMT "chain buffers not available\n",
- ioc->name);
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT "chain buffers not "
+ "available\n", ioc->name));
return NULL;
}
chain_req = list_entry(ioc->free_chain_list.next,
@@ -6564,6 +6564,7 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
} else
sas_target_priv_data = NULL;
raid_device->responding = 1;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
starget_printk(KERN_INFO, raid_device->starget,
"handle(0x%04x), wwid(0x%016llx)\n", handle,
(unsigned long long)raid_device->wwid);
@@ -6574,16 +6575,16 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
*/
_scsih_init_warpdrive_properties(ioc, raid_device);
if (raid_device->handle == handle)
- goto out;
+ return;
printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
raid_device->handle);
raid_device->handle = handle;
if (sas_target_priv_data)
sas_target_priv_data->handle = handle;
- goto out;
+ return;
}
}
- out:
+
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}

diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 5534690..daee5db 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -801,6 +801,12 @@ static int process_msg(void)
goto out;
}

+ if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) {
+ kfree(msg);
+ err = -EINVAL;
+ goto out;
+ }
+
body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
if (body == NULL) {
kfree(msg);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 065ff37..2669cc3 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1984,17 +1984,16 @@ static int ext4_fill_flex_info(struct super_block *sb)
struct ext4_group_desc *gdp = NULL;
ext4_group_t flex_group_count;
ext4_group_t flex_group;
- int groups_per_flex = 0;
+ unsigned int groups_per_flex = 0;
size_t size;
int i;

sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
- groups_per_flex = 1 << sbi->s_log_groups_per_flex;
-
- if (groups_per_flex < 2) {
+ if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
sbi->s_log_groups_per_flex = 0;
return 1;
}
+ groups_per_flex = 1 << sbi->s_log_groups_per_flex;

/* We allocate both existing and potentially added groups */
flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 43926ad..54cea8a 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -339,7 +339,7 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
dprintk("%s enter. slotid %d seqid %d\n",
__func__, args->csa_slotid, args->csa_sequenceid);

- if (args->csa_slotid > NFS41_BC_MAX_CALLBACKS)
+ if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS)
return htonl(NFS4ERR_BADSLOT);

slot = tbl->slots + args->csa_slotid;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index babaf3a..b76be2f 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -321,13 +321,13 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
datasync);

ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
- if (ret)
- return ret;
mutex_lock(&inode->i_mutex);

nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
status = nfs_commit_inode(inode, FLUSH_SYNC);
+ if (status >= 0 && ret < 0)
+ status = ret;
have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
if (have_error)
ret = xchg(&ctx->error, 0);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 2d8a169..003cb69 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3442,19 +3442,6 @@ static inline int nfs4_server_supports_acls(struct nfs_server *server)
*/
#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)

-static void buf_to_pages(const void *buf, size_t buflen,
- struct page **pages, unsigned int *pgbase)
-{
- const void *p = buf;
-
- *pgbase = offset_in_page(buf);
- p -= *pgbase;
- while (p < buf + buflen) {
- *(pages++) = virt_to_page(p);
- p += PAGE_CACHE_SIZE;
- }
-}
-
static int buf_to_pages_noslab(const void *buf, size_t buflen,
struct page **pages, unsigned int *pgbase)
{
@@ -3551,9 +3538,19 @@ out:
nfs4_set_cached_acl(inode, acl);
}

+/*
+ * The getxattr API returns the required buffer length when called with a
+ * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
+ * the required buf. On a NULL buf, we send a page of data to the server
+ * guessing that the ACL request can be serviced by a page. If so, we cache
+ * up to the page of ACL data, and the 2nd call to getxattr is serviced by
+ * the cache. If not so, we throw away the page, and cache the required
+ * length. The next getxattr call will then produce another round trip to
+ * the server, this time with the input buf of the required size.
+ */
static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
- struct page *pages[NFS4ACL_MAXPAGES];
+ struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
struct nfs_getaclargs args = {
.fh = NFS_FH(inode),
.acl_pages = pages,
@@ -3568,41 +3565,60 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
.rpc_argp = &args,
.rpc_resp = &res,
};
- struct page *localpage = NULL;
- int ret;
+ int ret = -ENOMEM, npages, i, acl_len = 0;

- if (buflen < PAGE_SIZE) {
- /* As long as we're doing a round trip to the server anyway,
- * let's be prepared for a page of acl data. */
- localpage = alloc_page(GFP_KERNEL);
- resp_buf = page_address(localpage);
- if (localpage == NULL)
- return -ENOMEM;
- args.acl_pages[0] = localpage;
- args.acl_pgbase = 0;
- args.acl_len = PAGE_SIZE;
- } else {
- resp_buf = buf;
- buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
+ npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ /* As long as we're doing a round trip to the server anyway,
+ * let's be prepared for a page of acl data. */
+ if (npages == 0)
+ npages = 1;
+
+ for (i = 0; i < npages; i++) {
+ pages[i] = alloc_page(GFP_KERNEL);
+ if (!pages[i])
+ goto out_free;
+ }
+ if (npages > 1) {
+ /* for decoding across pages */
+ args.acl_scratch = alloc_page(GFP_KERNEL);
+ if (!args.acl_scratch)
+ goto out_free;
}
- ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
+ args.acl_len = npages * PAGE_SIZE;
+ args.acl_pgbase = 0;
+ /* Let decode_getfacl know not to fail if the ACL data is larger than
+ * the page we send as a guess */
+ if (buf == NULL)
+ res.acl_flags |= NFS4_ACL_LEN_REQUEST;
+ resp_buf = page_address(pages[0]);
+
+ dprintk("%s buf %p buflen %ld npages %d args.acl_len %ld\n",
+ __func__, buf, buflen, npages, args.acl_len);
+ ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
+ &msg, &args.seq_args, &res.seq_res, 0);
if (ret)
goto out_free;
- if (res.acl_len > args.acl_len)
- nfs4_write_cached_acl(inode, NULL, res.acl_len);
+
+ acl_len = res.acl_len - res.acl_data_offset;
+ if (acl_len > args.acl_len)
+ nfs4_write_cached_acl(inode, NULL, acl_len);
else
- nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
+ nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
+ acl_len);
if (buf) {
ret = -ERANGE;
- if (res.acl_len > buflen)
+ if (acl_len > buflen)
goto out_free;
- if (localpage)
- memcpy(buf, resp_buf, res.acl_len);
+ _copy_from_pages(buf, pages, res.acl_data_offset,
+ res.acl_len);
}
- ret = res.acl_len;
+ ret = acl_len;
out_free:
- if (localpage)
- __free_page(localpage);
+ for (i = 0; i < npages; i++)
+ if (pages[i])
+ __free_page(pages[i]);
+ if (args.acl_scratch)
+ __free_page(args.acl_scratch);
return ret;
}

@@ -3633,6 +3649,8 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
nfs_zap_acl_cache(inode);
ret = nfs4_read_cached_acl(inode, buf, buflen);
if (ret != -ENOENT)
+ /* -ENOENT is returned if there is no ACL or if there is an ACL
+ * but no cached acl data, just the acl length */
return ret;
return nfs4_get_acl_uncached(inode, buf, buflen);
}
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 1dce12f..97f987a 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -2517,11 +2517,13 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
- replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
+ replen = hdr.replen + op_decode_hdr_maxsz + 1;
encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);

xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
args->acl_pages, args->acl_pgbase, args->acl_len);
+ xdr_set_scratch_buffer(xdr, page_address(args->acl_scratch), PAGE_SIZE);
+
encode_nops(&hdr);
}

@@ -4957,17 +4959,18 @@ decode_restorefh(struct xdr_stream *xdr)
}

static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
- size_t *acl_len)
+ struct nfs_getaclres *res)
{
- __be32 *savep;
+ __be32 *savep, *bm_p;
uint32_t attrlen,
bitmap[3] = {0};
struct kvec *iov = req->rq_rcv_buf.head;
int status;

- *acl_len = 0;
+ res->acl_len = 0;
if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
goto out;
+ bm_p = xdr->p;
if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
goto out;
if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
@@ -4979,18 +4982,30 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
size_t hdrlen;
u32 recvd;

+ /* The bitmap (xdr len + bitmaps) and the attr xdr len words
+ * are stored with the acl data to handle the problem of
+ * variable length bitmaps.*/
+ xdr->p = bm_p;
+ res->acl_data_offset = be32_to_cpup(bm_p) + 2;
+ res->acl_data_offset <<= 2;
+
/* We ignore &savep and don't do consistency checks on
* the attr length. Let userspace figure it out.... */
hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
+ attrlen += res->acl_data_offset;
recvd = req->rq_rcv_buf.len - hdrlen;
if (attrlen > recvd) {
- dprintk("NFS: server cheating in getattr"
- " acl reply: attrlen %u > recvd %u\n",
+ if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
+ /* getxattr interface called with a NULL buf */
+ res->acl_len = attrlen;
+ goto out;
+ }
+ dprintk("NFS: acl reply: attrlen %u > recvd %u\n",
attrlen, recvd);
return -EINVAL;
}
xdr_read_pages(xdr, attrlen);
- *acl_len = attrlen;
+ res->acl_len = attrlen;
} else
status = -EOPNOTSUPP;

@@ -6028,7 +6043,7 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_getacl(xdr, rqstp, &res->acl_len);
+ status = decode_getacl(xdr, rqstp, res);

out:
return status;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 5b19b6a..c4daf4e 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -904,10 +904,24 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve
data->auth_flavor_len = 1;
data->version = version;
data->minorversion = 0;
+ security_init_mnt_opts(&data->lsm_opts);
}
return data;
}

+static void nfs_free_parsed_mount_data(struct nfs_parsed_mount_data *data)
+{
+ if (data) {
+ kfree(data->client_address);
+ kfree(data->mount_server.hostname);
+ kfree(data->nfs_server.export_path);
+ kfree(data->nfs_server.hostname);
+ kfree(data->fscache_uniq);
+ security_free_mnt_opts(&data->lsm_opts);
+ kfree(data);
+ }
+}
+
/*
* Sanity-check a server address provided by the mount command.
*
@@ -2215,9 +2229,7 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
data = nfs_alloc_parsed_mount_data(NFS_DEFAULT_VERSION);
mntfh = nfs_alloc_fhandle();
if (data == NULL || mntfh == NULL)
- goto out_free_fh;
-
- security_init_mnt_opts(&data->lsm_opts);
+ goto out;

/* Validate the mount data */
error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
@@ -2229,8 +2241,6 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
#ifdef CONFIG_NFS_V4
if (data->version == 4) {
mntroot = nfs4_try_mount(flags, dev_name, data);
- kfree(data->client_address);
- kfree(data->nfs_server.export_path);
goto out;
}
#endif /* CONFIG_NFS_V4 */
@@ -2285,13 +2295,8 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
s->s_flags |= MS_ACTIVE;

out:
- kfree(data->nfs_server.hostname);
- kfree(data->mount_server.hostname);
- kfree(data->fscache_uniq);
- security_free_mnt_opts(&data->lsm_opts);
-out_free_fh:
+ nfs_free_parsed_mount_data(data);
nfs_free_fhandle(mntfh);
- kfree(data);
return mntroot;

out_err_nosb:
@@ -2618,9 +2623,7 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,

mntfh = nfs_alloc_fhandle();
if (data == NULL || mntfh == NULL)
- goto out_free_fh;
-
- security_init_mnt_opts(&data->lsm_opts);
+ goto out;

/* Get a volume representation */
server = nfs4_create_server(data, mntfh);
@@ -2672,13 +2675,10 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,

s->s_flags |= MS_ACTIVE;

- security_free_mnt_opts(&data->lsm_opts);
nfs_free_fhandle(mntfh);
return mntroot;

out:
- security_free_mnt_opts(&data->lsm_opts);
-out_free_fh:
nfs_free_fhandle(mntfh);
return ERR_PTR(error);

@@ -2858,7 +2858,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,

data = nfs_alloc_parsed_mount_data(4);
if (data == NULL)
- goto out_free_data;
+ goto out;

/* Validate the mount data */
error = nfs4_validate_mount_data(raw_data, data, dev_name);
@@ -2872,12 +2872,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
error = PTR_ERR(res);

out:
- kfree(data->client_address);
- kfree(data->nfs_server.export_path);
- kfree(data->nfs_server.hostname);
- kfree(data->fscache_uniq);
-out_free_data:
- kfree(data);
+ nfs_free_parsed_mount_data(data);
dprintk("<-- nfs4_mount() = %d%s\n", error,
error != 0 ? " [error]" : "");
return res;
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 343bd76..b9c1c06 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -116,6 +116,8 @@ struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
struct task_struct *p);
+extern void mem_cgroup_replace_page_cache(struct page *oldpage,
+ struct page *newpage);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
@@ -361,6 +363,10 @@ static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
+static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
+ struct page *newpage)
+{
+}
#endif /* CONFIG_CGROUP_MEM_CONT */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 8475f34..5c0e10d 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -602,11 +602,16 @@ struct nfs_getaclargs {
size_t acl_len;
unsigned int acl_pgbase;
struct page ** acl_pages;
+ struct page * acl_scratch;
struct nfs4_sequence_args seq_args;
};

+/* getxattr ACL interface flags */
+#define NFS4_ACL_LEN_REQUEST 0x0001 /* zero length getxattr buffer */
struct nfs_getaclres {
size_t acl_len;
+ size_t acl_data_offset;
+ int acl_flags;
struct nfs4_sequence_res seq_res;
};

diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index e884096..dad7d9a 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -392,7 +392,7 @@
#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */
#define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */
-#define PCI_EXP_TYPE_RC_EC 0x10 /* Root Complex Event Collector */
+#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
#define PCI_EXP_DEVCAP 4 /* Device capabilities */
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index a20970e..af70af3 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -191,6 +191,8 @@ extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
struct xdr_array2_desc *desc);
extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
struct xdr_array2_desc *desc);
+extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase,
+ size_t len);

/*
* Provide some simple tools for XDR buffer overflow-checking etc.
diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
index 99fcffb..454ee26 100644
--- a/include/xen/interface/io/xs_wire.h
+++ b/include/xen/interface/io/xs_wire.h
@@ -84,4 +84,7 @@ struct xenstore_domain_interface {
XENSTORE_RING_IDX rsp_cons, rsp_prod;
};

+/* Violating this is very bad. See docs/misc/xenstore.txt. */
+#define XENSTORE_PAYLOAD_MAX 4096
+
#endif /* _XS_WIRE_H */
1338     diff --git a/init/do_mounts.c b/init/do_mounts.c
1339     index c0851a8..ef6478f 100644
1340     --- a/init/do_mounts.c
1341     +++ b/init/do_mounts.c
1342     @@ -360,15 +360,42 @@ out:
1343     }
1344    
1345     #ifdef CONFIG_ROOT_NFS
1346     +
1347     +#define NFSROOT_TIMEOUT_MIN 5
1348     +#define NFSROOT_TIMEOUT_MAX 30
1349     +#define NFSROOT_RETRY_MAX 5
1350     +
1351     static int __init mount_nfs_root(void)
1352     {
1353     char *root_dev, *root_data;
1354     + unsigned int timeout;
1355     + int try, err;
1356    
1357     - if (nfs_root_data(&root_dev, &root_data) != 0)
1358     - return 0;
1359     - if (do_mount_root(root_dev, "nfs", root_mountflags, root_data) != 0)
1360     + err = nfs_root_data(&root_dev, &root_data);
1361     + if (err != 0)
1362     return 0;
1363     - return 1;
1364     +
1365     + /*
1366     + * The server or network may not be ready, so try several
1367     + * times. Stop after a few tries in case the client wants
1368     + * to fall back to other boot methods.
1369     + */
1370     + timeout = NFSROOT_TIMEOUT_MIN;
1371     + for (try = 1; ; try++) {
1372     + err = do_mount_root(root_dev, "nfs",
1373     + root_mountflags, root_data);
1374     + if (err == 0)
1375     + return 1;
1376     + if (try > NFSROOT_RETRY_MAX)
1377     + break;
1378     +
1379     + /* Wait, in case the server refused us immediately */
1380     + ssleep(timeout);
1381     + timeout <<= 1;
1382     + if (timeout > NFSROOT_TIMEOUT_MAX)
1383     + timeout = NFSROOT_TIMEOUT_MAX;
1384     + }
1385     + return 0;
1386     }
1387     #endif
1388    
1389     diff --git a/mm/filemap.c b/mm/filemap.c
1390     index b91f3aa..0eedbf8 100644
1391     --- a/mm/filemap.c
1392     +++ b/mm/filemap.c
1393     @@ -393,24 +393,11 @@ EXPORT_SYMBOL(filemap_write_and_wait_range);
1394     int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
1395     {
1396     int error;
1397     - struct mem_cgroup *memcg = NULL;
1398    
1399     VM_BUG_ON(!PageLocked(old));
1400     VM_BUG_ON(!PageLocked(new));
1401     VM_BUG_ON(new->mapping);
1402    
1403     - /*
1404     - * This is not page migration, but prepare_migration and
1405     - * end_migration does enough work for charge replacement.
1406     - *
1407     - * In the longer term we probably want a specialized function
1408     - * for moving the charge from old to new in a more efficient
1409     - * manner.
1410     - */
1411     - error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
1412     - if (error)
1413     - return error;
1414     -
1415     error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
1416     if (!error) {
1417     struct address_space *mapping = old->mapping;
1418     @@ -432,13 +419,12 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
1419     if (PageSwapBacked(new))
1420     __inc_zone_page_state(new, NR_SHMEM);
1421     spin_unlock_irq(&mapping->tree_lock);
1422     + /* mem_cgroup code must not be called under tree_lock */
1423     + mem_cgroup_replace_page_cache(old, new);
1424     radix_tree_preload_end();
1425     if (freepage)
1426     freepage(old);
1427     page_cache_release(old);
1428     - mem_cgroup_end_migration(memcg, old, new, true);
1429     - } else {
1430     - mem_cgroup_end_migration(memcg, old, new, false);
1431     }
1432    
1433     return error;
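
The reordering is about lock discipline: mem_cgroup_replace_page_cache() takes page_cgroup and LRU locks of its own, so it must run after tree_lock has been dropped rather than wrapping the whole operation in prepare/end_migration. A generic sketch of deferring such work until a spinlock is released (pthreads; all names hypothetical):

/* sketch: do only cheap updates under the spinlock, defer the rest */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t tree_lock;
static int cache_slot;                  /* stand-in for the radix tree slot */

static void replace_slot(int newval)
{
        pthread_spin_lock(&tree_lock);
        cache_slot = newval;            /* non-blocking update only */
        pthread_spin_unlock(&tree_lock);

        /* accounting that may take other locks runs after the unlock,
         * like mem_cgroup_replace_page_cache() in the hunk above */
        printf("accounting updated for slot value %d\n", cache_slot);
}

int main(void)
{
        pthread_spin_init(&tree_lock, PTHREAD_PROCESS_PRIVATE);
        replace_slot(42);
        pthread_spin_destroy(&tree_lock);
        return 0;
}
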
1434     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
1435     index afde618..dd81ddc 100644
1436     --- a/mm/memcontrol.c
1437     +++ b/mm/memcontrol.c
1438     @@ -3355,6 +3355,50 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem,
1439     cgroup_release_and_wakeup_rmdir(&mem->css);
1440     }
1441    
1442     +/*
1443     + * When replacing a page cache page, newpage is not under any memcg, but
1444     + * it may already be on the LRU. So this function doesn't touch res_counter
1445     + * but handles the LRU. Both pages are locked, so we cannot race with uncharge.
1446     + */
1447     +void mem_cgroup_replace_page_cache(struct page *oldpage,
1448     + struct page *newpage)
1449     +{
1450     + struct mem_cgroup *memcg;
1451     + struct page_cgroup *pc;
1452     + struct zone *zone;
1453     + enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
1454     + unsigned long flags;
1455     +
1456     + if (mem_cgroup_disabled())
1457     + return;
1458     +
1459     + pc = lookup_page_cgroup(oldpage);
1460     + /* fix accounting on the old page */
1461     + lock_page_cgroup(pc);
1462     + memcg = pc->mem_cgroup;
1463     + mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
1464     + ClearPageCgroupUsed(pc);
1465     + unlock_page_cgroup(pc);
1466     +
1467     + if (PageSwapBacked(oldpage))
1468     + type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
1469     +
1470     + zone = page_zone(newpage);
1471     + pc = lookup_page_cgroup(newpage);
1472     + /*
1473     + * Even if newpage->mapping was NULL before starting the replacement,
1474     + * newpage may already be on the LRU (or on a pagevec bound for the
1475     + * LRU). Hold the LRU lock while we overwrite pc->mem_cgroup.
1476     + */
1477     + spin_lock_irqsave(&zone->lru_lock, flags);
1478     + if (PageLRU(newpage))
1479     + del_page_from_lru_list(zone, newpage, page_lru(newpage));
1480     + __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type);
1481     + if (PageLRU(newpage))
1482     + add_page_to_lru_list(zone, newpage, page_lru(newpage));
1483     + spin_unlock_irqrestore(&zone->lru_lock, flags);
1484     +}
1485     +
1486     #ifdef CONFIG_DEBUG_VM
1487     static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
1488     {
1489     diff --git a/mm/slub.c b/mm/slub.c
1490     index 7c54fe8..f73234d 100644
1491     --- a/mm/slub.c
1492     +++ b/mm/slub.c
1493     @@ -2077,6 +2077,11 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
1494     goto new_slab;
1495     }
1496    
1497     + /* must check c->freelist again in case of CPU migration or IRQ */
1498     + object = c->freelist;
1499     + if (object)
1500     + goto load_freelist;
1501     +
1502     stat(s, ALLOC_SLOWPATH);
1503    
1504     do {
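
The added re-check closes a race: between the fast path's miss and entry into __slab_alloc(), an interrupt or a migration to another CPU can refill c->freelist, so the slow path looks once more before doing expensive work. A self-contained sketch of the recheck pattern (pthreads; the kernel relies on per-cpu state and disabled interrupts, a mutex stands in here):

/* sketch: re-check shared state after entering the slow path */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *freelist;                  /* stand-in for c->freelist */

static void *alloc_slow(void)
{
        void *object;

        pthread_mutex_lock(&lock);
        object = freelist;              /* someone may have refilled it */
        if (object) {
                freelist = NULL;
                pthread_mutex_unlock(&lock);
                return object;          /* the "goto load_freelist" case */
        }
        object = malloc(32);            /* pretend to allocate a new slab */
        pthread_mutex_unlock(&lock);
        return object;
}

int main(void)
{
        void *p = alloc_slow();
        printf("got %p\n", p);
        free(p);
        return 0;
}
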
1505     diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
1506     index 7bc8702..ea70837 100644
1507     --- a/net/mac80211/wpa.c
1508     +++ b/net/mac80211/wpa.c
1509     @@ -105,7 +105,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
1510     if (status->flag & RX_FLAG_MMIC_ERROR)
1511     goto mic_fail;
1512    
1513     - if (!(status->flag & RX_FLAG_IV_STRIPPED))
1514     + if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key)
1515     goto update_iv;
1516    
1517     return RX_CONTINUE;
1518     diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
1519     index 277ebd4..593f4c6 100644
1520     --- a/net/sunrpc/xdr.c
1521     +++ b/net/sunrpc/xdr.c
1522     @@ -296,7 +296,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
1523     * Copies data into an arbitrary memory location from an array of pages
1524     * The copy is assumed to be non-overlapping.
1525     */
1526     -static void
1527     +void
1528     _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
1529     {
1530     struct page **pgfrom;
1531     @@ -324,6 +324,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
1532    
1533     } while ((len -= copy) != 0);
1534     }
1535     +EXPORT_SYMBOL_GPL(_copy_from_pages);
1536    
1537     /*
1538     * xdr_shrink_bufhead
1539     diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
1540     index da36d2c..5335605 100644
1541     --- a/security/integrity/ima/ima_api.c
1542     +++ b/security/integrity/ima/ima_api.c
1543     @@ -177,8 +177,8 @@ void ima_store_measurement(struct ima_iint_cache *iint, struct file *file,
1544     strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
1545    
1546     result = ima_store_template(entry, violation, inode);
1547     - if (!result)
1548     + if (!result || result == -EEXIST)
1549     iint->flags |= IMA_MEASURED;
1550     - else
1551     + if (result < 0)
1552     kfree(entry);
1553     }
1554     diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
1555     index 8e28f04..55a6271 100644
1556     --- a/security/integrity/ima/ima_queue.c
1557     +++ b/security/integrity/ima/ima_queue.c
1558     @@ -23,6 +23,8 @@
1559     #include <linux/slab.h>
1560     #include "ima.h"
1561    
1562     +#define AUDIT_CAUSE_LEN_MAX 32
1563     +
1564     LIST_HEAD(ima_measurements); /* list of all measurements */
1565    
1566     /* key: inode (before secure-hashing a file) */
1567     @@ -94,7 +96,8 @@ static int ima_pcr_extend(const u8 *hash)
1568    
1569     result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash);
1570     if (result != 0)
1571     - pr_err("IMA: Error Communicating to TPM chip\n");
1572     + pr_err("IMA: Error Communicating to TPM chip, result: %d\n",
1573     + result);
1574     return result;
1575     }
1576    
1577     @@ -106,14 +109,16 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
1578     {
1579     u8 digest[IMA_DIGEST_SIZE];
1580     const char *audit_cause = "hash_added";
1581     + char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
1582     int audit_info = 1;
1583     - int result = 0;
1584     + int result = 0, tpmresult = 0;
1585    
1586     mutex_lock(&ima_extend_list_mutex);
1587     if (!violation) {
1588     memcpy(digest, entry->digest, sizeof digest);
1589     if (ima_lookup_digest_entry(digest)) {
1590     audit_cause = "hash_exists";
1591     + result = -EEXIST;
1592     goto out;
1593     }
1594     }
1595     @@ -128,9 +133,11 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
1596     if (violation) /* invalidate pcr */
1597     memset(digest, 0xff, sizeof digest);
1598    
1599     - result = ima_pcr_extend(digest);
1600     - if (result != 0) {
1601     - audit_cause = "TPM error";
1602     + tpmresult = ima_pcr_extend(digest);
1603     + if (tpmresult != 0) {
1604     + snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
1605     + tpmresult);
1606     + audit_cause = tpm_audit_cause;
1607     audit_info = 0;
1608     }
1609     out:
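
Taken together, the IMA hunks make a duplicate digest report -EEXIST so the caller can still mark the file IMA_MEASURED while freeing the unused entry, and they fold the numeric TPM result into the audit cause string. A compact sketch of the duplicate-as-success convention (userspace C; names hypothetical):

/* sketch: treat "already stored" as success for flag purposes */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int stored_digest;

static int store_entry(int digest)
{
        if (stored_digest == digest)
                return -EEXIST;         /* duplicate, not a real failure */
        stored_digest = digest;
        return 0;
}

int main(void)
{
        int *entry = malloc(sizeof(*entry));
        int result;

        *entry = 7;
        stored_digest = 7;              /* simulate a prior measurement */

        result = store_entry(*entry);
        if (!result || result == -EEXIST)
                printf("marked as measured (result=%d)\n", result);
        if (result < 0)
                free(entry);            /* not added to the list, drop it */
        return 0;
}
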
1610     diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
1611     index 2e7ac31..f52ebe8 100644
1612     --- a/sound/pci/hda/hda_local.h
1613     +++ b/sound/pci/hda/hda_local.h
1614     @@ -476,7 +476,12 @@ static inline u32 get_wcaps(struct hda_codec *codec, hda_nid_t nid)
1615     }
1616    
1617     /* get the widget type from widget capability bits */
1618     -#define get_wcaps_type(wcaps) (((wcaps) & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT)
1619     +static inline int get_wcaps_type(unsigned int wcaps)
1620     +{
1621     + if (!wcaps)
1622     + return -1; /* invalid type */
1623     + return (wcaps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
1624     +}
1625    
1626     static inline unsigned int get_wcaps_channels(u32 wcaps)
1627     {
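
Turning the macro into an inline function lets a zeroed capability word, i.e. a widget that does not exist, be reported as -1 instead of being silently decoded as type 0 (an audio output). A standalone sketch including the caller-side check added to hda_proc.c below (the two constants are copied from hda_codec.h):

/* sketch: the macro-to-inline change plus the caller check */
#include <stdio.h>

#define AC_WCAP_TYPE            (0xf << 20)
#define AC_WCAP_TYPE_SHIFT      20

static inline int get_wcaps_type(unsigned int wcaps)
{
        if (!wcaps)
                return -1;              /* invalid/nonexistent widget */
        return (wcaps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
}

int main(void)
{
        unsigned int caps = 0x4 << 20;  /* some valid type bits */

        printf("type=%d\n", get_wcaps_type(caps));
        if (get_wcaps_type(0) == -1)
                printf("UNKNOWN Widget\n");
        return 0;
}
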
1628     diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
1629     index 2be57b0..6936c37 100644
1630     --- a/sound/pci/hda/hda_proc.c
1631     +++ b/sound/pci/hda/hda_proc.c
1632     @@ -54,6 +54,8 @@ static const char *get_wid_type_name(unsigned int wid_value)
1633     [AC_WID_BEEP] = "Beep Generator Widget",
1634     [AC_WID_VENDOR] = "Vendor Defined Widget",
1635     };
1636     + if (wid_value == -1)
1637     + return "UNKNOWN Widget";
1638     wid_value &= 0xf;
1639     if (names[wid_value])
1640     return names[wid_value];
1641     diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
1642     index 4346ad2..3546d38 100644
1643     --- a/sound/pci/hda/patch_cirrus.c
1644     +++ b/sound/pci/hda/patch_cirrus.c
1645     @@ -916,16 +916,14 @@ static void cs_automute(struct hda_codec *codec)
1646    
1647     /* mute speakers if spdif or hp jack is plugged in */
1648     for (i = 0; i < cfg->speaker_outs; i++) {
1649     + int pin_ctl = hp_present ? 0 : PIN_OUT;
1650     + /* detect on spdif is specific to CS421x */
1651     + if (spdif_present && (spec->vendor_nid == CS421X_VENDOR_NID))
1652     + pin_ctl = 0;
1653     +
1654     nid = cfg->speaker_pins[i];
1655     snd_hda_codec_write(codec, nid, 0,
1656     - AC_VERB_SET_PIN_WIDGET_CONTROL,
1657     - hp_present ? 0 : PIN_OUT);
1658     - /* detect on spdif is specific to CS421x */
1659     - if (spec->vendor_nid == CS421X_VENDOR_NID) {
1660     - snd_hda_codec_write(codec, nid, 0,
1661     - AC_VERB_SET_PIN_WIDGET_CONTROL,
1662     - spdif_present ? 0 : PIN_OUT);
1663     - }
1664     + AC_VERB_SET_PIN_WIDGET_CONTROL, pin_ctl);
1665     }
1666     if (spec->board_config == CS420X_MBP53 ||
1667     spec->board_config == CS420X_MBP55 ||
1668     @@ -1756,30 +1754,19 @@ static int build_cs421x_output(struct hda_codec *codec)
1669     struct auto_pin_cfg *cfg = &spec->autocfg;
1670     struct snd_kcontrol *kctl;
1671     int err;
1672     - char *name = "HP/Speakers";
1673     + char *name = "Master";
1674    
1675     fix_volume_caps(codec, dac);
1676     - if (!spec->vmaster_sw) {
1677     - err = add_vmaster(codec, dac);
1678     - if (err < 0)
1679     - return err;
1680     - }
1681    
1682     err = add_mute(codec, name, 0,
1683     HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
1684     if (err < 0)
1685     return err;
1686     - err = snd_ctl_add_slave(spec->vmaster_sw, kctl);
1687     - if (err < 0)
1688     - return err;
1689    
1690     err = add_volume(codec, name, 0,
1691     HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
1692     if (err < 0)
1693     return err;
1694     - err = snd_ctl_add_slave(spec->vmaster_vol, kctl);
1695     - if (err < 0)
1696     - return err;
1697    
1698     if (cfg->speaker_outs) {
1699     err = snd_hda_ctl_add(codec, 0,
1700     diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
1701     index 41fecc1..85fe10d 100644
1702     --- a/sound/pci/hda/patch_conexant.c
1703     +++ b/sound/pci/hda/patch_conexant.c
1704     @@ -1119,8 +1119,6 @@ static const char * const cxt5045_models[CXT5045_MODELS] = {
1705    
1706     static const struct snd_pci_quirk cxt5045_cfg_tbl[] = {
1707     SND_PCI_QUIRK(0x103c, 0x30d5, "HP 530", CXT5045_LAPTOP_HP530),
1708     - SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x3000, "HP DV Series",
1709     - CXT5045_LAPTOP_HPSENSE),
1710     SND_PCI_QUIRK(0x1179, 0xff31, "Toshiba P105", CXT5045_LAPTOP_MICSENSE),
1711     SND_PCI_QUIRK(0x152d, 0x0753, "Benq R55E", CXT5045_BENQ),
1712     SND_PCI_QUIRK(0x1734, 0x10ad, "Fujitsu Si1520", CXT5045_LAPTOP_MICSENSE),
1713     diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
1714     index 5261fd8..fc3c903 100644
1715     --- a/sound/pci/hda/patch_sigmatel.c
1716     +++ b/sound/pci/hda/patch_sigmatel.c
1717     @@ -4306,6 +4306,27 @@ static void stac_store_hints(struct hda_codec *codec)
1718     }
1719     }
1720    
1721     +static void stac_issue_unsol_events(struct hda_codec *codec, int num_pins,
1722     + const hda_nid_t *pins)
1723     +{
1724     + while (num_pins--)
1725     + stac_issue_unsol_event(codec, *pins++);
1726     +}
1727     +
1728     +/* fake event to set up pins */
1729     +static void stac_fake_hp_events(struct hda_codec *codec)
1730     +{
1731     + struct sigmatel_spec *spec = codec->spec;
1732     +
1733     + if (spec->autocfg.hp_outs)
1734     + stac_issue_unsol_events(codec, spec->autocfg.hp_outs,
1735     + spec->autocfg.hp_pins);
1736     + if (spec->autocfg.line_outs &&
1737     + spec->autocfg.line_out_pins[0] != spec->autocfg.hp_pins[0])
1738     + stac_issue_unsol_events(codec, spec->autocfg.line_outs,
1739     + spec->autocfg.line_out_pins);
1740     +}
1741     +
1742     static int stac92xx_init(struct hda_codec *codec)
1743     {
1744     struct sigmatel_spec *spec = codec->spec;
1745     @@ -4356,10 +4377,7 @@ static int stac92xx_init(struct hda_codec *codec)
1746     stac92xx_auto_set_pinctl(codec, spec->autocfg.line_out_pins[0],
1747     AC_PINCTL_OUT_EN);
1748     /* fake event to set up pins */
1749     - if (cfg->hp_pins[0])
1750     - stac_issue_unsol_event(codec, cfg->hp_pins[0]);
1751     - else if (cfg->line_out_pins[0])
1752     - stac_issue_unsol_event(codec, cfg->line_out_pins[0]);
1753     + stac_fake_hp_events(codec);
1754     } else {
1755     stac92xx_auto_init_multi_out(codec);
1756     stac92xx_auto_init_hp_out(codec);
1757     @@ -5000,19 +5018,11 @@ static void stac927x_proc_hook(struct snd_info_buffer *buffer,
1758     #ifdef CONFIG_PM
1759     static int stac92xx_resume(struct hda_codec *codec)
1760     {
1761     - struct sigmatel_spec *spec = codec->spec;
1762     -
1763     stac92xx_init(codec);
1764     snd_hda_codec_resume_amp(codec);
1765     snd_hda_codec_resume_cache(codec);
1766     /* fake event to set up pins again to override cached values */
1767     - if (spec->hp_detect) {
1768     - if (spec->autocfg.hp_pins[0])
1769     - stac_issue_unsol_event(codec, spec->autocfg.hp_pins[0]);
1770     - else if (spec->autocfg.line_out_pins[0])
1771     - stac_issue_unsol_event(codec,
1772     - spec->autocfg.line_out_pins[0]);
1773     - }
1774     + stac_fake_hp_events(codec);
1775     return 0;
1776     }
1777    
1778     diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
1779     index d636d93..c3babe3 100644
1780     --- a/sound/pci/hda/patch_via.c
1781     +++ b/sound/pci/hda/patch_via.c
1782     @@ -2187,7 +2187,10 @@ static int via_auto_create_loopback_switch(struct hda_codec *codec)
1783     {
1784     struct via_spec *spec = codec->spec;
1785    
1786     - if (!spec->aa_mix_nid || !spec->out_mix_path.depth)
1787     + if (!spec->aa_mix_nid)
1788     + return 0; /* no loopback switching available */
1789     + if (!(spec->out_mix_path.depth || spec->hp_mix_path.depth ||
1790     + spec->speaker_path.depth))
1791     return 0; /* no loopback switching available */
1792     if (!via_clone_control(spec, &via_aamix_ctl_enum))
1793     return -ENOMEM;
1794     diff --git a/sound/pci/ice1712/amp.c b/sound/pci/ice1712/amp.c
1795     index e328cfb..e525da2 100644
1796     --- a/sound/pci/ice1712/amp.c
1797     +++ b/sound/pci/ice1712/amp.c
1798     @@ -68,8 +68,11 @@ static int __devinit snd_vt1724_amp_init(struct snd_ice1712 *ice)
1799    
1800     static int __devinit snd_vt1724_amp_add_controls(struct snd_ice1712 *ice)
1801     {
1802     - /* we use pins 39 and 41 of the VT1616 for left and right read outputs */
1803     - snd_ac97_write_cache(ice->ac97, 0x5a, snd_ac97_read(ice->ac97, 0x5a) & ~0x8000);
1804     + if (ice->ac97)
1805     + /* we use pins 39 and 41 of the VT1616 for left and right
1806     + rear outputs */
1807     + snd_ac97_write_cache(ice->ac97, 0x5a,
1808     + snd_ac97_read(ice->ac97, 0x5a) & ~0x8000);
1809     return 0;
1810     }
1811    
1812     diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c
1813     index c400ade..1e7a47a 100644
1814     --- a/sound/usb/usx2y/usb_stream.c
1815     +++ b/sound/usb/usx2y/usb_stream.c
1816     @@ -674,7 +674,7 @@ dotry:
1817     inurb->transfer_buffer_length =
1818     inurb->number_of_packets *
1819     inurb->iso_frame_desc[0].length;
1820     - preempt_disable();
1821     +
1822     if (u == 0) {
1823     int now;
1824     struct usb_device *dev = inurb->dev;
1825     @@ -686,19 +686,17 @@ dotry:
1826     }
1827     err = usb_submit_urb(inurb, GFP_ATOMIC);
1828     if (err < 0) {
1829     - preempt_enable();
1830     snd_printk(KERN_ERR"usb_submit_urb(sk->inurb[%i])"
1831     " returned %i\n", u, err);
1832     return err;
1833     }
1834     err = usb_submit_urb(outurb, GFP_ATOMIC);
1835     if (err < 0) {
1836     - preempt_enable();
1837     snd_printk(KERN_ERR"usb_submit_urb(sk->outurb[%i])"
1838     " returned %i\n", u, err);
1839     return err;
1840     }
1841     - preempt_enable();
1842     +
1843     if (inurb->start_frame != outurb->start_frame) {
1844     snd_printd(KERN_DEBUG
1845     "u[%i] start_frames differ in:%u out:%u\n",
1846     diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
1847     index 4e9eaeb..af79102 100644
1848     --- a/virt/kvm/assigned-dev.c
1849     +++ b/virt/kvm/assigned-dev.c
1850     @@ -17,6 +17,8 @@
1851     #include <linux/pci.h>
1852     #include <linux/interrupt.h>
1853     #include <linux/slab.h>
1854     +#include <linux/namei.h>
1855     +#include <linux/fs.h>
1856     #include "irq.h"
1857    
1858     static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
1859     @@ -474,12 +476,76 @@ out:
1860     return r;
1861     }
1862    
1863     +/*
1864     + * We want to test whether the caller has been granted permissions to
1865     + * use this device. To be able to configure and control the device,
1866     + * the user needs access to PCI configuration space and BAR resources.
1867     + * These are accessed through PCI sysfs. PCI config space is often
1868     + * passed to the process calling this ioctl via a file descriptor, so we
1869     + * can't rely on access to that file. We can check for permissions
1870     + * on each of the BAR resource files, which is a pretty clear
1871     + * indicator that the user has been granted access to the device.
1872     + */
1873     +static int probe_sysfs_permissions(struct pci_dev *dev)
1874     +{
1875     +#ifdef CONFIG_SYSFS
1876     + int i;
1877     + bool bar_found = false;
1878     +
1879     + for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) {
1880     + char *kpath, *syspath;
1881     + struct path path;
1882     + struct inode *inode;
1883     + int r;
1884     +
1885     + if (!pci_resource_len(dev, i))
1886     + continue;
1887     +
1888     + kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
1889     + if (!kpath)
1890     + return -ENOMEM;
1891     +
1892     + /* Per sysfs-rules, sysfs is always at /sys */
1893     + syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i);
1894     + kfree(kpath);
1895     + if (!syspath)
1896     + return -ENOMEM;
1897     +
1898     + r = kern_path(syspath, LOOKUP_FOLLOW, &path);
1899     + kfree(syspath);
1900     + if (r)
1901     + return r;
1902     +
1903     + inode = path.dentry->d_inode;
1904     +
1905     + r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS);
1906     + path_put(&path);
1907     + if (r)
1908     + return r;
1909     +
1910     + bar_found = true;
1911     + }
1912     +
1913     + /* If no resources, probably something special */
1914     + if (!bar_found)
1915     + return -EPERM;
1916     +
1917     + return 0;
1918     +#else
1919     + return -EINVAL; /* No way to control the device without sysfs */
1920     +#endif
1921     +}
1922     +
1923     static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
1924     struct kvm_assigned_pci_dev *assigned_dev)
1925     {
1926     int r = 0, idx;
1927     struct kvm_assigned_dev_kernel *match;
1928     struct pci_dev *dev;
1929     + u8 header_type;
1930     +
1931     + if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
1932     + return -EINVAL;
1933    
1934     mutex_lock(&kvm->lock);
1935     idx = srcu_read_lock(&kvm->srcu);
1936     @@ -507,6 +573,18 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
1937     r = -EINVAL;
1938     goto out_free;
1939     }
1940     +
1941     + /* Don't allow bridges to be assigned */
1942     + pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
1943     + if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
1944     + r = -EPERM;
1945     + goto out_put;
1946     + }
1947     +
1948     + r = probe_sysfs_permissions(dev);
1949     + if (r)
1950     + goto out_put;
1951     +
1952     if (pci_enable_device(dev)) {
1953     printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
1954     r = -EBUSY;
1955     @@ -538,16 +616,14 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
1956    
1957     list_add(&match->list, &kvm->arch.assigned_dev_head);
1958    
1959     - if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
1960     - if (!kvm->arch.iommu_domain) {
1961     - r = kvm_iommu_map_guest(kvm);
1962     - if (r)
1963     - goto out_list_del;
1964     - }
1965     - r = kvm_assign_device(kvm, match);
1966     + if (!kvm->arch.iommu_domain) {
1967     + r = kvm_iommu_map_guest(kvm);
1968     if (r)
1969     goto out_list_del;
1970     }
1971     + r = kvm_assign_device(kvm, match);
1972     + if (r)
1973     + goto out_list_del;
1974    
1975     out:
1976     srcu_read_unlock(&kvm->srcu, idx);
1977     @@ -587,8 +663,7 @@ static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
1978     goto out;
1979     }
1980    
1981     - if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
1982     - kvm_deassign_device(kvm, match);
1983     + kvm_deassign_device(kvm, match);
1984    
1985     kvm_free_assigned_device(kvm, match);
1986
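
For reference, the effect of probe_sysfs_permissions() can be approximated from userspace: a process that wants to assign a device should hold read/write access to every BAR resource file the device exposes under sysfs. A rough sketch (the default device path is only an example; pass a real one as argv[1]):

/* sketch: userspace analogue of probe_sysfs_permissions() */
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        const char *dev = argc > 1 ? argv[1]
                                   : "/sys/bus/pci/devices/0000:00:00.0";
        char path[256];
        int i, bar_found = 0;

        for (i = 0; i <= 5; i++) {      /* resource0 .. resource5 */
                snprintf(path, sizeof(path), "%s/resource%d", dev, i);
                if (access(path, F_OK))
                        continue;       /* BAR not implemented */
                if (access(path, R_OK | W_OK)) {
                        fprintf(stderr, "no r/w access to %s\n", path);
                        return 1;
                }
                bar_found = 1;
        }
        if (!bar_found) {
                fprintf(stderr, "no BAR resource files found\n");
                return 1;
        }
        printf("caller may use %s for assignment\n", dev);
        return 0;
}
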