diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index b0e4b9c..13ab837 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1131,6 +1131,13 @@ following flags are specified:
 /* Depends on KVM_CAP_IOMMU */
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)
 
+The KVM_DEV_ASSIGN_ENABLE_IOMMU flag is a mandatory option to ensure
+isolation of the device.  Usages not specifying this flag are deprecated.
+
+Only PCI header type 0 devices with PCI BAR resources are supported by
+device assignment.  The user requesting this ioctl must have read/write
+access to the PCI sysfs resource files associated with the device.
+
 4.49 KVM_DEASSIGN_PCI_DEVICE
 
 Capability: KVM_CAP_DEVICE_DEASSIGNMENT
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 67f87f2..78a1eff 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_AMD_NB_H
 #define _ASM_X86_AMD_NB_H
 
+#include <linux/ioport.h>
 #include <linux/pci.h>
 
 struct amd_nb_bus_dev_range {
@@ -13,6 +14,7 @@ extern const struct pci_device_id amd_nb_misc_ids[];
 extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
 
 extern bool early_is_amd_nb(u32 value);
+extern struct resource *amd_get_mmconfig_range(struct resource *res);
 extern int amd_cache_northbridges(void);
 extern void amd_flush_garts(void);
 extern int amd_numa_init(void);
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 4c39baa..bae1efe 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -119,6 +119,37 @@ bool __init early_is_amd_nb(u32 device)
 	return false;
 }
 
+struct resource *amd_get_mmconfig_range(struct resource *res)
+{
+	u32 address;
+	u64 base, msr;
+	unsigned segn_busn_bits;
+
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+		return NULL;
+
+	/* assume all cpus from fam10h have mmconfig */
+	if (boot_cpu_data.x86 < 0x10)
+		return NULL;
+
+	address = MSR_FAM10H_MMIO_CONF_BASE;
+	rdmsrl(address, msr);
+
+	/* mmconfig is not enabled */
+	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
+		return NULL;
+
+	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
+
+	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
+			 FAM10H_MMIO_CONF_BUSRANGE_MASK;
+
+	res->flags = IORESOURCE_MEM;
+	res->start = base;
+	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
+	return res;
+}
+
 int amd_get_subcaches(int cpu)
 {
 	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index c1a0188..44842d7 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -74,9 +74,10 @@ static cycle_t kvm_clock_read(void)
 	struct pvclock_vcpu_time_info *src;
 	cycle_t ret;
 
-	src = &get_cpu_var(hv_clock);
+	preempt_disable_notrace();
+	src = &__get_cpu_var(hv_clock);
 	ret = pvclock_clocksource_read(src);
-	put_cpu_var(hv_clock);
+	preempt_enable_notrace();
 	return ret;
 }
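
The MSR decode in amd_get_mmconfig_range() (arch/x86/kernel/amd_nb.c above) is pure bit arithmetic: bit 0 enables MMCONFIG, bits 5:2 encode how many bus-number bits the window covers, and the upper bits hold the base address; each bus needs 1 MB of config space, hence the "+ 20" in the size shift. A minimal userspace sketch of the same decoding (constants copied from asm/msr-index.h; the sample MSR value is made up):

    #include <stdio.h>
    #include <stdint.h>

    #define FAM10H_MMIO_CONF_ENABLE         (1ULL << 0)
    #define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2
    #define FAM10H_MMIO_CONF_BUSRANGE_MASK  0xf
    #define FAM10H_MMIO_CONF_BASE_SHIFT     20
    #define FAM10H_MMIO_CONF_BASE_MASK      0xfffffffULL

    int main(void)
    {
        uint64_t msr = 0xe0000021ULL;   /* made-up sample: enabled, 2^8 buses */
        uint64_t base, end;
        unsigned segn_busn_bits;

        if (!(msr & FAM10H_MMIO_CONF_ENABLE)) {
            puts("MMCONFIG disabled");
            return 0;
        }
        base = msr & (FAM10H_MMIO_CONF_BASE_MASK << FAM10H_MMIO_CONF_BASE_SHIFT);
        segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
                         FAM10H_MMIO_CONF_BUSRANGE_MASK;
        /* each bus takes 1 MB of config space, hence the "+ 20" */
        end = base + (1ULL << (segn_busn_bits + 20)) - 1;
        printf("MMCONFIG [%#llx-%#llx]\n",
               (unsigned long long)base, (unsigned long long)end);
        return 0;
    }
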
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index efad723..43e04d1 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -338,11 +338,15 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
 	return HRTIMER_NORESTART;
 }
 
-static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
+static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
 {
+	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
 	struct kvm_timer *pt = &ps->pit_timer;
 	s64 interval;
 
+	if (!irqchip_in_kernel(kvm))
+		return;
+
 	interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
 
 	pr_debug("create pit timer, interval is %llu nsec\n", interval);
@@ -394,13 +398,13 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
 	/* FIXME: enhance mode 4 precision */
 	case 4:
 		if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) {
-			create_pit_timer(ps, val, 0);
+			create_pit_timer(kvm, val, 0);
 		}
 		break;
 	case 2:
 	case 3:
 		if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){
-			create_pit_timer(ps, val, 1);
+			create_pit_timer(kvm, val, 1);
 		}
 		break;
 	default:
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 1dab519..f927429 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -87,9 +87,9 @@ static unsigned long mmap_rnd(void)
 	 */
 	if (current->flags & PF_RANDOMIZE) {
 		if (mmap_is_ia32())
-			rnd = (long)get_random_int() % (1<<8);
+			rnd = get_random_int() % (1<<8);
 		else
-			rnd = (long)(get_random_int() % (1<<28));
+			rnd = get_random_int() % (1<<28);
 	}
 	return rnd << PAGE_SHIFT;
 }
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index 6b8759f..d24d3da 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -18,8 +18,9 @@ obj-$(CONFIG_X86_NUMAQ)	+= numaq_32.o
 obj-$(CONFIG_X86_MRST)		+= mrst.o
 
 obj-y				+= common.o early.o
-obj-y				+= amd_bus.o bus_numa.o
+obj-y				+= bus_numa.o
 
+obj-$(CONFIG_AMD_NB)		+= amd_bus.o
 obj-$(CONFIG_PCI_CNB20LE_QUIRK)	+= broadcom_bus.o
 
 ifeq ($(CONFIG_PCI_DEBUG),y)
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 404f21a..f8348ab 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -149,7 +149,7 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
 	struct acpi_resource_address64 addr;
 	acpi_status status;
 	unsigned long flags;
-	u64 start, end;
+	u64 start, orig_end, end;
 
 	status = resource_to_addr(acpi_res, &addr);
 	if (!ACPI_SUCCESS(status))
@@ -165,7 +165,21 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
 		return AE_OK;
 
 	start = addr.minimum + addr.translation_offset;
-	end = addr.maximum + addr.translation_offset;
+	orig_end = end = addr.maximum + addr.translation_offset;
+
+	/* Exclude non-addressable range or non-addressable portion of range */
+	end = min(end, (u64)iomem_resource.end);
+	if (end <= start) {
+		dev_info(&info->bridge->dev,
+			 "host bridge window [%#llx-%#llx] "
+			 "(ignored, not CPU addressable)\n", start, orig_end);
+		return AE_OK;
+	} else if (orig_end != end) {
+		dev_info(&info->bridge->dev,
+			 "host bridge window [%#llx-%#llx] "
+			 "([%#llx-%#llx] ignored, not CPU addressable)\n",
+			 start, orig_end, end + 1, orig_end);
+	}
 
 	res = &info->res[info->res_num];
 	res->name = info->name;
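
The arch/x86/mm/mmap.c change above is subtle: get_random_int() returns an unsigned value, and on 32-bit the old (long) cast could make the dividend negative, so the % produced a negative offset that was then shifted into the mmap base. A runnable sketch of the difference, with int32_t standing in for a 32-bit long:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t r = 0xffffff01u;       /* a perfectly normal random value */

        /* old code: cast to a signed 32-bit type before the modulo */
        int32_t bad = (int32_t)r % (1 << 8);
        /* new code: keep the arithmetic unsigned */
        uint32_t good = r % (1 << 8);

        printf("signed:   %d\n", bad);  /* prints -255: negative offset */
        printf("unsigned: %u\n", good); /* prints 1 */
        return 0;
    }
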
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 026e493..385a940 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -30,34 +30,6 @@ static struct pci_hostbridge_probe pci_probes[] __initdata = {
 	{ 0, 0x18, PCI_VENDOR_ID_AMD, 0x1300 },
 };
 
-static u64 __initdata fam10h_mmconf_start;
-static u64 __initdata fam10h_mmconf_end;
-static void __init get_pci_mmcfg_amd_fam10h_range(void)
-{
-	u32 address;
-	u64 base, msr;
-	unsigned segn_busn_bits;
-
-	/* assume all cpus from fam10h have mmconf */
-	if (boot_cpu_data.x86 < 0x10)
-		return;
-
-	address = MSR_FAM10H_MMIO_CONF_BASE;
-	rdmsrl(address, msr);
-
-	/* mmconfig is not enable */
-	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
-		return;
-
-	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
-
-	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
-			 FAM10H_MMIO_CONF_BUSRANGE_MASK;
-
-	fam10h_mmconf_start = base;
-	fam10h_mmconf_end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
-}
-
 #define RANGE_NUM 16
 
 /**
@@ -85,6 +57,9 @@ static int __init early_fill_mp_bus_info(void)
 	u64 val;
 	u32 address;
 	bool found;
+	struct resource fam10h_mmconf_res, *fam10h_mmconf;
+	u64 fam10h_mmconf_start;
+	u64 fam10h_mmconf_end;
 
 	if (!early_pci_allowed())
 		return -1;
@@ -211,12 +186,17 @@ static int __init early_fill_mp_bus_info(void)
 		subtract_range(range, RANGE_NUM, 0, end);
 
 	/* get mmconfig */
-	get_pci_mmcfg_amd_fam10h_range();
+	fam10h_mmconf = amd_get_mmconfig_range(&fam10h_mmconf_res);
 	/* need to take out mmconf range */
-	if (fam10h_mmconf_end) {
-		printk(KERN_DEBUG "Fam 10h mmconf [%llx, %llx]\n", fam10h_mmconf_start, fam10h_mmconf_end);
+	if (fam10h_mmconf) {
+		printk(KERN_DEBUG "Fam 10h mmconf %pR\n", fam10h_mmconf);
+		fam10h_mmconf_start = fam10h_mmconf->start;
+		fam10h_mmconf_end = fam10h_mmconf->end;
 		subtract_range(range, RANGE_NUM, fam10h_mmconf_start,
 				fam10h_mmconf_end + 1);
+	} else {
+		fam10h_mmconf_start = 0;
+		fam10h_mmconf_end = 0;
 	}
 
 	/* mmio resource */
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index b94d871..7642495 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2069,6 +2069,7 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev)
 void r100_bm_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
+	u16 tmp16;
 
 	/* disable bus mastering */
 	tmp = RREG32(R_000030_BUS_CNTL);
@@ -2079,8 +2080,8 @@ void r100_bm_disable(struct radeon_device *rdev)
 	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
 	tmp = RREG32(RADEON_BUS_CNTL);
 	mdelay(1);
-	pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
-	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+	pci_read_config_word(rdev->pdev, 0x4, &tmp16);
+	pci_write_config_word(rdev->pdev, 0x4, tmp16 & 0xFFFB);
 	mdelay(1);
 }
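
Casting &tmp (a u32) to u16 * in the old r100.c code only happens to work on little-endian machines: the 16-bit read lands in whichever half of the u32 sits first in memory, so on big-endian the command-register value ends up in the wrong half and the mask below operates on stale bytes. An illustrative, runnable sketch of the aliasing (memcpy mirrors the pointer-cast store without invoking undefined behavior):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        uint32_t tmp = 0;
        uint16_t cmd = 0x0147;  /* pretend PCI_COMMAND register value */

        /* what the old code did: store a u16 into the first half of a u32 */
        memcpy(&tmp, &cmd, sizeof(cmd));

        /* little-endian: tmp == 0x147; big-endian: tmp == 0x1470000 */
        printf("u32 seen after 16-bit store: %#x\n", tmp);
        return 0;
    }
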
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f5ac7e7..c45d921 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -196,6 +196,13 @@ static void r600_hdmi_videoinfoframe(
 	frame[0xD] = (right_bar >> 8);
 
 	r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
+	/* Our header values (type, version, length) should be alright, Intel
+	 * is using the same. Checksum function also seems to be OK, it works
+	 * fine for audio infoframe. However the calculated value is always
+	 * lower by 2 in comparison to fglrx. It breaks displaying anything in
+	 * case of TVs that strictly check the checksum. Hack it manually here
+	 * to workaround this issue.
+	 */
+	frame[0x0] += 2;
 
 	WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
 		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b51e157..50d105a 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -224,8 +224,11 @@ int radeon_wb_init(struct radeon_device *rdev)
 	if (radeon_no_wb == 1)
 		rdev->wb.enabled = false;
 	else {
-		/* often unreliable on AGP */
 		if (rdev->flags & RADEON_IS_AGP) {
+			/* often unreliable on AGP */
+			rdev->wb.enabled = false;
+		} else if (rdev->family < CHIP_R300) {
+			/* often unreliable on pre-r300 */
 			rdev->wb.enabled = false;
 		} else {
 			rdev->wb.enabled = true;
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 0e89a9b..d9b0bc4 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -324,10 +324,10 @@ void rs600_hpd_fini(struct radeon_device *rdev)
 
 void rs600_bm_disable(struct radeon_device *rdev)
 {
-	u32 tmp;
+	u16 tmp;
 
 	/* disable bus mastering */
-	pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+	pci_read_config_word(rdev->pdev, 0x4, &tmp);
 	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
 	mdelay(1);
 }
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f26ae31..e9c8f80 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -361,7 +361,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
 
 	case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
 		parser->global.report_size = item_udata(item);
-		if (parser->global.report_size > 32) {
+		if (parser->global.report_size > 96) {
 			dbg_hid("invalid report_size %d\n",
 					parser->global.report_size);
 			return -1;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d9587df..606fc04 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -508,8 +508,17 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio, int *max_sectors)
 		if (test_bit(WriteMostly, &rdev->flags)) {
 			/* Don't balance among write-mostly, just
 			 * use the first as a last resort */
-			if (best_disk < 0)
+			if (best_disk < 0) {
+				if (is_badblock(rdev, this_sector, sectors,
+						&first_bad, &bad_sectors)) {
+					if (first_bad < this_sector)
+						/* Cannot use this */
+						continue;
+					best_good_sectors = first_bad - this_sector;
+				} else
+					best_good_sectors = sectors;
 				best_disk = disk;
+			}
 			continue;
 		}
 		/* This is a reasonable device to use.  It might
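
The write-mostly branch in raid1.c now mirrors the bad-block handling used for regular devices: if a known-bad range starts inside the requested window, only the sectors in front of it are usable; if it starts at or before the requested sector the device cannot serve the read at all. A standalone sketch of that clamping rule (types simplified, values made up):

    #include <stdio.h>

    /* usable sectors from "start", or -1 if this disk must be skipped */
    static long usable_sectors(long start, long want, long first_bad, int has_bad)
    {
        if (!has_bad)
            return want;            /* no bad block in the window */
        if (first_bad < start)
            return -1;              /* bad range covers the start: skip disk */
        return first_bad - start;   /* clamp to the good prefix */
    }

    int main(void)
    {
        printf("%ld\n", usable_sectors(100, 64, 0, 0));     /* 64 */
        printf("%ld\n", usable_sectors(100, 64, 132, 1));   /* 32 */
        printf("%ld\n", usable_sectors(100, 64, 90, 1));    /* -1 */
        return 0;
    }
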
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index ca38569..bff8d46 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -215,7 +215,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
 
 	mutex_lock(&dev->lock);
 
-	if (dev->open++)
+	if (dev->open)
 		goto unlock;
 
 	kref_get(&dev->ref);
@@ -235,6 +235,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
 		goto error_release;
 
 unlock:
+	dev->open++;
 	mutex_unlock(&dev->lock);
 	blktrans_dev_put(dev);
 	return ret;
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index e3e40f4..43130e8 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -253,6 +253,9 @@ static void find_next_position(struct mtdoops_context *cxt)
 	size_t retlen;
 
 	for (page = 0; page < cxt->oops_pages; page++) {
+		if (mtd->block_isbad &&
+		    mtd->block_isbad(mtd, page * record_size))
+			continue;
 		/* Assume the page is used */
 		mark_page_used(cxt, page);
 		ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
@@ -369,7 +372,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
 
 	/* oops_page_used is a bit field */
 	cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
-			BITS_PER_LONG));
+			BITS_PER_LONG) * sizeof(unsigned long));
 	if (!cxt->oops_page_used) {
 		printk(KERN_ERR "mtdoops: could not allocate page array\n");
 		return;
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 531625f..129bad2 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -277,6 +277,12 @@ static int __init mtd_stresstest_init(void)
 	       (unsigned long long)mtd->size, mtd->erasesize,
 	       pgsize, ebcnt, pgcnt, mtd->oobsize);
 
+	if (ebcnt < 2) {
+		printk(PRINT_PREF "error: need at least 2 eraseblocks\n");
+		err = -ENOSPC;
+		goto out_put_mtd;
+	}
+
 	/* Read or write up 2 eraseblocks at a time */
 	bufsize = mtd->erasesize * 2;
 
@@ -315,6 +321,7 @@ out:
 	kfree(bbt);
 	vfree(writebuf);
 	vfree(readbuf);
+out_put_mtd:
 	put_mtd_device(mtd);
 	if (err)
 		printk(PRINT_PREF "error %d occurred\n", err);
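
The mtdoops fix above is a classic bitmap-sizing slip: DIV_ROUND_UP(pages, BITS_PER_LONG) is a count of longs, but vmalloc() takes bytes, so the old call under-allocated the bitmap by a factor of sizeof(unsigned long). The arithmetic, as a runnable check:

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long pages = 1000;     /* made-up flash size in records */
        size_t longs = DIV_ROUND_UP(pages, BITS_PER_LONG);

        printf("old allocation:   %zu bytes\n", longs);  /* 16 on 64-bit */
        printf("fixed allocation: %zu bytes\n",
               longs * sizeof(unsigned long));           /* 128 */
        return 0;
    }
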
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 4be6718..c696c94 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -1028,12 +1028,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 	 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
 	 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
 	 * LEB is already locked, we just do not move it and return
-	 * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later.
+	 * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
+	 * we do not know the reasons of the contention - it may be just a
+	 * normal I/O on this LEB, so we want to re-try.
 	 */
 	err = leb_write_trylock(ubi, vol_id, lnum);
 	if (err) {
 		dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
-		return MOVE_CANCEL_RACE;
+		return MOVE_RETRY;
 	}
 
 	/*
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index dc64c76..d51d75d 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -120,6 +120,7 @@ enum {
  *                     PEB
  * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
  *                       target PEB
+ * MOVE_RETRY: retry scrubbing the PEB
  */
 enum {
 	MOVE_CANCEL_RACE = 1,
@@ -127,6 +128,7 @@ enum {
 	MOVE_TARGET_RD_ERR,
 	MOVE_TARGET_WR_ERR,
 	MOVE_CANCEL_BITFLIPS,
+	MOVE_RETRY,
 };
 
 /**
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 42c684c..0696e36 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -795,7 +795,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 			protect = 1;
 			goto out_not_moved;
 		}
-
+		if (err == MOVE_RETRY) {
+			scrubbing = 1;
+			goto out_not_moved;
+		}
 		if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
 		    err == MOVE_TARGET_RD_ERR) {
 			/*
@@ -1049,7 +1052,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 
 	ubi_err("failed to erase PEB %d, error %d", pnum, err);
 	kfree(wl_wrk);
-	kmem_cache_free(ubi_wl_entry_slab, e);
 
 	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
 	    err == -EBUSY) {
@@ -1062,14 +1064,16 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 			goto out_ro;
 		}
 		return err;
-	} else if (err != -EIO) {
+	}
+
+	kmem_cache_free(ubi_wl_entry_slab, e);
+	if (err != -EIO)
 		/*
 		 * If this is not %-EIO, we have no idea what to do. Scheduling
 		 * this physical eraseblock for erasure again would cause
 		 * errors again and again. Well, lets switch to R/O mode.
 		 */
 		goto out_ro;
-	}
 
 	/* It is %-EIO, the PEB went bad */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
index 6f91a14..3fda6b1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
@@ -196,6 +196,8 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw,
 		/* Allocate skb buffer to contain firmware */
 		/* info and tx descriptor info. */
 		skb = dev_alloc_skb(frag_length);
+		if (!skb)
+			return false;
 		skb_reserve(skb, extra_descoffset);
 		seg_ptr = (u8 *)skb_put(skb, (u32)(frag_length - extra_descoffset));
 
@@ -573,6 +575,8 @@ static bool _rtl92s_firmware_set_h2c_cmd(struct ieee80211_hw *hw, u8 h2c_cmd,
 
 	len = _rtl92s_get_h2c_cmdlen(MAX_TRANSMIT_BUFFER_SIZE, 1, &cmd_len);
 	skb = dev_alloc_skb(len);
+	if (!skb)
+		return false;
 	cb_desc = (struct rtl_tcb_desc *)(skb->cb);
 	cb_desc->queue_index = TXCMD_QUEUE;
 	cb_desc->cmd_or_init = DESC_PACKET_TYPE_NORMAL;
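
The new MOVE_RETRY value in the UBI hunks above gives the wear-leveling worker a third outcome besides success and cancellation: contention of unknown cause. The worker reacts by flagging the PEB for scrubbing and requeueing it instead of treating the race as fatal. A toy model of that dispatch, runnable on its own (names follow the patch, surrounding logic elided):

    #include <stdio.h>

    enum { MOVE_OK, MOVE_CANCEL_RACE, MOVE_RETRY, MOVE_CANCEL_BITFLIPS };

    /* compressed sketch of wear_leveling_worker()'s error handling */
    static const char *handle_move(int err, int *scrubbing)
    {
        switch (err) {
        case MOVE_OK:
            return "PEB moved";
        case MOVE_RETRY:
            *scrubbing = 1;     /* re-queue the PEB for scrubbing */
            return "requeued for scrub";
        case MOVE_CANCEL_RACE:
            return "canceled, protect PEB";
        default:
            return "target PEB problem, pick another";
        }
    }

    int main(void)
    {
        int scrubbing = 0;
        puts(handle_move(MOVE_RETRY, &scrubbing));
        printf("scrubbing=%d\n", scrubbing);
        return 0;
    }
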
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 2f10328..e174982 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -869,5 +869,15 @@ EXPORT_SYMBOL(pci_msi_enabled);
 
 void pci_msi_init_pci_dev(struct pci_dev *dev)
 {
+	int pos;
 	INIT_LIST_HEAD(&dev->msi_list);
+
+	/* Disable the msi hardware to avoid screaming interrupts
+	 * during boot.  This is the power on reset default so
+	 * usually this should be a noop.
+	 */
+	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+	if (pos)
+		msi_set_enable(dev, pos, 0);
+	msix_set_enable(dev, 0);
 }
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 0c59541..0d94eec 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -493,6 +493,8 @@ static void ideapad_backlight_notify_power(struct ideapad_private *priv)
 	unsigned long power;
 	struct backlight_device *blightdev = priv->blightdev;
 
+	if (!blightdev)
+		return;
 	if (read_ec_data(ideapad_handle, 0x18, &power))
 		return;
 	blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index dfbd5a6..258fef2 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -295,6 +295,45 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
 	}
 }
 
+#ifdef CONFIG_AMD_NB
+
+#include <asm/amd_nb.h>
+
+static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
+{
+	resource_size_t start, end;
+	struct pnp_resource *pnp_res;
+	struct resource *res;
+	struct resource mmconfig_res, *mmconfig;
+
+	mmconfig = amd_get_mmconfig_range(&mmconfig_res);
+	if (!mmconfig)
+		return;
+
+	list_for_each_entry(pnp_res, &dev->resources, list) {
+		res = &pnp_res->res;
+		if (res->end < mmconfig->start || res->start > mmconfig->end ||
+		    (res->start == mmconfig->start && res->end == mmconfig->end))
+			continue;
+
+		dev_info(&dev->dev, FW_BUG
+			 "%pR covers only part of AMD MMCONFIG area %pR; adding more reservations\n",
+			 res, mmconfig);
+		if (mmconfig->start < res->start) {
+			start = mmconfig->start;
+			end = res->start - 1;
+			pnp_add_mem_resource(dev, start, end, 0);
+		}
+		if (mmconfig->end > res->end) {
+			start = res->end + 1;
+			end = mmconfig->end;
+			pnp_add_mem_resource(dev, start, end, 0);
+		}
+		break;
+	}
+}
+#endif
+
 /*
  *  PnP Quirks
  *  Cards or devices that need some tweaking due to incomplete resource info
@@ -322,6 +361,9 @@ static struct pnp_fixup pnp_fixups[] = {
 	/* PnP resources that might overlap PCI BARs */
 	{"PNP0c01", quirk_system_pci_resources},
 	{"PNP0c02", quirk_system_pci_resources},
+#ifdef CONFIG_AMD_NB
+	{"PNP0c01", quirk_amd_mmconfig_area},
+#endif
 	{""}
 };
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 44e91e5..a86f301 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -227,11 +227,11 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 		alarm->time.tm_hour = now.tm_hour;
 
 	/* For simplicity, only support date rollover for now */
-	if (alarm->time.tm_mday == -1) {
+	if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) {
 		alarm->time.tm_mday = now.tm_mday;
 		missing = day;
 	}
-	if (alarm->time.tm_mon == -1) {
+	if ((unsigned)alarm->time.tm_mon >= 12) {
 		alarm->time.tm_mon = now.tm_mon;
 		if (missing == none)
 			missing = month;
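
The (unsigned) cast in the rtc tm_mon test folds two comparisons into one: converting a negative int to unsigned yields a huge value, so a single >= 12 catches both "below 0" and "above 11". A runnable check of the idiom:

    #include <stdio.h>

    static int month_invalid(int tm_mon)
    {
        /* same idiom as the patch: one compare covers both directions */
        return (unsigned)tm_mon >= 12;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               month_invalid(-1),   /* 1: (unsigned)-1 is UINT_MAX */
               month_invalid(5),    /* 0 */
               month_invalid(12));  /* 1 */
        return 0;
    }
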
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 39e81cd..10f16a3 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -66,6 +66,8 @@ static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];
 
 #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
 
+#define MAX_HBA_QUEUE_DEPTH	30000
+#define MAX_CHAIN_DEPTH		100000
 static int max_queue_depth = -1;
 module_param(max_queue_depth, int, 0);
 MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
@@ -2098,8 +2100,6 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
 	}
 	if (ioc->chain_dma_pool)
 		pci_pool_destroy(ioc->chain_dma_pool);
-	}
-	if (ioc->chain_lookup) {
 		free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
 		ioc->chain_lookup = NULL;
 	}
@@ -2117,9 +2117,7 @@ static int
 _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
 {
 	struct mpt2sas_facts *facts;
-	u32 queue_size, queue_diff;
 	u16 max_sge_elements;
-	u16 num_of_reply_frames;
 	u16 chains_needed_per_io;
 	u32 sz, total_sz;
 	u32 retry_sz;
@@ -2146,7 +2144,8 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
 		max_request_credit = (max_queue_depth < facts->RequestCredit)
 		    ? max_queue_depth : facts->RequestCredit;
 	else
-		max_request_credit = facts->RequestCredit;
+		max_request_credit = min_t(u16, facts->RequestCredit,
+		    MAX_HBA_QUEUE_DEPTH);
 
 	ioc->hba_queue_depth = max_request_credit;
 	ioc->hi_priority_depth = facts->HighPriorityCredit;
@@ -2187,50 +2186,25 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
 	}
 	ioc->chains_needed_per_io = chains_needed_per_io;
 
-	/* reply free queue sizing - taking into account for events */
-	num_of_reply_frames = ioc->hba_queue_depth + 32;
-
-	/* number of replies frames can't be a multiple of 16 */
-	/* decrease number of reply frames by 1 */
-	if (!(num_of_reply_frames % 16))
-		num_of_reply_frames--;
-
-	/* calculate number of reply free queue entries
-	 *  (must be multiple of 16)
-	 */
-
-	/* (we know reply_free_queue_depth is not a multiple of 16) */
-	queue_size = num_of_reply_frames;
-	queue_size += 16 - (queue_size % 16);
-	ioc->reply_free_queue_depth = queue_size;
-
-	/* reply descriptor post queue sizing */
-	/* this size should be the number of request frames + number of reply
-	 * frames
-	 */
-
-	queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1;
-	/* round up to 16 byte boundary */
-	if (queue_size % 16)
-		queue_size += 16 - (queue_size % 16);
-
-	/* check against IOC maximum reply post queue depth */
-	if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) {
-		queue_diff = queue_size -
-		    facts->MaxReplyDescriptorPostQueueDepth;
+	/* reply free queue sizing - taking into account for 64 FW events */
+	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
 
-		/* round queue_diff up to multiple of 16 */
-		if (queue_diff % 16)
-			queue_diff += 16 - (queue_diff % 16);
-
-		/* adjust hba_queue_depth, reply_free_queue_depth,
-		 * and queue_size
-		 */
-		ioc->hba_queue_depth -= (queue_diff / 2);
-		ioc->reply_free_queue_depth -= (queue_diff / 2);
-		queue_size = facts->MaxReplyDescriptorPostQueueDepth;
+	/* align the reply post queue on the next 16 count boundary */
+	if (!ioc->reply_free_queue_depth % 16)
+		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16;
+	else
+		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth +
+				32 - (ioc->reply_free_queue_depth % 16);
+	if (ioc->reply_post_queue_depth >
+	    facts->MaxReplyDescriptorPostQueueDepth) {
+		ioc->reply_post_queue_depth = min_t(u16,
+		    (facts->MaxReplyDescriptorPostQueueDepth -
+		    (facts->MaxReplyDescriptorPostQueueDepth % 16)),
+		    (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16)));
+		ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16;
+		ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64;
 	}
-	ioc->reply_post_queue_depth = queue_size;
+
 	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
 	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
@@ -2316,15 +2290,12 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
 	    "depth(%d)\n", ioc->name, ioc->request,
 	    ioc->scsiio_depth));
 
-	/* loop till the allocation succeeds */
-	do {
-		sz = ioc->chain_depth * sizeof(struct chain_tracker);
-		ioc->chain_pages = get_order(sz);
-		ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
-		    GFP_KERNEL, ioc->chain_pages);
-		if (ioc->chain_lookup == NULL)
-			ioc->chain_depth -= 100;
-	} while (ioc->chain_lookup == NULL);
+	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
+	sz = ioc->chain_depth * sizeof(struct chain_tracker);
+	ioc->chain_pages = get_order(sz);
+
+	ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
+	    GFP_KERNEL, ioc->chain_pages);
 	ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
 	    ioc->request_sz, 16, 0);
 	if (!ioc->chain_dma_pool) {
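
A side note on the queue sizing above: the reply post queue must sit on a 16-entry boundary with headroom above the reply free queue. A minimal sketch of the intended rounding, with a caveat: as the hunk is written, "!ioc->reply_free_queue_depth % 16" parses as "(!depth) % 16", which is 0 for any nonzero depth, so the else branch is the one that actually runs ("!" binds tighter than "%" in C):

    #include <stdio.h>

    /* round v up past the next multiple of 16, always leaving headroom */
    static unsigned round_up_16(unsigned v)
    {
        return v + 16 - (v % 16);
    }

    int main(void)
    {
        unsigned depth = 1000 + 64;    /* hba_queue_depth + 64 FW events */

        printf("reply_free_queue_depth %u -> reply_post_queue_depth %u\n",
               depth, round_up_16(depth));   /* 1064 -> 1072 */
        return 0;
    }
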
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 7375124..011b864 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -978,8 +978,8 @@ _scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid)
 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
 	if (list_empty(&ioc->free_chain_list)) {
 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-		printk(MPT2SAS_WARN_FMT "chain buffers not available\n",
-		    ioc->name);
+		dfailprintk(ioc, printk(MPT2SAS_WARN_FMT "chain buffers not "
+			"available\n", ioc->name));
 		return NULL;
 	}
 	chain_req = list_entry(ioc->free_chain_list.next,
@@ -6564,6 +6564,7 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
 			} else
 				sas_target_priv_data = NULL;
 			raid_device->responding = 1;
+			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
 			starget_printk(KERN_INFO, raid_device->starget,
 			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
 			    (unsigned long long)raid_device->wwid);
@@ -6574,16 +6575,16 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
 			 */
 			_scsih_init_warpdrive_properties(ioc, raid_device);
 			if (raid_device->handle == handle)
-				goto out;
+				return;
 			printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
 			    raid_device->handle);
 			raid_device->handle = handle;
 			if (sas_target_priv_data)
 				sas_target_priv_data->handle = handle;
-			goto out;
+			return;
 		}
 	}
- out:
+	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
 }
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 5534690..daee5db 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -801,6 +801,12 @@ static int process_msg(void)
 		goto out;
 	}
 
+	if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) {
+		kfree(msg);
+		err = -EINVAL;
+		goto out;
+	}
+
 	body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
 	if (body == NULL) {
 		kfree(msg);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 065ff37..2669cc3 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1984,17 +1984,16 @@ static int ext4_fill_flex_info(struct super_block *sb)
 	struct ext4_group_desc *gdp = NULL;
 	ext4_group_t flex_group_count;
 	ext4_group_t flex_group;
-	int groups_per_flex = 0;
+	unsigned int groups_per_flex = 0;
 	size_t size;
 	int i;
 
 	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
-	groups_per_flex = 1 << sbi->s_log_groups_per_flex;
-
-	if (groups_per_flex < 2) {
+	if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
 		sbi->s_log_groups_per_flex = 0;
 		return 1;
 	}
+	groups_per_flex = 1 << sbi->s_log_groups_per_flex;
 
 	/* We allocate both existing and potentially added groups */
 	flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
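
The point of the ext4 change is to validate before shifting: s_log_groups_per_flex comes straight from the on-disk superblock, and 1 << n is undefined in C for n >= 32 (with the old signed int also going negative at n == 31). Checking the exponent first keeps a fuzzed or corrupted filesystem image from steering the shift. A runnable sketch of the guard:

    #include <stdio.h>
    #include <stdint.h>

    /* returns groups-per-flex, or 0 if the on-disk exponent is unusable */
    static unsigned int groups_per_flex(uint8_t log)
    {
        if (log < 1 || log > 31)
            return 0;       /* reject before shifting: 1 << 32+ is UB */
        return 1U << log;
    }

    int main(void)
    {
        printf("%u %u %u\n",
               groups_per_flex(4),      /* 16 */
               groups_per_flex(0),      /* 0: flex_bg effectively off */
               groups_per_flex(200));   /* 0: corrupted superblock */
        return 0;
    }
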
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 43926ad..54cea8a 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -339,7 +339,7 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
 	dprintk("%s enter. slotid %d seqid %d\n",
 		__func__, args->csa_slotid, args->csa_sequenceid);
 
-	if (args->csa_slotid > NFS41_BC_MAX_CALLBACKS)
+	if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS)
 		return htonl(NFS4ERR_BADSLOT);
 
 	slot = tbl->slots + args->csa_slotid;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index babaf3a..b76be2f 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -321,13 +321,13 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 			datasync);
 
 	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
-	if (ret)
-		return ret;
 	mutex_lock(&inode->i_mutex);
 
 	nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
 	have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
 	status = nfs_commit_inode(inode, FLUSH_SYNC);
+	if (status >= 0 && ret < 0)
+		status = ret;
 	have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
 	if (have_error)
 		ret = xchg(&ctx->error, 0);
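
The fs/nfs/file.c change keeps the COMMIT running even when writeback already failed, then folds the two results together so the earlier error wins and a successful commit cannot mask a writeback failure. The combining rule in isolation, as a runnable sketch:

    #include <stdio.h>

    /* prefer the earlier writeback error over a later successful commit */
    static int fsync_result(int writeback_ret, int commit_status)
    {
        if (commit_status >= 0 && writeback_ret < 0)
            return writeback_ret;
        return commit_status;
    }

    int main(void)
    {
        printf("%d\n", fsync_result(0, 0));     /* 0: both fine */
        printf("%d\n", fsync_result(-5, 0));    /* -5: writeback error kept */
        printf("%d\n", fsync_result(0, -13));   /* -13: commit error */
        return 0;
    }
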
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 2d8a169..003cb69 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3442,19 +3442,6 @@ static inline int nfs4_server_supports_acls(struct nfs_server *server)
  */
 #define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
 
-static void buf_to_pages(const void *buf, size_t buflen,
-		struct page **pages, unsigned int *pgbase)
-{
-	const void *p = buf;
-
-	*pgbase = offset_in_page(buf);
-	p -= *pgbase;
-	while (p < buf + buflen) {
-		*(pages++) = virt_to_page(p);
-		p += PAGE_CACHE_SIZE;
-	}
-}
-
 static int buf_to_pages_noslab(const void *buf, size_t buflen,
 		struct page **pages, unsigned int *pgbase)
 {
@@ -3551,9 +3538,19 @@ out:
 	nfs4_set_cached_acl(inode, acl);
 }
 
+/*
+ * The getxattr API returns the required buffer length when called with a
+ * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
+ * the required buf. On a NULL buf, we send a page of data to the server
+ * guessing that the ACL request can be serviced by a page. If so, we cache
+ * up to the page of ACL data, and the 2nd call to getxattr is serviced by
+ * the cache. If not so, we throw away the page, and cache the required
+ * length. The next getxattr call will then produce another round trip to
+ * the server, this time with the input buf of the required size.
+ */
 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
 {
-	struct page *pages[NFS4ACL_MAXPAGES];
+	struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
 	struct nfs_getaclargs args = {
 		.fh = NFS_FH(inode),
 		.acl_pages = pages,
@@ -3568,41 +3565,60 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
 		.rpc_argp = &args,
 		.rpc_resp = &res,
 	};
-	struct page *localpage = NULL;
-	int ret;
+	int ret = -ENOMEM, npages, i, acl_len = 0;
 
-	if (buflen < PAGE_SIZE) {
-		/* As long as we're doing a round trip to the server anyway,
-		 * let's be prepared for a page of acl data. */
-		localpage = alloc_page(GFP_KERNEL);
-		resp_buf = page_address(localpage);
-		if (localpage == NULL)
-			return -ENOMEM;
-		args.acl_pages[0] = localpage;
-		args.acl_pgbase = 0;
-		args.acl_len = PAGE_SIZE;
-	} else {
-		resp_buf = buf;
-		buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
+	npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	/* As long as we're doing a round trip to the server anyway,
+	 * let's be prepared for a page of acl data. */
+	if (npages == 0)
+		npages = 1;
+
+	for (i = 0; i < npages; i++) {
+		pages[i] = alloc_page(GFP_KERNEL);
+		if (!pages[i])
+			goto out_free;
+	}
+	if (npages > 1) {
+		/* for decoding across pages */
+		args.acl_scratch = alloc_page(GFP_KERNEL);
+		if (!args.acl_scratch)
+			goto out_free;
 	}
-	ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
+	args.acl_len = npages * PAGE_SIZE;
+	args.acl_pgbase = 0;
+	/* Let decode_getfacl know not to fail if the ACL data is larger than
+	 * the page we send as a guess */
+	if (buf == NULL)
+		res.acl_flags |= NFS4_ACL_LEN_REQUEST;
+	resp_buf = page_address(pages[0]);
+
+	dprintk("%s buf %p buflen %ld npages %d args.acl_len %ld\n",
+		__func__, buf, buflen, npages, args.acl_len);
+	ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
+		&msg, &args.seq_args, &res.seq_res, 0);
 	if (ret)
 		goto out_free;
-	if (res.acl_len > args.acl_len)
-		nfs4_write_cached_acl(inode, NULL, res.acl_len);
+
+	acl_len = res.acl_len - res.acl_data_offset;
+	if (acl_len > args.acl_len)
+		nfs4_write_cached_acl(inode, NULL, acl_len);
 	else
-		nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
+		nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
+				      acl_len);
 	if (buf) {
 		ret = -ERANGE;
-		if (res.acl_len > buflen)
+		if (acl_len > buflen)
 			goto out_free;
-		if (localpage)
-			memcpy(buf, resp_buf, res.acl_len);
+		_copy_from_pages(buf, pages, res.acl_data_offset,
+				 res.acl_len);
 	}
-	ret = res.acl_len;
+	ret = acl_len;
 out_free:
-	if (localpage)
-		__free_page(localpage);
+	for (i = 0; i < npages; i++)
+		if (pages[i])
+			__free_page(pages[i]);
+	if (args.acl_scratch)
+		__free_page(args.acl_scratch);
 	return ret;
 }
 
@@ -3633,6 +3649,8 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
 		nfs_zap_acl_cache(inode);
 	ret = nfs4_read_cached_acl(inode, buf, buflen);
 	if (ret != -ENOENT)
+		/* -ENOENT is returned if there is no ACL or if there is an ACL
+		 * but no cached acl data, just the acl length */
 		return ret;
 	return nfs4_get_acl_uncached(inode, buf, buflen);
 }
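
The comment added above describes the standard two-call getxattr dance from the userspace side. A minimal consumer looks like this (the mount point and file path are hypothetical; any xattr-capable NFSv4 mount will do):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/xattr.h>

    int main(void)
    {
        const char *path = "/mnt/nfs/file";   /* hypothetical NFSv4 file */
        ssize_t len;
        char *buf;

        /* call 1: NULL buffer just asks for the required length */
        len = getxattr(path, "system.nfs4_acl", NULL, 0);
        if (len < 0) {
            perror("getxattr(size probe)");
            return 1;
        }

        buf = malloc(len);
        if (!buf)
            return 1;

        /* call 2: fetch the ACL into a right-sized buffer */
        len = getxattr(path, "system.nfs4_acl", buf, len);
        printf("fetched %zd bytes of ACL data\n", len);
        free(buf);
        return 0;
    }
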
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 1dce12f..97f987a 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -2517,11 +2517,13 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
 	encode_compound_hdr(xdr, req, &hdr);
 	encode_sequence(xdr, &args->seq_args, &hdr);
 	encode_putfh(xdr, args->fh, &hdr);
-	replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
+	replen = hdr.replen + op_decode_hdr_maxsz + 1;
 	encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
 
 	xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
 		args->acl_pages, args->acl_pgbase, args->acl_len);
+	xdr_set_scratch_buffer(xdr, page_address(args->acl_scratch), PAGE_SIZE);
+
 	encode_nops(&hdr);
 }
 
@@ -4957,17 +4959,18 @@ decode_restorefh(struct xdr_stream *xdr)
 }
 
 static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
-		size_t *acl_len)
+		struct nfs_getaclres *res)
 {
-	__be32 *savep;
+	__be32 *savep, *bm_p;
 	uint32_t attrlen, bitmap[3] = {0};
 	struct kvec *iov = req->rq_rcv_buf.head;
 	int status;
 
-	*acl_len = 0;
+	res->acl_len = 0;
 	if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
 		goto out;
+	bm_p = xdr->p;
 	if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
 		goto out;
 	if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
@@ -4979,18 +4982,30 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
 		size_t hdrlen;
 		u32 recvd;
 
+		/* The bitmap (xdr len + bitmaps) and the attr xdr len words
+		 * are stored with the acl data to handle the problem of
+		 * variable length bitmaps.*/
+		xdr->p = bm_p;
+		res->acl_data_offset = be32_to_cpup(bm_p) + 2;
+		res->acl_data_offset <<= 2;
+
 		/* We ignore &savep and don't do consistency checks on
 		 * the attr length.  Let userspace figure it out.... */
 		hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
+		attrlen += res->acl_data_offset;
 		recvd = req->rq_rcv_buf.len - hdrlen;
 		if (attrlen > recvd) {
-			dprintk("NFS: server cheating in getattr"
-					" acl reply: attrlen %u > recvd %u\n",
+			if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
+				/* getxattr interface called with a NULL buf */
+				res->acl_len = attrlen;
+				goto out;
+			}
+			dprintk("NFS: acl reply: attrlen %u > recvd %u\n",
 					attrlen, recvd);
 			return -EINVAL;
 		}
 		xdr_read_pages(xdr, attrlen);
-		*acl_len = attrlen;
+		res->acl_len = attrlen;
 	} else
 		status = -EOPNOTSUPP;
 
@@ -6028,7 +6043,7 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
 	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_getacl(xdr, rqstp, &res->acl_len);
+	status = decode_getacl(xdr, rqstp, res);
 
 out:
 	return status;
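
decode_getacl() computes acl_data_offset straight from the wire format: the first 32-bit word at bm_p is the bitmap word count n, so the ACL payload starts (n + 2) words in (the count word itself, n bitmap words, and the attribute-length word), and "<<= 2" converts words to bytes. Worked out in a standalone snippet, with ntohl() standing in for be32_to_cpup():

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
        /* wire words: bitmap count = 2, two bitmap words, attr length */
        uint32_t wire[] = { htonl(2), htonl(0x40), htonl(0), htonl(124) };

        uint32_t nwords = ntohl(wire[0]);
        size_t offset = (nwords + 2) << 2;  /* words -> bytes */

        printf("acl data starts %zu bytes in\n", offset);  /* 16 */
        return 0;
    }
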
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 5b19b6a..c4daf4e 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -904,10 +904,24 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve
 		data->auth_flavor_len	= 1;
 		data->version		= version;
 		data->minorversion	= 0;
+		security_init_mnt_opts(&data->lsm_opts);
 	}
 	return data;
 }
 
+static void nfs_free_parsed_mount_data(struct nfs_parsed_mount_data *data)
+{
+	if (data) {
+		kfree(data->client_address);
+		kfree(data->mount_server.hostname);
+		kfree(data->nfs_server.export_path);
+		kfree(data->nfs_server.hostname);
+		kfree(data->fscache_uniq);
+		security_free_mnt_opts(&data->lsm_opts);
+		kfree(data);
+	}
+}
+
 /*
  * Sanity-check a server address provided by the mount command.
  *
@@ -2215,9 +2229,7 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
 	data = nfs_alloc_parsed_mount_data(NFS_DEFAULT_VERSION);
 	mntfh = nfs_alloc_fhandle();
 	if (data == NULL || mntfh == NULL)
-		goto out_free_fh;
-
-	security_init_mnt_opts(&data->lsm_opts);
+		goto out;
 
 	/* Validate the mount data */
 	error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
@@ -2229,8 +2241,6 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
 #ifdef CONFIG_NFS_V4
 	if (data->version == 4) {
 		mntroot = nfs4_try_mount(flags, dev_name, data);
-		kfree(data->client_address);
-		kfree(data->nfs_server.export_path);
 		goto out;
 	}
 #endif	/* CONFIG_NFS_V4 */
@@ -2285,13 +2295,8 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
 	s->s_flags |= MS_ACTIVE;
 
 out:
-	kfree(data->nfs_server.hostname);
-	kfree(data->mount_server.hostname);
-	kfree(data->fscache_uniq);
-	security_free_mnt_opts(&data->lsm_opts);
-out_free_fh:
+	nfs_free_parsed_mount_data(data);
 	nfs_free_fhandle(mntfh);
-	kfree(data);
 	return mntroot;
 
 out_err_nosb:
@@ -2618,9 +2623,7 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
 	mntfh = nfs_alloc_fhandle();
 	if (data == NULL || mntfh == NULL)
-		goto out_free_fh;
-
-	security_init_mnt_opts(&data->lsm_opts);
+		goto out;
 
 	/* Get a volume representation */
 	server = nfs4_create_server(data, mntfh);
@@ -2672,13 +2675,10 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
 
 	s->s_flags |= MS_ACTIVE;
 
-	security_free_mnt_opts(&data->lsm_opts);
 	nfs_free_fhandle(mntfh);
 	return mntroot;
 
 out:
-	security_free_mnt_opts(&data->lsm_opts);
-out_free_fh:
 	nfs_free_fhandle(mntfh);
 	return ERR_PTR(error);
 
@@ -2858,7 +2858,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
 
 	data = nfs_alloc_parsed_mount_data(4);
 	if (data == NULL)
-		goto out_free_data;
+		goto out;
 
 	/* Validate the mount data */
 	error = nfs4_validate_mount_data(raw_data, data, dev_name);
@@ -2872,12 +2872,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
 		error = PTR_ERR(res);
 
 out:
-	kfree(data->client_address);
-	kfree(data->nfs_server.export_path);
-	kfree(data->nfs_server.hostname);
-	kfree(data->fscache_uniq);
-out_free_data:
-	kfree(data);
+	nfs_free_parsed_mount_data(data);
 	dprintk("<-- nfs4_mount() = %d%s\n", error,
 		error != 0 ? " [error]" : "");
 	return res;
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 343bd76..b9c1c06 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -116,6 +116,8 @@ struct zone_reclaim_stat*
 mem_cgroup_get_reclaim_stat_from_page(struct page *page);
 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 					struct task_struct *p);
+extern void mem_cgroup_replace_page_cache(struct page *oldpage,
+					struct page *newpage);
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 extern int do_swap_account;
@@ -361,6 +363,10 @@ static inline
 void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
 {
 }
+static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
+				struct page *newpage)
+{
+}
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 8475f34..5c0e10d 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -602,11 +602,16 @@ struct nfs_getaclargs {
 	size_t				acl_len;
 	unsigned int			acl_pgbase;
 	struct page **			acl_pages;
+	struct page *			acl_scratch;
 	struct nfs4_sequence_args	seq_args;
 };
 
+/* getxattr ACL interface flags */
+#define NFS4_ACL_LEN_REQUEST 0x0001	/* zero length getxattr buffer */
 struct nfs_getaclres {
 	size_t				acl_len;
+	size_t				acl_data_offset;
+	int				acl_flags;
 	struct nfs4_sequence_res	seq_res;
 };
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index e884096..dad7d9a 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -392,7 +392,7 @@
 #define  PCI_EXP_TYPE_DOWNSTREAM 0x6	/* Downstream Port */
 #define  PCI_EXP_TYPE_PCI_BRIDGE 0x7	/* PCI/PCI-X Bridge */
 #define  PCI_EXP_TYPE_RC_END	0x9	/* Root Complex Integrated Endpoint */
-#define  PCI_EXP_TYPE_RC_EC	0x10	/* Root Complex Event Collector */
+#define  PCI_EXP_TYPE_RC_EC	0xa	/* Root Complex Event Collector */
 #define PCI_EXP_FLAGS_SLOT	0x0100	/* Slot implemented */
 #define PCI_EXP_FLAGS_IRQ	0x3e00	/* Interrupt message number */
 #define PCI_EXP_DEVCAP		4	/* Device capabilities */
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index a20970e..af70af3 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -191,6 +191,8 @@ extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
 			     struct xdr_array2_desc *desc);
 extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
 			     struct xdr_array2_desc *desc);
+extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase,
+			     size_t len);
 
 /*
  * Provide some simple tools for XDR buffer overflow-checking etc.
diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
index 99fcffb..454ee26 100644
--- a/include/xen/interface/io/xs_wire.h
+++ b/include/xen/interface/io/xs_wire.h
@@ -84,4 +84,7 @@ struct xenstore_domain_interface {
     XENSTORE_RING_IDX rsp_cons, rsp_prod;
 };
 
+/* Violating this is very bad. See docs/misc/xenstore.txt. */
+#define XENSTORE_PAYLOAD_MAX 4096
+
 #endif /* _XS_WIRE_H */
diff --git a/init/do_mounts.c b/init/do_mounts.c
index c0851a8..ef6478f 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -360,15 +360,42 @@ out:
 }
 
 #ifdef CONFIG_ROOT_NFS
+
+#define NFSROOT_TIMEOUT_MIN	5
+#define NFSROOT_TIMEOUT_MAX	30
+#define NFSROOT_RETRY_MAX	5
+
 static int __init mount_nfs_root(void)
 {
 	char *root_dev, *root_data;
+	unsigned int timeout;
+	int try, err;
 
-	if (nfs_root_data(&root_dev, &root_data) != 0)
-		return 0;
-	if (do_mount_root(root_dev, "nfs", root_mountflags, root_data) != 0)
+	err = nfs_root_data(&root_dev, &root_data);
+	if (err != 0)
 		return 0;
-	return 1;
+
+	/*
+	 * The server or network may not be ready, so try several
+	 * times.  Stop after a few tries in case the client wants
+	 * to fall back to other boot methods.
+	 */
+	timeout = NFSROOT_TIMEOUT_MIN;
+	for (try = 1; ; try++) {
+		err = do_mount_root(root_dev, "nfs",
+					root_mountflags, root_data);
+		if (err == 0)
+			return 1;
+		if (try > NFSROOT_RETRY_MAX)
+			break;
+
+		/* Wait, in case the server refused us immediately */
+		ssleep(timeout);
+		timeout <<= 1;
+		if (timeout > NFSROOT_TIMEOUT_MAX)
+			timeout = NFSROOT_TIMEOUT_MAX;
+	}
+	return 0;
 }
 #endif
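
mount_nfs_root() now retries with a capped exponential backoff: 5s, 10s, 20s, 30s, 30s, then gives up so other boot methods can be tried. The schedule in isolation, with the sleeps replaced by prints so it runs instantly:

    #include <stdio.h>

    #define NFSROOT_TIMEOUT_MIN 5
    #define NFSROOT_TIMEOUT_MAX 30
    #define NFSROOT_RETRY_MAX   5

    int main(void)
    {
        unsigned int timeout = NFSROOT_TIMEOUT_MIN;
        int try;

        for (try = 1; try <= NFSROOT_RETRY_MAX; try++) {
            printf("mount attempt %d failed, sleeping %us\n", try, timeout);
            timeout <<= 1;
            if (timeout > NFSROOT_TIMEOUT_MAX)
                timeout = NFSROOT_TIMEOUT_MAX;
        }
        puts("giving up, falling back to other boot methods");
        return 0;
    }
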
diff --git a/mm/filemap.c b/mm/filemap.c
index b91f3aa..0eedbf8 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -393,24 +393,11 @@ EXPORT_SYMBOL(filemap_write_and_wait_range);
 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 {
 	int error;
-	struct mem_cgroup *memcg = NULL;
 
 	VM_BUG_ON(!PageLocked(old));
 	VM_BUG_ON(!PageLocked(new));
 	VM_BUG_ON(new->mapping);
 
-	/*
-	 * This is not page migration, but prepare_migration and
-	 * end_migration does enough work for charge replacement.
-	 *
-	 * In the longer term we probably want a specialized function
-	 * for moving the charge from old to new in a more efficient
-	 * manner.
-	 */
-	error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
-	if (error)
-		return error;
-
 	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 	if (!error) {
 		struct address_space *mapping = old->mapping;
@@ -432,13 +419,12 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 		if (PageSwapBacked(new))
 			__inc_zone_page_state(new, NR_SHMEM);
 		spin_unlock_irq(&mapping->tree_lock);
+		/* mem_cgroup codes must not be called under tree_lock */
+		mem_cgroup_replace_page_cache(old, new);
 		radix_tree_preload_end();
 		if (freepage)
 			freepage(old);
 		page_cache_release(old);
-		mem_cgroup_end_migration(memcg, old, new, true);
-	} else {
-		mem_cgroup_end_migration(memcg, old, new, false);
 	}
 
 	return error;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index afde618..dd81ddc 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3355,6 +3355,50 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem,
 	cgroup_release_and_wakeup_rmdir(&mem->css);
 }
 
+/*
+ * At replace page cache, newpage is not under any memcg but it's on
+ * LRU. So, this function doesn't touch res_counter but handles LRU
+ * in correct way. Both pages are locked so we cannot race with uncharge.
+ */
+void mem_cgroup_replace_page_cache(struct page *oldpage,
+				  struct page *newpage)
+{
+	struct mem_cgroup *memcg;
+	struct page_cgroup *pc;
+	struct zone *zone;
+	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
+	unsigned long flags;
+
+	if (mem_cgroup_disabled())
+		return;
+
+	pc = lookup_page_cgroup(oldpage);
+	/* fix accounting on old pages */
+	lock_page_cgroup(pc);
+	memcg = pc->mem_cgroup;
+	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
+	ClearPageCgroupUsed(pc);
+	unlock_page_cgroup(pc);
+
+	if (PageSwapBacked(oldpage))
+		type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
+
+	zone = page_zone(newpage);
+	pc = lookup_page_cgroup(newpage);
+	/*
+	 * Even if newpage->mapping was NULL before starting replacement,
+	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
+	 * LRU while we overwrite pc->mem_cgroup.
+	 */
+	spin_lock_irqsave(&zone->lru_lock, flags);
+	if (PageLRU(newpage))
+		del_page_from_lru_list(zone, newpage, page_lru(newpage));
+	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, type);
+	if (PageLRU(newpage))
+		add_page_to_lru_list(zone, newpage, page_lru(newpage));
+	spin_unlock_irqrestore(&zone->lru_lock, flags);
+}
+
 #ifdef CONFIG_DEBUG_VM
 static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
 {
diff --git a/mm/slub.c b/mm/slub.c
index 7c54fe8..f73234d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2077,6 +2077,11 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto new_slab;
 	}
 
+	/* must check again c->freelist in case of cpu migration or IRQ */
+	object = c->freelist;
+	if (object)
+		goto load_freelist;
+
 	stat(s, ALLOC_SLOWPATH);
 
 	do {
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 7bc8702..ea70837 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -105,7 +105,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
 	if (status->flag & RX_FLAG_MMIC_ERROR)
 		goto mic_fail;
 
-	if (!(status->flag & RX_FLAG_IV_STRIPPED))
+	if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key)
 		goto update_iv;
 
 	return RX_CONTINUE;
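
The mm/slub.c hunk above is the classic "recheck after you finally get exclusive access" pattern: between the fast path reading c->freelist and the slow path disabling interrupts, an IRQ or a CPU migration may have refilled the per-cpu freelist, so the slow path must look again before allocating a new slab. An illustrative sketch of the shape of the pattern, using a C11 atomic in place of the per-cpu pointer (this is an analogy, not the kernel's locking):

    #include <stdio.h>
    #include <stdatomic.h>

    static _Atomic(void *) freelist;    /* stands in for c->freelist */

    static void *slow_alloc(void)
    {
        /* ...interrupts disabled / exclusive access in the real code... */

        /* must check the freelist again: it may have been refilled
         * between the failed fast path and this point */
        void *object = atomic_load(&freelist);
        if (object)
            return object;      /* the kernel jumps to load_freelist: */

        return NULL;            /* fall through: allocate a new slab */
    }

    int main(void)
    {
        atomic_store(&freelist, (void *)0x1234);    /* a refill "raced" in */
        printf("slow path got %p\n", slow_alloc());
        return 0;
    }
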
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 277ebd4..593f4c6 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -296,7 +296,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
  * Copies data into an arbitrary memory location from an array of pages
  * The copy is assumed to be non-overlapping.
  */
-static void
+void
 _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
 {
 	struct page **pgfrom;
@@ -324,6 +324,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
 
 	} while ((len -= copy) != 0);
 }
+EXPORT_SYMBOL_GPL(_copy_from_pages);
 
 /*
  * xdr_shrink_bufhead
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index da36d2c..5335605 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -177,8 +177,8 @@ void ima_store_measurement(struct ima_iint_cache *iint, struct file *file,
 	strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
 
 	result = ima_store_template(entry, violation, inode);
-	if (!result)
+	if (!result || result == -EEXIST)
 		iint->flags |= IMA_MEASURED;
-	else
+	if (result < 0)
 		kfree(entry);
 }
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index 8e28f04..55a6271 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -23,6 +23,8 @@
 #include <linux/rculist.h>
 #include "ima.h"
 
+#define AUDIT_CAUSE_LEN_MAX 32
+
 LIST_HEAD(ima_measurements);	/* list of all measurements */
 
 /* key: inode (before secure-hashing a file) */
@@ -94,7 +96,8 @@ static int ima_pcr_extend(const u8 *hash)
 
 	result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash);
 	if (result != 0)
-		pr_err("IMA: Error Communicating to TPM chip\n");
+		pr_err("IMA: Error Communicating to TPM chip, result: %d\n",
+		       result);
 	return result;
 }
 
@@ -106,14 +109,16 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
 {
 	u8 digest[IMA_DIGEST_SIZE];
 	const char *audit_cause = "hash_added";
+	char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
 	int audit_info = 1;
-	int result = 0;
+	int result = 0, tpmresult = 0;
 
 	mutex_lock(&ima_extend_list_mutex);
 	if (!violation) {
 		memcpy(digest, entry->digest, sizeof digest);
 		if (ima_lookup_digest_entry(digest)) {
 			audit_cause = "hash_exists";
+			result = -EEXIST;
 			goto out;
 		}
 	}
@@ -128,9 +133,11 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
 	if (violation)		/* invalidate pcr */
 		memset(digest, 0xff, sizeof digest);
 
-	result = ima_pcr_extend(digest);
-	if (result != 0) {
-		audit_cause = "TPM error";
+	tpmresult = ima_pcr_extend(digest);
+	if (tpmresult != 0) {
+		snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
+			 tpmresult);
+		audit_cause = tpm_audit_cause;
 		audit_info = 0;
 	}
 out:
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index 2e7ac31..f52ebe8 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -476,7 +476,12 @@ static inline u32 get_wcaps(struct hda_codec *codec, hda_nid_t nid)
 }
 
 /* get the widget type from widget capability bits */
-#define get_wcaps_type(wcaps) (((wcaps) & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT)
+static inline int get_wcaps_type(unsigned int wcaps)
+{
+	if (!wcaps)
+		return -1; /* invalid type */
+	return (wcaps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
+}
 
 static inline unsigned int get_wcaps_channels(u32 wcaps)
 {
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index 2be57b0..6936c37 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -54,6 +54,8 @@ static const char *get_wid_type_name(unsigned int wid_value)
 		[AC_WID_BEEP] = "Beep Generator Widget",
 		[AC_WID_VENDOR] = "Vendor Defined Widget",
 	};
+	if (wid_value == -1)
+		return "UNKNOWN Widget";
 	wid_value &= 0xf;
 	if (names[wid_value])
 		return names[wid_value];
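
The two IMA hunks above let ima_add_template_entry() distinguish "already measured" from real failures: a duplicate digest now returns -EEXIST, which the caller in ima_api.c treats as success for the purpose of setting IMA_MEASURED while still freeing the unqueued entry. The caller's decision table, spelled out as a runnable sketch:

    #include <stdio.h>

    #define EEXIST 17

    static void store_result(int result, int *measured, int *freed)
    {
        /* 0 or -EEXIST: the measurement is (already) in the list */
        *measured = (result == 0 || result == -EEXIST);
        /* any negative result: the entry was not queued, so free it */
        *freed = (result < 0);
    }

    int main(void)
    {
        int m, f;

        store_result(0, &m, &f);
        printf("ok:  measured=%d freed=%d\n", m, f);    /* 1 0 */
        store_result(-EEXIST, &m, &f);
        printf("dup: measured=%d freed=%d\n", m, f);    /* 1 1 */
        store_result(-5, &m, &f);
        printf("eio: measured=%d freed=%d\n", m, f);    /* 0 1 */
        return 0;
    }
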
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 4346ad2..3546d38 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -916,16 +916,14 @@ static void cs_automute(struct hda_codec *codec)
 
 	/* mute speakers if spdif or hp jack is plugged in */
 	for (i = 0; i < cfg->speaker_outs; i++) {
+		int pin_ctl = hp_present ? 0 : PIN_OUT;
+		/* detect on spdif is specific to CS421x */
+		if (spdif_present && (spec->vendor_nid == CS421X_VENDOR_NID))
+			pin_ctl = 0;
+
 		nid = cfg->speaker_pins[i];
 		snd_hda_codec_write(codec, nid, 0,
-				    AC_VERB_SET_PIN_WIDGET_CONTROL,
-				    hp_present ? 0 : PIN_OUT);
-		/* detect on spdif is specific to CS421x */
-		if (spec->vendor_nid == CS421X_VENDOR_NID) {
-			snd_hda_codec_write(codec, nid, 0,
-					AC_VERB_SET_PIN_WIDGET_CONTROL,
-					spdif_present ? 0 : PIN_OUT);
-		}
+				    AC_VERB_SET_PIN_WIDGET_CONTROL, pin_ctl);
 	}
 	if (spec->board_config == CS420X_MBP53 ||
 	    spec->board_config == CS420X_MBP55 ||
@@ -1756,30 +1754,19 @@ static int build_cs421x_output(struct hda_codec *codec)
 	struct auto_pin_cfg *cfg = &spec->autocfg;
 	struct snd_kcontrol *kctl;
 	int err;
-	char *name = "HP/Speakers";
+	char *name = "Master";
 
 	fix_volume_caps(codec, dac);
-	if (!spec->vmaster_sw) {
-		err = add_vmaster(codec, dac);
-		if (err < 0)
-			return err;
-	}
 
 	err = add_mute(codec, name, 0,
 			HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
 	if (err < 0)
 		return err;
-	err = snd_ctl_add_slave(spec->vmaster_sw, kctl);
-	if (err < 0)
-		return err;
 
 	err = add_volume(codec, name, 0,
 			HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
 	if (err < 0)
 		return err;
-	err = snd_ctl_add_slave(spec->vmaster_vol, kctl);
-	if (err < 0)
-		return err;
 
 	if (cfg->speaker_outs) {
 		err = snd_hda_ctl_add(codec, 0,
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 41fecc1..85fe10d 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -1119,8 +1119,6 @@ static const char * const cxt5045_models[CXT5045_MODELS] = {
 
 static const struct snd_pci_quirk cxt5045_cfg_tbl[] = {
 	SND_PCI_QUIRK(0x103c, 0x30d5, "HP 530", CXT5045_LAPTOP_HP530),
-	SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x3000, "HP DV Series",
-			   CXT5045_LAPTOP_HPSENSE),
 	SND_PCI_QUIRK(0x1179, 0xff31, "Toshiba P105", CXT5045_LAPTOP_MICSENSE),
 	SND_PCI_QUIRK(0x152d, 0x0753, "Benq R55E", CXT5045_BENQ),
 	SND_PCI_QUIRK(0x1734, 0x10ad, "Fujitsu Si1520", CXT5045_LAPTOP_MICSENSE),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 5261fd8..fc3c903 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -4306,6 +4306,27 @@ static void stac_store_hints(struct hda_codec *codec)
 	}
 }
 
+static void stac_issue_unsol_events(struct hda_codec *codec, int num_pins,
+				    const hda_nid_t *pins)
+{
+	while (num_pins--)
+		stac_issue_unsol_event(codec, *pins++);
+}
+
+/* fake event to set up pins */
+static void stac_fake_hp_events(struct hda_codec *codec)
+{
+	struct sigmatel_spec *spec = codec->spec;
+
+	if (spec->autocfg.hp_outs)
+		stac_issue_unsol_events(codec, spec->autocfg.hp_outs,
+					spec->autocfg.hp_pins);
+	if (spec->autocfg.line_outs &&
+	    spec->autocfg.line_out_pins[0] != spec->autocfg.hp_pins[0])
+		stac_issue_unsol_events(codec, spec->autocfg.line_outs,
+					spec->autocfg.line_out_pins);
+}
+
 static int stac92xx_init(struct hda_codec *codec)
 {
 	struct sigmatel_spec *spec = codec->spec;
@@ -4356,10 +4377,7 @@ static int stac92xx_init(struct hda_codec *codec)
 		stac92xx_auto_set_pinctl(codec, spec->autocfg.line_out_pins[0],
 				AC_PINCTL_OUT_EN);
 		/* fake event to set up pins */
-		if (cfg->hp_pins[0])
-			stac_issue_unsol_event(codec, cfg->hp_pins[0]);
-		else if (cfg->line_out_pins[0])
-			stac_issue_unsol_event(codec, cfg->line_out_pins[0]);
+		stac_fake_hp_events(codec);
 	} else {
 		stac92xx_auto_init_multi_out(codec);
 		stac92xx_auto_init_hp_out(codec);
@@ -5000,19 +5018,11 @@ static void stac927x_proc_hook(struct snd_info_buffer *buffer,
 #ifdef CONFIG_PM
 static int stac92xx_resume(struct hda_codec *codec)
 {
-	struct sigmatel_spec *spec = codec->spec;
-
 	stac92xx_init(codec);
 	snd_hda_codec_resume_amp(codec);
 	snd_hda_codec_resume_cache(codec);
 	/* fake event to set up pins again to override cached values */
-	if (spec->hp_detect) {
-		if (spec->autocfg.hp_pins[0])
-			stac_issue_unsol_event(codec, spec->autocfg.hp_pins[0]);
-		else if (spec->autocfg.line_out_pins[0])
-			stac_issue_unsol_event(codec,
-					spec->autocfg.line_out_pins[0]);
-	}
+	stac_fake_hp_events(codec);
 	return 0;
 }
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index d636d93..c3babe3 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -2187,7 +2187,10 @@ static int via_auto_create_loopback_switch(struct hda_codec *codec)
 {
 	struct via_spec *spec = codec->spec;
 
-	if (!spec->aa_mix_nid || !spec->out_mix_path.depth)
+	if (!spec->aa_mix_nid)
+		return 0; /* no loopback switching available */
+	if (!(spec->out_mix_path.depth || spec->hp_mix_path.depth ||
+	      spec->speaker_path.depth))
 		return 0; /* no loopback switching available */
 	if (!via_clone_control(spec, &via_aamix_ctl_enum))
 		return -ENOMEM;
diff --git a/sound/pci/ice1712/amp.c b/sound/pci/ice1712/amp.c
index e328cfb..e525da2 100644
--- a/sound/pci/ice1712/amp.c
+++ b/sound/pci/ice1712/amp.c
@@ -68,8 +68,11 @@ static int __devinit snd_vt1724_amp_init(struct snd_ice1712 *ice)
 
 static int __devinit snd_vt1724_amp_add_controls(struct snd_ice1712 *ice)
 {
-	/* we use pins 39 and 41 of the VT1616 for left and right read outputs */
-	snd_ac97_write_cache(ice->ac97, 0x5a, snd_ac97_read(ice->ac97, 0x5a) & ~0x8000);
+	if (ice->ac97)
+		/* we use pins 39 and 41 of the VT1616 for left and right
+		   read outputs */
+		snd_ac97_write_cache(ice->ac97, 0x5a,
+			snd_ac97_read(ice->ac97, 0x5a) & ~0x8000);
 	return 0;
 }
diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c
index c400ade..1e7a47a 100644
--- a/sound/usb/usx2y/usb_stream.c
+++ b/sound/usb/usx2y/usb_stream.c
@@ -674,7 +674,7 @@ dotry:
 		inurb->transfer_buffer_length =
 			inurb->number_of_packets *
 			inurb->iso_frame_desc[0].length;
-		preempt_disable();
+
 		if (u == 0) {
 			int now;
 			struct usb_device *dev = inurb->dev;
@@ -686,19 +686,17 @@ dotry:
 		}
 		err = usb_submit_urb(inurb, GFP_ATOMIC);
 		if (err < 0) {
-			preempt_enable();
 			snd_printk(KERN_ERR"usb_submit_urb(sk->inurb[%i])"
 				   " returned %i\n", u, err);
 			return err;
 		}
 		err = usb_submit_urb(outurb, GFP_ATOMIC);
 		if (err < 0) {
-			preempt_enable();
 			snd_printk(KERN_ERR"usb_submit_urb(sk->outurb[%i])"
 				   " returned %i\n", u, err);
 			return err;
 		}
-		preempt_enable();
+
 		if (inurb->start_frame != outurb->start_frame) {
 			snd_printd(KERN_DEBUG
 				   "u[%i] start_frames differ in:%u out:%u\n",
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 4e9eaeb..af79102 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -17,6 +17,8 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/namei.h>
+#include <linux/fs.h>
 #include "irq.h"
 
 static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
@@ -474,12 +476,76 @@ out:
 	return r;
 }
 
+/*
+ * We want to test whether the caller has been granted permissions to
+ * use this device.  To be able to configure and control the device,
+ * the user needs access to PCI configuration space and BAR resources.
+ * These are accessed through PCI sysfs.  PCI config space is often
+ * passed to the process calling this ioctl via file descriptor, so we
+ * can't rely on access to that file.  We can check for permissions
+ * on each of the BAR resource files, which is a pretty clear
+ * indicator that the user has been granted access to the device.
+ */
+static int probe_sysfs_permissions(struct pci_dev *dev)
+{
+#ifdef CONFIG_SYSFS
+	int i;
+	bool bar_found = false;
+
+	for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) {
+		char *kpath, *syspath;
+		struct path path;
+		struct inode *inode;
+		int r;
+
+		if (!pci_resource_len(dev, i))
+			continue;
+
+		kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
+		if (!kpath)
+			return -ENOMEM;
+
+		/* Per sysfs-rules, sysfs is always at /sys */
+		syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i);
+		kfree(kpath);
+		if (!syspath)
+			return -ENOMEM;
+
+		r = kern_path(syspath, LOOKUP_FOLLOW, &path);
+		kfree(syspath);
+		if (r)
+			return r;
+
+		inode = path.dentry->d_inode;
+
+		r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS);
+		path_put(&path);
+		if (r)
+			return r;
+
+		bar_found = true;
+	}
+
+	/* If no resources, probably something special */
+	if (!bar_found)
+		return -EPERM;
+
+	return 0;
+#else
+	return -EINVAL; /* No way to control the device without sysfs */
+#endif
+}
+
 static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 				      struct kvm_assigned_pci_dev *assigned_dev)
 {
 	int r = 0, idx;
 	struct kvm_assigned_dev_kernel *match;
 	struct pci_dev *dev;
+	u8 header_type;
+
+	if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
+		return -EINVAL;
 
 	mutex_lock(&kvm->lock);
 	idx = srcu_read_lock(&kvm->srcu);
@@ -507,6 +573,18 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 		r = -EINVAL;
 		goto out_free;
 	}
+
+	/* Don't allow bridges to be assigned */
+	pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+	if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
+		r = -EPERM;
+		goto out_put;
+	}
+
+	r = probe_sysfs_permissions(dev);
+	if (r)
+		goto out_put;
+
 	if (pci_enable_device(dev)) {
 		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
 		r = -EBUSY;
@@ -538,16 +616,14 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 
 	list_add(&match->list, &kvm->arch.assigned_dev_head);
 
-	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
-		if (!kvm->arch.iommu_domain) {
-			r = kvm_iommu_map_guest(kvm);
-			if (r)
-				goto out_list_del;
-		}
-		r = kvm_assign_device(kvm, match);
+	if (!kvm->arch.iommu_domain) {
+		r = kvm_iommu_map_guest(kvm);
 		if (r)
 			goto out_list_del;
 	}
+	r = kvm_assign_device(kvm, match);
+	if (r)
+		goto out_list_del;
 
 out:
 	srcu_read_unlock(&kvm->srcu, idx);
@@ -587,8 +663,7 @@ static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
 		goto out;
 	}
 
-	if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
-		kvm_deassign_device(kvm, match);
+	kvm_deassign_device(kvm, match);
 
 	kvm_free_assigned_device(kvm, match);
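
probe_sysfs_permissions() is the kernel-side twin of a check any userspace tool can run before attempting device assignment: if you can open the device's BAR resource files read/write, you have been granted the device. A userspace approximation of the same test (the PCI address is a made-up example; substitute your own BDF):

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* hypothetical device; resource0 is the first BAR's sysfs file */
        const char *res = "/sys/bus/pci/devices/0000:01:00.0/resource0";

        /* mirrors the kernel's MAY_READ | MAY_WRITE inode_permission() test */
        if (access(res, R_OK | W_OK) == 0)
            puts("caller has been granted access to this BAR");
        else
            perror("access");
        return 0;
    }
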